source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
GB_unaryop__minv_uint64_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint64_int16
// op(A') function: GB_tran__minv_uint64_int16
// C type: uint64_t
// A type: int16_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 64)
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 64) ;
// casting
#define GB_CASTING(z, x) \
uint64_t z = (uint64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT64 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the unary operator elementwise: Cx [p] = minv ((uint64_t) Ax [p])
// for all anz entries, via the GB_CAST_OP macro defined above
// (cast int16_t -> uint64_t, then GB_IMINV_UNSIGNED with 64 bits).
// Parallelized statically over nthreads OpenMP threads.
// Returns GrB_NO_VALUE when the operator is disabled at compile time.
GrB_Info GB_unop__minv_uint64_int16
(
    uint64_t *restrict Cx,      // output array, size anz
    const int16_t *restrict Ax, // input array, size anz
    int64_t anz,                // number of entries
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = minv ((uint64_t) A'): transpose A, typecast, and apply the unary
// operator.  All work is done by the included template, specialized by the
// GB_* macros defined above.  Returns GrB_NO_VALUE when disabled.
GrB_Info GB_tran__minv_uint64_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,                // Rowcounts [naslice]
    GBI_single_iterator Iter,           // iterator over the slices of A
    const int64_t *restrict A_slice,    // how A is sliced across tasks
    int naslice                         // number of slices
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_binop__gt_uint64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__gt_uint64)
// A.*B function (eWiseMult): GB (_AemultB_08__gt_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__gt_uint64)
// A.*B function (eWiseMult): GB (_AemultB_04__gt_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__gt_uint64)
// A*D function (colscale): GB (_AxD__gt_uint64)
// D*A function (rowscale): GB (_DxB__gt_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__gt_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__gt_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__gt_uint64)
// C=scalar+B GB (_bind1st__gt_uint64)
// C=scalar+B' GB (_bind1st_tran__gt_uint64)
// C=A+scalar GB (_bind2nd__gt_uint64)
// C=A'+scalar GB (_bind2nd_tran__gt_uint64)
// C type: bool
// A type: uint64_t
// A pattern? 0
// B type: uint64_t
// B pattern? 0
// BinaryOp: cij = (aij > bij)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x > y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GT || GxB_NO_UINT64 || GxB_NO_GT_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; cij = (aij > bij).
// The loop comes from the included template, specialized by the GB_* macros.
void GB (_Cdense_ewise3_noaccum__gt_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse.  The template is compiled out
// (#if 0) because GT is a comparator, not a valid accumulator; the function
// is kept only so the generated dispatch table links.
GrB_Info GB (_Cdense_accumB__gt_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b (scalar) where C is dense.  Like _Cdense_accumB, the body is
// compiled out (#if 0): GT cannot serve as an accumulator.
GrB_Info GB (_Cdense_accumb__gt_uint64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // pointer to the scalar b
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale each column j of A by the diagonal entry D(j,j),
// with cij = (aij > djj).  C has the same pattern as A; Cx is bool.
GrB_Info GB (_AxD__gt_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale each row i of B by the diagonal entry D(i,i),
// with cij = (dii > bij).  C has the same pattern as B; Cx is bool.
GrB_Info GB (_DxB__gt_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B, where entries present in
// only one of A or B are kept.  For GxB_eWiseUnion, alpha/beta scalars
// replace the missing operand; they are read only when is_eWiseUnion.
GrB_Info GB (_AaddB__gt_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces freed by GB_FREE_WORKSPACE after the template runs
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint64_t alpha_scalar ;
    uint64_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint64_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C = A.*B (set intersection) where C is sparse/hyper,
// optionally masked by M or !M.  All work is done by the included template.
GrB_Info GB (_AemultB_08__gt_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  GB_BINOP_FLIP is 0 for GT (z = y > x is handled elsewhere
// by rewriting to LT), so only the unflipped template branch is compiled.
GrB_Info GB (_AemultB_02__gt_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B where M is sparse/hyper and both A and B
// are bitmap/full; the mask drives iteration over C.
GrB_Info GB (_AemultB_04__gt_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult where C is held as a bitmap: C = A.*B, C<M> = A.*B, or
// C<!M> = A.*B, via the bitmap emult template.
GrB_Info GB (_AemultB_bitmap__gt_uint64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x > Bx [p]) with the scalar bound as the first operand.
// Entries absent from the bitmap Bb (if non-NULL) are skipped.
GrB_Info GB (_bind1st__gt_uint64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,     // scalar x, of type uint64_t
    const GB_void *Bx_input,    // B values, of type uint64_t
    const int8_t *restrict Bb,  // bitmap of B, or NULL if B is full
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    uint64_t   x = (*((uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        uint64_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x > bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] > y) with the scalar bound as the second operand.
// Entries absent from the bitmap Ab (if non-NULL) are skipped.
GrB_Info GB (_bind2nd__gt_uint64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,    // A values, of type uint64_t
    const GB_void *y_input,     // scalar y, of type uint64_t
    const int8_t *restrict Ab,  // bitmap of A, or NULL if A is full
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t   y = (*((uint64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint64_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij > y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x > aij) ; \
}
// C = op (x, A'): transpose A and apply cij = (x > aij), using the
// GB_CAST_OP macro redefined just above this function.
GrB_Info GB (_bind1st_tran__gt_uint64)
(
    GrB_Matrix C,
    const GB_void *x_input,     // scalar x, of type uint64_t
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows (preprocessor-only; this
    // is past the return and generates no instructions)
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij > y) ; \
}
// C = op (A', y): transpose A and apply cij = (aij > y), using the
// GB_CAST_OP macro redefined just above this function.
GrB_Info GB (_bind2nd_tran__gt_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,     // scalar y, of type uint64_t
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
single.c | #include <stdio.h>
#include <omp.h>
// Demo of the OpenMP `single` construct: exactly one thread reads the
// value `a` from stdin; the implicit barrier at the end of the single
// region makes `a` visible to the whole team before the worksharing loop
// copies it into every element of b.
int main() {
  int n = 9, i, a, b[n];
  // sentinel fill so uninitialized slots would be visible in the output
  for (i=0; i<n; i++) b[i] = -1;
  #pragma omp parallel
  {
    // executed by exactly one thread of the team (not necessarily thread 0)
    #pragma omp single
    { printf("Introduce valor de inicialización a: ");
      scanf("%d", &a );   // NOTE(review): scanf result unchecked — a stays
                          // uninitialized on bad input; consider validating
      printf("Single ejecutada por el thread %d\n",
             omp_get_thread_num());
    }
    // iterations are divided among the threads; the loop index is
    // implicitly private under `omp for`
    #pragma omp for
    for (i=0; i<n; i++)
      b[i] = a;
  }
  printf("Depués de la región parallel:\n");
  for (i=0; i<n; i++) printf("b[%d] = %d\t",i,b[i]);
  printf("\n");
  return 0;
}
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#if defined(_OPENMP)
#include <omp.h>    /* omp_get_max_threads is called under _OPENMP below */
#endif
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute RESULT = X - Y for two struct timeval values.
 *
 * Y is used as scratch space and may be modified by the call.
 * Returns 1 if the difference is negative, otherwise 0.  On return,
 * result->tv_usec is always non-negative. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y so that y->tv_usec <= x->tv_usec. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Carry surplus microseconds (more than one second's worth) into
     * y's seconds field. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* The microsecond difference is now guaranteed non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    /* Negative overall difference shows up purely in the seconds field. */
    return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 16;
tile_size[3] = 32;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
matrix_op-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file matrix_op-inl.h
* \brief Function definition of matrix related operators
*/
#ifndef MXNET_OPERATOR_TENSOR_MATRIX_OP_INL_H_
#define MXNET_OPERATOR_TENSOR_MATRIX_OP_INL_H_
#include <mxnet/operator_util.h>
#include <vector>
#include <string>
#include <algorithm>
#include <utility>
#include <type_traits>
#include "../mshadow_op.h"
#include "../elemwise_op_common.h"
#include "../channel_op_common.h"
#include "../mxnet_op.h"
#include "broadcast_reduce_op.h"
#include "./init_op.h"
#include "../../common/static_array.h"
#include "./slice-inl.h"
#if MXNET_USE_CUDA
#include <thrust/device_vector.h>
#endif
#ifdef __CUDACC__
#include "./pseudo2DTranspose_op-inl.cuh"
#endif
namespace mxnet {
namespace op {
// Operator parameters for Reshape.  `shape` is the modern interface;
// `target_shape`/`keep_highest` are kept for backward compatibility and
// are documented as deprecated below.
struct ReshapeParam : public dmlc::Parameter<ReshapeParam> {
  mxnet::TShape target_shape;   // deprecated target shape (0 = infer dim)
  bool keep_highest;            // deprecated: pin dim 0 to the input's dim 0
  mxnet::Tuple<int> shape;      // reshape spec; see InferReshapeShape codes
  bool reverse;                 // apply the spec right-to-left
  DMLC_DECLARE_PARAMETER(ReshapeParam) {
    DMLC_DECLARE_FIELD(shape)
        .set_default(mxnet::Tuple<int>())
        .describe("The target shape");
    DMLC_DECLARE_FIELD(reverse)
        .set_default(false)
        .describe("If true then the special values are inferred from right to left");
    DMLC_DECLARE_FIELD(target_shape)
        .set_default(mxnet::TShape(0, -1))
        .describe("(Deprecated! Use ``shape`` instead.) "
                  "Target new shape. One and only one dim can be 0, "
                  "in which case it will be inferred from the rest of dims");
    DMLC_DECLARE_FIELD(keep_highest).set_default(false)
        .describe("(Deprecated! Use ``shape`` instead.) Whether keep the highest dim unchanged."
                  "If set to true, then the first dim in target_shape is ignored,"
                  "and always fixed as input");
  }
  // Equality over all four fields; used for parameter caching/hashing.
  bool operator==(const ReshapeParam &other) const {
    return this->target_shape == other.target_shape &&
           this->keep_highest == other.keep_highest &&
           this->shape == other.shape &&
           this->reverse == other.reverse;
  }
};
// Resolve a reshape spec `shape` against the input shape `dshape`.
// Special codes in the spec:
//   0  -> copy the corresponding source dim unchanged
//  -1  -> infer this one dim from the total element count (at most one)
//  -2  -> copy all remaining source dims
//  -3  -> merge the next two source dims into one
//  -4  -> split one source dim into the following two spec values
// When `reverse` is true the spec is matched right-to-left.
template<typename IType>
inline mxnet::TShape InferReshapeShape(const mxnet::Tuple<IType>& shape,
                                       const mxnet::TShape& dshape, bool reverse) {
  std::vector<IType> dshape_vec;
  std::vector<IType> param_shape_vec(shape.begin(), shape.end());
  for (int i = 0; i < dshape.ndim(); ++i) {
    dshape_vec.push_back(dshape[i]);
  }
  std::vector<IType> tmp;       // output dims accumulated here
  size_t src_idx = 0;           // cursor into the source dims
  int inf_idx = -1;             // position of the single -1 ("infer") dim
  if (reverse) {
    // matching right-to-left is implemented by reversing both lists,
    // running the normal left-to-right pass, and reversing back at the end
    std::reverse(dshape_vec.begin(), dshape_vec.end());
    std::reverse(param_shape_vec.begin(), param_shape_vec.end());
  }
  auto dshape_len = dshape_vec.size();
  auto params_len = param_shape_vec.size();
  for (size_t i = 0; i < params_len; ++i) {
    IType proposed_dim = param_shape_vec[i];
    if (proposed_dim == 0) {
      // keep same
      CHECK_LT(src_idx, dshape_len);
      tmp.push_back(dshape_vec[src_idx++]);
    } else if (proposed_dim == -1) {
      // infer: placeholder 1 so the product of the other dims is computable
      CHECK_LT(inf_idx, 0) << "One and only one dim can be inferred";
      inf_idx = i;
      tmp.push_back(1);
      src_idx++;
    } else if (proposed_dim == -2) {
      // copy all remaining dims from source
      while (src_idx < dshape_len) {
        const int dn = dshape_vec[src_idx++];
        tmp.push_back(dn);
      }
    } else if (proposed_dim == -3) {
      // merge two dims from source
      CHECK_LT(src_idx, dshape_len-1);
      const int d1 = dshape_vec[src_idx++];
      const int d2 = dshape_vec[src_idx++];
      if (!mxnet::dim_size_is_known(d1) || !mxnet::dim_size_is_known(d2)) {
        // either factor unknown -> merged dim unknown
        tmp.push_back(-1);
      } else {
        tmp.push_back(d1 * d2);
      }
    } else if (proposed_dim == -4) {
      // split the source dim s into two dims
      // read the left dim and then the right dim (either can be -1)
      CHECK_LT(i + 2, params_len);
      CHECK_LT(src_idx, dshape_len);
      const int d0 = dshape_vec[src_idx++];
      IType d1 = param_shape_vec[++i];
      IType d2 = param_shape_vec[++i];
      CHECK(d1 != -1 || d2 != -1) << "Split dims cannot both be -1.";
      if (d1 == -1 && d0 >= 0) d1 = d0 / d2;  // d0 must be known to do this
      if (d2 == -1 && d0 >= 0) d2 = d0 / d1;  // d0 must be known to do this
      CHECK(d1 * d2 == static_cast<IType>(d0) || static_cast<IType>(d0) == IType(-1)) <<
        "Split dims " << d1 << ", " << d2 << " do not divide original dim " << d0;
      tmp.push_back(d1);
      tmp.push_back(d2);
    } else {
      // greater than 0, new shape
      tmp.push_back(proposed_dim);
      src_idx++;
    }
  }
  if (inf_idx >= 0) {
    // fill in the single inferred dim: total elements / product of the rest
    if (shape_is_known(dshape)) {
      IType new_size = 1;
      for (IType x : tmp) new_size *= x;
      tmp[inf_idx] = dshape.Size() / new_size;
    } else {
      // source size unknown -> leave the dim unknown
      tmp[inf_idx] = -1;
    }
  }
  if (reverse) {
    std::reverse(param_shape_vec.begin(), param_shape_vec.end());
    std::reverse(dshape_vec.begin(), dshape_vec.end());
    std::reverse(tmp.begin(), tmp.end());
  }
  mxnet::TShape oshape(tmp.begin(), tmp.end());
  return oshape;
}
// Back-propagate a fully-known output shape `out` into `*in`, filling in
// at most one unknown dim of *in so the element counts match.
// Returns true when *in ends up fully known, false otherwise.
inline bool ReverseReshapeInferShape(mxnet::TShape *in, const mxnet::TShape& out) {
  if (shape_is_known(*in) && shape_is_known(out)) {
    // nothing to infer
    return true;
  } else if (!shape_is_known(out)) {
    // cannot infer from an unknown target
    return false;
  } else {
    int zero_axis = -1;            // index of the single unknown dim in *in
    int known_dim_size_prod = 1;   // product of the known dims of *in
    for (int i = 0; i < in->ndim(); i++) {
      if (!mxnet::dim_size_is_known(*in, i)) {
        if (zero_axis != -1)
          return false;  // more than 1 zero found.
        else
          zero_axis = i;
      } else {
        known_dim_size_prod *= (*in)[i];
      }
    }
    // NOTE(review): this branch is only reached when *in is not fully known,
    // which presumably guarantees zero_axis != -1 here — verify, since
    // zero_axis == -1 would index out of bounds.
    (*in)[zero_axis] = out.Size() / known_dim_size_prod;
    return true;
  }
}
// Shape-inference function registered for the Reshape operator.
// Prefers the modern `shape` spec; falls back to the deprecated
// `target_shape` (with optional keep_highest and a single 0 = infer dim);
// with neither set, tries to infer the input from the output.
// Returns true when the output shape is fully resolved.
inline bool ReshapeShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector *in_attrs,
                         mxnet::ShapeVector *out_attrs) {
  const ReshapeParam& param_ = nnvm::get<ReshapeParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]";
  CHECK_EQ(out_attrs->size(), 1U);
  mxnet::TShape &dshape = (*in_attrs)[0];
  if (!mxnet::ndim_is_known(dshape)) return false;
  mxnet::TShape oshape;
  if (param_.shape.ndim() != 0) {
    // modern path: decode the special-value spec
    oshape = InferReshapeShape(param_.shape, dshape, param_.reverse);
  } else if (param_.target_shape.ndim() != -1) {
    // deprecated path: target_shape with at most one 0 meaning "infer"
    LOG(INFO) << "Using target_shape will be deprecated.";
    oshape = param_.target_shape;
    int neg_count = 0;      // number of 0-valued (to-infer) dims seen
    index_t inf_idx = 0;
    index_t start_idx = param_.keep_highest ? 1 : 0;
    if (param_.keep_highest) {
      // dim 0 is pinned to the input's dim 0
      oshape[0] = dshape[0];
    }
    for (int i = start_idx; i < oshape.ndim(); ++i) {
      if (oshape[i] == 0) {
        neg_count++;
        inf_idx = i;
      }
    }
    if (neg_count == 1) {
      // set to 1 first so oshape.Size() is the product of the other dims
      oshape[inf_idx] = 1;
      oshape[inf_idx] = dshape.Size() / oshape.Size();
    }
  } else {
    // no spec at all: try to run inference backwards from the output
    return shape_is_known((*out_attrs)[0])
        && ReverseReshapeInferShape(&(*in_attrs)[0], (*out_attrs)[0]);
  }
  ReverseReshapeInferShape(&dshape, oshape);
  #if 0
  CHECK_EQ(oshape.Size(), dshape.Size())
      << "Target shape size is different to source. "
      << "Target: " << oshape
      << "\nSource: " << dshape;
  #endif
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
  return ReverseReshapeInferShape(&(*in_attrs)[0], (*out_attrs)[0]);
}
// Shape inference for Flatten: collapse all dims after the first into one,
// producing (dshape[0], prod(dshape[1:])).
inline bool FlattenShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector *in_attrs,
                         mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]";
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape &dshape = (*in_attrs)[0];
  if (!shape_is_known(dshape)) return false;
  size_t target_dim = 1;
  for (int i = 1; i < dshape.ndim(); ++i) {
    target_dim *= dshape[i];
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, mshadow::Shape2(dshape[0], target_dim));
  return true;
}
// Operator parameter for Transpose: the desired axis permutation.
// An empty `axes` (the default) means "reverse all axes".
struct TransposeParam : public dmlc::Parameter<TransposeParam> {
  mxnet::TShape axes;
  DMLC_DECLARE_PARAMETER(TransposeParam) {
    DMLC_DECLARE_FIELD(axes).set_default(mxnet::TShape(0, -1))
        .describe("Target axis order. By default the axes will be inverted.");
  }
  // Equality on the permutation; used for parameter caching/hashing.
  bool operator==(const TransposeParam &other) const {
    return this->axes == other.axes;
  }
};
/*!
* \brief This function performs transpose operation on a 2D matrix by utilizing the L1 cache
* \param in input tensor
* \param out output tensor
* \param row shape of dim 0 of input
* \param col shape of dim 1 of input
* \tparam DType Data type
* \tparam is_addto
*/
// Cache-blocked 2D transpose: out (col x row) = in (row x col) transposed,
// or accumulated (+=) into out when is_addto is true.
template<typename DType, bool is_addto>
MSHADOW_XINLINE void Transpose2D(const DType *in, DType *out, index_t row, index_t col) {
  // ensure cache line hits and prevent cache miss for any configuration
  // L1 cache size to be utilized = 32kb = 2^15
  // Largest size of a single unit of any dtype <= 8 byte = 2^3
  // Number of elements - (2^15/2^3) = 2^12
  // Block-size - 2^6 v 2^6 (64 v 64)
  // But we could leverage unrolling of for loops (for parallelization)
  // Block-size - 2^5 v 2^5 (32 v 32) with potential 4 pragma for loop unrolled
  // blocksize * blocksize * num_threads = cache_size / dtype_size
  // Instead of explicit unroll, let compiler figure out optimal unroll factor
  const index_t blocksize = 32;

  // collapse 2 parallelizes 2 for loops
  // inner 2 for loops aren't parallelized to prevent cache miss
  // Microsoft Visual C++ compiler does not support omp collapse
#ifdef _MSC_VER
  #pragma omp parallel for
#else
  #pragma omp parallel for collapse(2)
#endif  // _MSC_VER
  // outer pair of loops walks blocks; each block is transposed by the
  // inner pair, clamped at the matrix edges for partial blocks
  for (index_t i = 0; i < row; i += blocksize) {
    for (index_t j = 0; j < col; j += blocksize) {
      // transpose the block
      for (index_t a = j; (a < blocksize + j) && (a < col); ++a) {
        for (index_t b = i; (b < blocksize + i) && (b < row); ++b) {
          if (!is_addto) {
            out[a * row + b] = in[b * col + a];
          } else {
            out[a * row + b] += in[b * col + a];
          }
        }
      }
    }
  }
}
inline bool IsIdentityTranspose(const TShape& axes) {
for (dim_t i = 0; i < axes.ndim(); i++) {
if (axes[i] != i) return false;
}
return true;
}
// Core transpose dispatch: ret = transpose(src, axes), or ret += transpose
// when is_addto.  Tries, in order: the CUDA pseudo-2D fast path, the
// identity-permutation copy path, a blocked CPU 2D kernel, and mshadow's
// generic transpose for 3..6 dims.  Returns false only when ndim > 6.
template<typename xpu, bool is_addto = false>
bool TransposeCommonImpl(RunContext ctx,
                         const TBlob& src,
                         const TBlob& ret,
                         const mxnet::TShape& axes) {
  // return true when running successfully, otherwise false
  using namespace mshadow;
  using namespace mshadow::expr;
  CHECK_EQ(src.type_flag_, ret.type_flag_);
  // zero-size tensor, no need to compute
  if (src.shape_.Size() == 0U) return true;
  Stream<xpu> *s = ctx.get_stream<xpu>();
#ifdef __CUDACC__
  // This transpose can be used only if there exist n and m such that:
  // params = (0, ..., n-1, n+m, ..., params.size, n, ..., n+m-1)
  // Example: (0, 2, 3, 1) or (0, 3, 1, 2), but not (0, 2, 1, 3).
  if (isPseudo2DTranspose(axes)) {
    MSHADOW_TYPE_SWITCH(ret.type_flag_, DType, {
      transpose_pseudo2D<DType, is_addto>(ret, src, axes, s);
    });
    return true;
  }
#endif
  // Special handle the identity case
  if (IsIdentityTranspose(axes)) {
    MSHADOW_TYPE_SWITCH(ret.type_flag_, DType, {
      Tensor<xpu, 1, DType> in = src.get_with_shape<xpu, 1, DType>(mshadow::Shape1(src.Size()), s);
      Tensor<xpu, 1, DType> out = ret.get_with_shape<xpu, 1, DType>(mshadow::Shape1(ret.Size()), s);
      if (!is_addto) {
        // Use memcpy to accelerate the speed
        Copy(out, in, s);
      } else {
        // elementwise out += in via the kAddTo identity kernel
        mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, kAddTo>, xpu>::Launch(
          s, ret.Size(), out.dptr_, in.dptr_);
      }
    });
    return true;
  }
  // Handle the general transpose case
  MSHADOW_TYPE_SWITCH(ret.type_flag_, DType, {
    switch (axes.ndim()) {
      case 2: {
        // 2D on CPU uses the cache-blocked kernel; on GPU this case is
        // unreachable because the pseudo-2D path above covers it
        Tensor<xpu, 2, DType> in = src.get<xpu, 2, DType>(s);
        Tensor<xpu, 2, DType> out = ret.get<xpu, 2, DType>(s);
        if (ctx.get_ctx().dev_mask() == cpu::kDevMask) {
          Transpose2D<DType, is_addto>(in.dptr_, out.dptr_, in.shape_[0], in.shape_[1]);
        } else {
          LOG(FATAL) << "Not Implemented. We should never reach here because the 2D case "
                        "in GPU has been covered by transpose_pseudo2D."
                        " Report an issue in Github.";
        }
        break;
      }
      case 3: {
        Tensor<xpu, 3, DType> in = src.get<xpu, 3, DType>(s);
        Tensor<xpu, 3, DType> out = ret.get<xpu, 3, DType>(s);
        if (!is_addto) {
          out = transpose(in, axes.get<3>());
        } else {
          out += transpose(in, axes.get<3>());
        }
        break;
      }
      case 4: {
        Tensor<xpu, 4, DType> in = src.get<xpu, 4, DType>(s);
        Tensor<xpu, 4, DType> out = ret.get<xpu, 4, DType>(s);
        if (!is_addto) {
          out = transpose(in, axes.get<4>());
        } else {
          out += transpose(in, axes.get<4>());
        }
        break;
      }
      case 5: {
        Tensor<xpu, 5, DType> in = src.get<xpu, 5, DType>(s);
        Tensor<xpu, 5, DType> out = ret.get<xpu, 5, DType>(s);
        if (!is_addto) {
          out = transpose(in, axes.get<5>());
        } else {
          out += transpose(in, axes.get<5>());
        }
        break;
      }
      case 6: {
        Tensor<xpu, 6, DType> in = src.get<xpu, 6, DType>(s);
        Tensor<xpu, 6, DType> out = ret.get<xpu, 6, DType>(s);
        if (!is_addto) {
          out = transpose(in, axes.get<6>());
        } else {
          out += transpose(in, axes.get<6>());
        }
        break;
      }
      default:
        // return false when dimensions > 6
        return false;
        break;
    }
  });
  return true;
}
// Transpose restricted to ndim <= 6; thin checked wrapper over
// TransposeCommonImpl. Fails hard if the common path reports failure.
template<typename xpu, bool is_addto = false>
void TransposeImpl(RunContext ctx,
                   const TBlob& src,
                   const TBlob& ret,
                   const mxnet::TShape& axes) {
  CHECK_LE(axes.ndim(), 6) << "TransposeImpl supports at most 6 dimensions";
  const bool succeeded = TransposeCommonImpl<xpu, is_addto>(ctx, src, ret, axes);
  CHECK(succeeded) << "Failed to execute TransposeImpl Operator";
}
template <bool is_addto>
struct TransposeExKernel {
  /*!
   * \brief Generic N-dimensional transpose kernel used when ndim > 6.
   * Decomposes the flat input index into multi-dimensional coordinates via
   * the input strides, and re-linearizes them via the output strides.
   * \param tid global thread id (flat index into the input)
   * \param out_data output data
   * \param in_data input data
   * \param strides input strides and output strides
   *        (layout: [0, ndim) input strides, [ndim, 2*ndim) output strides)
   * \param ndim the number of dimension
   */
  template <typename DType>
  MSHADOW_XINLINE static void Map(index_t tid,
                                  DType *out_data,
                                  const DType *in_data,
                                  const dim_t *strides,
                                  const int ndim
                                  ) {
    // tid is the index of input data
    const dim_t* const out_strides = strides + ndim;
    index_t k = tid;
    index_t out_id = 0;
    for (int i = 0; i < ndim; ++i) {
      // coordinate along axis i is k / strides[i]; fold it into the
      // flat output offset, then strip it from k
      out_id += (k / strides[i]) * out_strides[i];
      k %= strides[i];
    }
    if (is_addto)
      out_data[out_id] += in_data[tid];
    else
      out_data[out_id] = in_data[tid];
  }
};
// Transpose supporting arbitrary ndim. For ndim <= 6 it delegates to
// TransposeCommonImpl; otherwise it launches TransposeExKernel with
// stride tables copied into `strides_xpu`.
template<typename xpu, bool is_addto = false>
void TransposeExImpl(RunContext ctx,
                     const TBlob& src,
                     const TBlob& ret,
                     const mxnet::TShape& axes,
                     mshadow::Tensor<xpu, 1, dim_t>& strides_xpu
                     ) {
  /*
   * If ndim <= 6, it is not necessary to allocate any space for `strides_xpu`
   * If ndim > 6, `strides_xpu` should be allocated `ndim * 2` elements
   */
  using namespace mshadow;
  using namespace mshadow::expr;
  if (TransposeCommonImpl<xpu, is_addto>(ctx, src, ret, axes)) return;
  CHECK_GT(axes.ndim(), 6) <<
    "Failed to execute TransposeExImpl when axes.ndim() <= 6";
  Stream<xpu> *s = ctx.get_stream<xpu>();
  MSHADOW_TYPE_SWITCH(ret.type_flag_, DType, {
    CHECK_EQ(strides_xpu.MSize(), axes.ndim() * 2) << \
      "If ndim > 6, `strides_xpu` should be allocated `ndim * 2` elements";
    const mxnet::TShape &in_shape = src.shape_;
    // strides: in_strides and out_strides
    const int ndim = axes.ndim();
    std::vector<dim_t> strides(ndim * 2);
    // compute in_strides (row-major strides of the input shape)
    strides[ndim - 1] = 1;
    for (int i = ndim - 2; i >= 0; --i) {
      strides[i] = strides[i + 1] * in_shape[i + 1];
    }
    // compute out_strides: row-major strides of the permuted (output) shape,
    // where output dim i has extent in_shape[axes[i]]
    std::vector<dim_t> tmp_strides(ndim);
    tmp_strides[ndim - 1] = 1;
    for (int i = ndim - 2; i >= 0; --i) {
      tmp_strides[i] = tmp_strides[i + 1] * in_shape[axes[i + 1]];
    }
    // reorder tmp_strides to out_strides so that out_strides is indexed
    // by *input* axis, matching the kernel's input-order decomposition
    dim_t * const out_strides = &strides[ndim];
    for (int i = 0; i < ndim; ++i) {
      out_strides[axes[i]] = tmp_strides[i];
    }
    Shape<1> strides_shape;
    strides_shape[0] = ndim * 2;
    Tensor<cpu, 1, dim_t> strides_cpu(strides.data(), strides_shape);
    // copy arguments into xpu context
    Copy(strides_xpu, strides_cpu, s);
    const DType *in = src.dptr<DType>();
    DType *out = ret.dptr<DType>();
    if (is_addto) {
      mxnet_op::Kernel<TransposeExKernel<true>, xpu>::Launch(s,
          in_shape.Size(), out, in, strides_xpu.dptr_, ndim);
    } else {
      mxnet_op::Kernel<TransposeExKernel<false>, xpu>::Launch(s,
          in_shape.Size(), out, in, strides_xpu.dptr_, ndim);
    }
  });
}
// Returns the scratch tensor TransposeExImpl needs: `2 * ndim` dim_t elements
// when axes.ndim() > 6, or an empty tensor otherwise (no workspace required).
template<typename xpu>
mshadow::Tensor<xpu, 1, dim_t> GetTransposeExWorkspace(
    const OpContext& ctx,
    const mxnet::TShape& axes
    ) {
  if (axes.ndim() <= 6) {
    // The common transpose path handles ndim <= 6 without scratch space.
    return {};
  }
  // Room for both the input-stride and output-stride tables.
  mshadow::Shape<1> workspace_shape;
  workspace_shape[0] = axes.ndim() * 2;
  return ctx.requested[0].get_space_typed<xpu, 1, dim_t>(
      workspace_shape, ctx.get_stream<xpu>());
}
// matrix transpose
// Forward entry point for the transpose operator. An empty `axes` parameter
// means "reverse all axes"; otherwise negative axes are canonicalized.
// Supports kWriteTo and kAddTo (kNullOp is a no-op).
template<typename xpu>
void Transpose(const nnvm::NodeAttrs& attrs,
               const OpContext& ctx,
               const std::vector<TBlob>& inputs,
               const std::vector<OpReqType>& req,
               const std::vector<TBlob>& outputs) {
  if (req[0] == kNullOp) {
    return;
  }
  const TransposeParam& param = nnvm::get<TransposeParam>(attrs.parsed);
  CHECK(req[0] == kWriteTo || req[0] == kAddTo)
      << "Transpose only supports kNullOp, kWriteTo and kAddTo";
  mxnet::TShape axes;
  if (param.axes.ndim() == 0) {
    // Default behavior: fully reverse the axis order.
    const int nd = inputs[0].ndim();
    axes = mxnet::TShape(nd, -1);
    for (int i = 0; i < nd; ++i) {
      axes[i] = nd - 1 - i;
    }
  } else {
    axes = common::CanonicalizeAxes(param.axes);
  }
  mshadow::Tensor<xpu, 1, dim_t> workspace =
      GetTransposeExWorkspace<xpu>(ctx, axes);
  if (req[0] == kAddTo) {
    TransposeExImpl<xpu, true>(ctx.run_ctx, inputs[0], outputs[0],
                               axes, workspace);
  } else {
    TransposeExImpl<xpu, false>(ctx.run_ctx, inputs[0], outputs[0],
                                axes, workspace);
  }
}
// Shape inference for transpose. Infers in both directions: `ret` is the
// output shape implied by the input, `get` is the input shape implied by the
// output (using the inverse permutation).
inline bool TransposeShape(const nnvm::NodeAttrs& attrs,
                           mxnet::ShapeVector *in_attrs,
                           mxnet::ShapeVector *out_attrs) {
  const TransposeParam& param = nnvm::get<TransposeParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  mxnet::TShape& shp = (*in_attrs)[0];
  mxnet::TShape& out_shp = (*out_attrs)[0];
  if (!mxnet::ndim_is_known(shp) && !mxnet::ndim_is_known(out_shp))
    return false;  // none of the shapes is known
  if (out_shp.ndim() >= 0 && shp.ndim() >= 0)
    CHECK_EQ(out_shp.ndim(), shp.ndim());
  // `get`: inferred input shape (reverse inference from the output);
  // `ret`: inferred output shape (forward inference from the input).
  mxnet::TShape get(std::max(shp.ndim(), out_shp.ndim()), -1);
  mxnet::TShape ret(std::max(shp.ndim(), out_shp.ndim()), -1);
  if (param.axes.ndim() == 0) {
    // Empty axes means full reversal.
    for (int i = 0; i < shp.ndim(); ++i) {
      ret[i] = shp[shp.ndim()-1-i];
    }
    for (int i = 0; i < out_shp.ndim(); ++i) {
      get[shp.ndim()-1-i] = out_shp[i];
    }
  } else {
    CHECK_EQ(std::max(shp.ndim(), out_shp.ndim()), param.axes.ndim());
    for (int i = 0; i < shp.ndim(); ++i) {
      CHECK(param.axes[i] < static_cast<int64_t>(shp.ndim()));
      ret[i] = shp[param.axes[i]];
    }
    for (int i = 0; i < out_shp.ndim(); ++i) {
      // Inverse permutation: output dim i came from input dim axes[i].
      get[param.axes[i]] = out_shp[i];
    }
  }
  SHAPE_ASSIGN_CHECK(*in_attrs, 0, get);
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, ret);
  return shape_is_known(ret);
}
// Parameter for expand_dims: the position at which a new axis of size 1
// is inserted.
struct ExpandDimParam : public dmlc::Parameter<ExpandDimParam> {
  int axis;  // insertion position; negative values count from the end
  DMLC_DECLARE_PARAMETER(ExpandDimParam) {
    DMLC_DECLARE_FIELD(axis)
    .describe("Position where new axis is to be inserted. Suppose that "
              "the input `NDArray`'s dimension is `ndim`, the range of "
              "the inserted axis is `[-ndim, ndim]`");
  }
  bool operator==(const ExpandDimParam &other) const {
    return this->axis == other.axis;
  }
  // Serializes the parameter into a string map (used for caching/recording).
  void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
    std::ostringstream axis_s;
    axis_s << axis;
    (*dict)["axis"] = axis_s.str();
  }
};
// Shape inference for expand_dims. Infers the output shape by inserting a
// size-1 axis at `param.axis`, and also back-infers the input shape by
// removing that axis from a known output shape.
inline bool ExpandDimShape(const nnvm::NodeAttrs& attrs,
                           mxnet::ShapeVector *in_attrs,
                           mxnet::ShapeVector *out_attrs) {
  const ExpandDimParam& param = nnvm::get<ExpandDimParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  mxnet::TShape& ishape = (*in_attrs)[0];
  mxnet::TShape& oshape = (*out_attrs)[0];
  if (!mxnet::ndim_is_known(ishape) && !mxnet::ndim_is_known(oshape)) {
    return false;
  }
  int indim = ishape.ndim();
  bool unknown_ishape = false;
  if (-1 == indim) {
    // Input ndim unknown: derive it from the (known) output ndim.
    indim = oshape.ndim() - 1;
    unknown_ishape = true;
  }
  int axis = param.axis;
  if (axis < 0) {
    axis += indim + 1;
  }
  CHECK(axis >= 0 && axis <= indim)
      << "axis must be in the range [" << -indim << ", " << indim << "] ("
      << param.axis << " provided)";
  // Forward inference: copy input dims around the inserted size-1 axis.
  mxnet::TShape ret(indim + 1, -1);
  for (int i = 0; i < axis; ++i) {
    ret[i] = (unknown_ishape? -1 : ishape[i]);
  }
  ret[axis] = 1;
  for (int i = axis+1; i < indim+1; ++i) {
    ret[i] = (unknown_ishape? -1 : ishape[i-1]);
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, ret);
  // Backward inference: drop the inserted axis from the output shape.
  ret = mxnet::TShape(indim, -1);
  for (int i = 0; i < axis; ++i) ret[i] = oshape[i];
  for (int i = axis+1; i < indim+1; ++i) ret[i-1] = oshape[i];
  SHAPE_ASSIGN_CHECK(*in_attrs, 0, ret);
  return shape_is_known(in_attrs->at(0)) && shape_is_known(out_attrs->at(0));
}
// Currently MKLDNN only supports step = 1 or step has no value
inline bool SupportMKLDNNSlice(const SliceParam& param) {
  const int num_steps = param.step.ndim();
  if (num_steps == 0) {
    return true;  // no step given at all
  }
  for (int i = 0; i < num_steps; ++i) {
    const auto& step = param.step[i];
    if (step.has_value() && step.value() != 1) {
      return false;
    }
  }
  return true;
}
// Storage-type inference for slice forward. Dense input dispatches to
// oneDNN (when available, on CPU, with trivial step) or the default
// FCompute; CSR input with trivial step keeps CSR storage; everything
// else falls back to dense.
inline bool SliceForwardInferStorageType(const nnvm::NodeAttrs& attrs,
                                         const int dev_mask,
                                         DispatchMode* dispatch_mode,
                                         std::vector<int>* in_attrs,
                                         std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1);
  CHECK_EQ(out_attrs->size(), 1);
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  const auto& in_stype = in_attrs->at(0);
  auto& out_stype = out_attrs->at(0);
  bool dispatched = false;
  const auto dispatch_ex = DispatchMode::kFComputeEx;
  // If step = 1, no need to fallback; otherwise fallback to dense
  bool trivial_step = false;
  if (param.step.ndim() == 0U) {
    trivial_step = true;
  } else if (param.step.ndim() == 1U
      && (!param.step[0].has_value() || param.step[0].value() == 1)) {
    trivial_step = true;
  }
  if (in_stype == kDefaultStorage) {
#if MXNET_USE_ONEDNN == 1
    // Prefer the oneDNN (FComputeEx) path when the environment and the
    // slice parameters allow it.
    if (dev_mask == Context::kCPU && MKLDNNEnvSet()
        && SupportMKLDNNSlice(param)) {
      dispatched = storage_type_assign(&out_stype, kDefaultStorage,
                                       dispatch_mode, dispatch_ex);
    }
#endif
    if (!dispatched) {
      dispatched = storage_type_assign(&out_stype, kDefaultStorage,
                                       dispatch_mode, DispatchMode::kFCompute);
    }
  }
  if (!dispatched && in_stype == kCSRStorage && trivial_step) {
    dispatched = storage_type_assign(&out_stype, kCSRStorage,
                                     dispatch_mode, dispatch_ex);
  }
  if (!dispatched) {
    dispatched = dispatch_fallback(out_attrs, dispatch_mode);
  }
  return dispatched;
}
// slice the indptr of a csr
struct SliceCsrIndPtr {
  // Rebase each sliced indptr entry so the output slice starts at zero:
  // dst[i] = src[i] - *offset.
  template<typename IType>
  MSHADOW_XINLINE static void Map(int i, IType* dst, const IType* src, const IType* offset) {
    KERNEL_ASSIGN(dst[i], kWriteTo, src[i] - *offset);
  }
};
/*
 * a wrapper to launch SliceCsrIndPtr kernel.
 * slice [src[begin] .. src[end]) and store in dst[0, end - begin)
 */
template<typename xpu, typename IType>
void SliceCsrIndPtrImpl(const int begin, const int end, RunContext ctx,
                        const IType* src, IType* dst) {
  using namespace mshadow;
  using namespace mxnet_op;
  Stream<xpu> *s = ctx.get_stream<xpu>();
  // One entry per sliced row, plus the trailing end pointer.
  const int indptr_len = end - begin + 1;
  // Rebase against src[begin] so the output indptr starts at zero.
  Kernel<SliceCsrIndPtr, xpu>::Launch(s, indptr_len, dst, src + begin, src + begin);
}
/*
 * Slice a CSR NDArray for first dimension
 * Copies the [begin[0], end[0]) row range of `in` into `out`:
 * rebases indptr, then copies the corresponding idx/data segments.
 */
template<typename xpu>
void SliceDimOneCsrImpl(const mxnet::TShape &begin, const mxnet::TShape &end, const OpContext& ctx,
                        const NDArray &in, const NDArray &out) {
  using namespace mshadow;
  using namespace mxnet_op;
  using namespace csr;
  nnvm::dim_t begin_row = begin[0];
  nnvm::dim_t end_row = end[0];
  nnvm::dim_t indptr_len = end_row - begin_row + 1;
  out.CheckAndAllocAuxData(kIndPtr, Shape1(indptr_len));
  // assume idx indptr share the same type
  MSHADOW_IDX_TYPE_SWITCH(in.aux_type(kIndPtr), RType, {
    MSHADOW_IDX_TYPE_SWITCH(in.aux_type(kIdx), IType, {
      MSHADOW_TYPE_SWITCH(in.dtype(), DType, {
        RType* in_indptr = in.aux_data(kIndPtr).dptr<RType>();
        RType* out_indptr = out.aux_data(kIndPtr).dptr<RType>();
        // Write the rebased indptr for the sliced rows.
        SliceCsrIndPtrImpl<xpu, RType>(begin_row, end_row, ctx.run_ctx, in_indptr, out_indptr);
        Stream<xpu> *s = ctx.get_stream<xpu>();
        // Read nnz of the slice back to host (last rebased indptr entry).
        RType nnz = 0;
        mshadow::Copy(Tensor<cpu, 1, RType>(&nnz, Shape1(1)),
                      Tensor<xpu, 1, RType>(out_indptr + indptr_len - 1, Shape1(1), s));
        // return csr zeros if nnz = 0
        if (nnz == 0) {
          out.set_aux_shape(kIdx, Shape1(0));
          return;
        }
        // copy indices and values
        out.CheckAndAllocAuxData(kIdx, Shape1(nnz));
        out.CheckAndAllocData(Shape1(nnz));
        IType* in_idx = in.aux_data(kIdx).dptr<IType>();
        IType* out_idx = out.aux_data(kIdx).dptr<IType>();
        DType* in_data = in.data().dptr<DType>();
        DType* out_data = out.data().dptr<DType>();
        // Offset of the first nonzero of the sliced rows in the input arrays.
        RType offset = 0;
        mshadow::Copy(Tensor<cpu, 1, RType>(&offset, Shape1(1)),
                      Tensor<xpu, 1, RType>(in_indptr + begin_row, Shape1(1), s));
        mshadow::Copy(Tensor<xpu, 1, IType>(out_idx, Shape1(nnz), s),
                      Tensor<xpu, 1, IType>(in_idx + offset, Shape1(nnz), s), s);
        mshadow::Copy(Tensor<xpu, 1, DType>(out_data, Shape1(nnz), s),
                      Tensor<xpu, 1, DType>(in_data + offset, Shape1(nnz), s), s);
      });
    });
  });
}
/*!
 * \brief slice a CSRNDArray for two dimensions
 */
struct SliceDimTwoCsrAssign {
  /*!
   * \brief This function slices a CSRNDArray on axis one between begin_col and end_col
   * \param i loop index (row index)
   * \param out_idx output csr ndarray column indices
   * \param out_data output csr ndarray data
   * \param out_indptr output csr ndarray row index pointer
   * \param in_idx input csr ndarray column indices
   * \param in_data input csr ndarray data
   * \param in_indptr input csr ndarray row index pointer
   * \param begin_col begin column index (inclusive)
   * \param end_col end column index (exclusive)
   */
  template<typename IType, typename RType, typename DType>
  MSHADOW_XINLINE static void Map(int i,
                                  IType* out_idx, DType* out_data,
                                  const RType* out_indptr,
                                  const IType* in_idx, const DType* in_data,
                                  const RType* in_indptr,
                                  const int begin_col, const int end_col) {
    // Write position for row i in the output arrays.
    RType ind = out_indptr[i];
    for (RType j = in_indptr[i]; j < in_indptr[i+1]; j++) {
      // indices of CSRNDArray are in ascending order per row
      if (in_idx[j] >= end_col) {
        break;
      } else if (in_idx[j] >= begin_col) {
        // Keep the entry, rebasing the column index to the slice origin.
        out_idx[ind] = in_idx[j] - begin_col;
        out_data[ind] = in_data[j];
        ind++;
      }
    }
  }
};
/*
 * Slice a CSR NDArray for two dimensions
 * (forward declaration; per-device definitions live in the .cc/.cu files)
 */
template<typename xpu>
void SliceDimTwoCsrImpl(const mxnet::TShape &begin, const mxnet::TShape &end, const OpContext& ctx,
                        const NDArray &in, const NDArray &out);
// Slice dispatcher for CSR input. Normalizes begin/end (negative begins
// wrap; end is derived from the output shape) and dispatches to the 1-D
// or 2-D CSR slice implementation.
template<typename xpu>
void SliceCsrImpl(const SliceParam &param, const OpContext& ctx,
                  const NDArray &in, OpReqType req, const NDArray &out) {
  if (req == kNullOp) return;
  CHECK_NE(req, kAddTo) << "kAddTo for Slice on CSR input is not supported";
  CHECK_NE(req, kWriteInplace) << "kWriteInplace for Slice on CSR input is not supported";
  const mxnet::TShape ishape = in.shape();
  const mxnet::TShape oshape = out.shape();
  int N = ishape.ndim();
  mxnet::TShape begin(N, -1), end(N, -1);
  for (int i = 0; i < N; ++i) {
    int s = 0;
    // Missing/unset begin defaults to 0; negative begins count from the end.
    if (i < param.begin.ndim() && param.begin[i]) {
      s = *param.begin[i];
      if (s < 0) s += ishape[i];
    }
    begin[i] = s;
    // End follows from the already-inferred output extent.
    end[i] = s + oshape[i];
  }
  switch (N) {
    case 1: {
      SliceDimOneCsrImpl<xpu>(begin, end, ctx, in, out);
      break;
    }
    case 2: {
      SliceDimTwoCsrImpl<xpu>(begin, end, ctx, in, out);
      break;
    }
    default:
      LOG(FATAL) << "CSR is only for 2-D shape";
      break;
  }
}
// FComputeEx entry point for slice on non-default storage.
// Only CSR input is supported; anything else is a hard error.
template<typename xpu>
void SliceEx(const nnvm::NodeAttrs& attrs,
             const OpContext& ctx,
             const std::vector<NDArray>& inputs,
             const std::vector<OpReqType>& req,
             const std::vector<NDArray>& outputs) {
  CHECK_EQ(inputs.size(), 1);
  CHECK_EQ(outputs.size(), 1);
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  auto in_stype = inputs[0].storage_type();
  if (in_stype == kCSRStorage) {
    SliceCsrImpl<xpu>(param, ctx, inputs[0], req[0], outputs[0]);
  } else {
    // Fixed: missing space made the message run into the storage type value.
    LOG(FATAL) << "Slice not implemented for storage type " << in_stype;
  }
}
// Normalizes the slice parameters (begin/end/step, possibly negative or
// unset) for every axis of `dshape` into concrete per-axis (begin, end,
// step) triples. Axes beyond param_begin.ndim() become full-range slices.
// Returns true if any axis yields an empty (zero-size) slice.
template<int ndim>
inline bool GetIndexRange(const mxnet::TShape& dshape,
                          const mxnet::Tuple<dmlc::optional<index_t>>& param_begin,
                          const mxnet::Tuple<dmlc::optional<index_t>>& param_end,
                          const mxnet::Tuple<dmlc::optional<index_t>>& param_step,
                          common::StaticArray<index_t, ndim>* begin,
                          common::StaticArray<index_t, ndim>* end,
                          common::StaticArray<index_t, ndim>* step) {
  // Function returns false if output is zero-sized, true otherwise.
  bool zero_size_shape = false;
  CHECK_NE(dshape.ndim(), 0U);
  CHECK_LE(param_begin.ndim(), dshape.ndim())
    << "Slicing axis exceeds data dimensions";
  CHECK_LE(param_end.ndim(), dshape.ndim())
    << "Slicing axis exceeds data dimensions";
  CHECK_EQ(param_begin.ndim(), param_end.ndim())
    << "begin and end must have the same length";
  CHECK_EQ(ndim, dshape.ndim())
    << "Static array size=" << ndim
    << " is not equal to data shape ndim=" << dshape.ndim();
  if (param_step.ndim() > 0) {
    CHECK_EQ(param_step.ndim(), param_begin.ndim())
      << "step and begin must have the same length";
  }
  for (int i = 0; i < param_begin.ndim(); ++i) {
    // Step defaults to 1 when unset; zero step is invalid.
    index_t s = param_step.ndim() > 0 && param_step[i].has_value() ? param_step[i].value() : 1;
    CHECK_NE(s, 0) << "slice op step[" << i << "] cannot be 0";
    index_t b = 0, e = 0;
    const index_t len = dshape[i];
    if (len > 0) {
      // Unset begin/end default to the full range in the direction of `s`
      // (for negative step: start at the last element, stop before index 0).
      b = param_begin[i].has_value() ? param_begin[i].value() : (s < 0 ? len - 1 : 0);
      e = param_end[i].has_value() ? param_end[i].value() : (s < 0 ? -1 : len);
      if (b < 0) {
        b += len;
      }
      if (e < 0 && param_end[i].has_value()) {
        e += len;
      }
      // move the begin and end to correct position for calculating dim size
      b = (b < 0 && s > 0) ? 0 : b;
      b = (b > len - 1 && s < 0) ? len - 1 : b;
      // if the start value lead to empty tensor under step s, use -1 for indication
      b = (b < 0 || b > len - 1) ? -1 : b;
      e = e > -1 ? e : -1;
      e = e > len ? len : e;
    } else if (len == 0) {
      b = 0;
      e = 0;
    }
    (*begin)[i] = b;
    (*end)[i] = e;
    (*step)[i] = s;
    // checking begin==end
    if (b == e) {
      zero_size_shape = true;
    }
  }
  // Remaining axes are taken in full with step 1.
  for (int i = param_begin.ndim(); i < dshape.ndim(); ++i) {
    (*begin)[i] = 0;
    (*end)[i] = dshape[i];
    (*step)[i] = 1;
  }
  return zero_size_shape;
}
// Sets (*oshape)[i] to the number of elements selected on axis i by the
// normalized slice triple (b, e, s) produced by GetIndexRange.
// An unknown input dim propagates as -1; an empty range yields 0.
inline void SetSliceOpOutputDimSize(const mxnet::TShape& dshape,
                                    const index_t i, const index_t b,
                                    const index_t e, const index_t s,
                                    mxnet::TShape* oshape) {
  if (!mxnet::dim_size_is_known(dshape, i)) {
    // Unknown extent on this axis: output extent is unknown too.
    (*oshape)[i] = -1;
    return;
  }
  if (b < 0 || e == b) {
    // b == -1 flags an empty range from GetIndexRange; e == b is empty too.
    (*oshape)[i] = 0;
    return;
  }
  if (s > 0) {
    (*oshape)[i] = e > b ? (e - b - 1) / s + 1 : 0;
  } else {
    (*oshape)[i] = e < b ? (b - e - 1) / (-s) + 1 : 0;
  }
}
// Shape inference for the slice operator: normalizes the slice parameters
// and computes each output extent via SetSliceOpOutputDimSize.
inline bool SliceOpShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector* in_attrs,
                         mxnet::ShapeVector* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape& dshape = (*in_attrs)[0];
  if (!mxnet::ndim_is_known(dshape)) return false;
  CHECK_GT(dshape.ndim(), 0) << "slice only works for ndim > 0";
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  // Start from the input shape; axes past param.begin.ndim() keep their extent.
  mxnet::TShape oshape = dshape;
  MXNET_NDIM_SWITCH(dshape.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(dshape, param.begin, param.end, param.step, &begin, &end, &step);
    for (int i = 0; i < param.begin.ndim(); ++i) {
      const index_t b = begin[i], e = end[i], s = step[i];
      SetSliceOpOutputDimSize(dshape, i, b, e, s, &oshape);
    }
  })
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
  return shape_is_known(dshape) && shape_is_known(oshape);
}
// Slice forward kernel, specialized per device. The GPU variant uses one
// thread per output element; the CPU variant uses one thread per output row.
template<int ndim, int req, typename xpu>
struct slice_forward;
template<int ndim, int req>
struct slice_forward<ndim, req, gpu> {
  // i is the flat index of one output element (last dim included)
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* data,
                                  const mshadow::Shape<ndim> dshape,
                                  const mshadow::Shape<ndim> oshape,
                                  const common::StaticArray<index_t, ndim> begin,
                                  const common::StaticArray<index_t, ndim> step) {
    const index_t data_last_dim_size = dshape[ndim-1];
    const index_t out_last_dim_size = oshape[ndim-1];
    const index_t step_last_dim = step[ndim-1];
    const index_t begin_last_dim = begin[ndim-1];
    // Split i into (row index, position j within the last dim).
    const index_t j = i % out_last_dim_size;
    index_t irow = 0;  // row id of flattend 2D data
    index_t stride = 1;
    index_t idx = i / out_last_dim_size;
    #pragma unroll
    for (int k = ndim - 2; k >= 0; --k) {
      // Map the output coordinate on axis k back to the input coordinate.
      irow += stride * ((idx % oshape[k]) * step[k] + begin[k]);
      idx /= oshape[k];
      stride *= dshape[k];
    }
    KERNEL_ASSIGN(out[i], req,
                  data[irow * data_last_dim_size + j * step_last_dim + begin_last_dim]);
  }
};
template<int ndim, int req>
struct slice_forward<ndim, req, cpu> {
  // i is the i-th row after flattening out into 2D tensor; the whole last
  // dimension of that row is copied inside the j-loop.
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* data,
                                  const mshadow::Shape<ndim> dshape,
                                  const mshadow::Shape<ndim> oshape,
                                  const common::StaticArray<index_t, ndim> begin,
                                  const common::StaticArray<index_t, ndim> step) {
    const index_t data_last_dim_size = dshape[ndim-1];
    const index_t out_last_dim_size = oshape[ndim-1];
    const index_t step_last_dim = step[ndim-1];
    const index_t begin_last_dim = begin[ndim-1];
    index_t out_offset = i * out_last_dim_size;
    for (index_t j = 0; j < out_last_dim_size; ++j) {
      index_t irow = 0;  // row id of flattend 2D data
      index_t stride = 1;
      index_t idx = i;
      #pragma unroll
      for (int k = ndim - 2; k >= 0; --k) {
        // Map the output coordinate on axis k back to the input coordinate.
        irow += stride * ((idx % oshape[k]) * step[k] + begin[k]);
        idx /= oshape[k];
        stride *= dshape[k];
      }
      KERNEL_ASSIGN(out[out_offset++], req,
                    data[irow * data_last_dim_size + j * step_last_dim + begin_last_dim]);
    }
  }
};
// Forward compute for the slice operator on dense tensors. Launches the
// device-specific slice_forward kernel: per-row threads on CPU, per-element
// threads on GPU.
template<typename xpu>
void SliceOpForward(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  if (req[0] == kNullOp) return;
  using namespace mshadow;
  Stream<xpu>* s = ctx.get_stream<xpu>();
  const TBlob& data = inputs[0];
  const TBlob& out = outputs[0];
  if (out.Size() == 0) return;  // empty slice: nothing to write
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  MXNET_NDIM_SWITCH(data.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(data.shape_, param.begin, param.end, param.step, &begin, &end, &step);
    MSHADOW_TYPE_SWITCH_WITH_BOOL(out.type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        // CPU kernel works one row at a time; GPU kernel one element at a time.
        size_t num_threads = out.shape_.FlatTo2D()[0];
        if (std::is_same<xpu, gpu>::value) {
          num_threads *= out.shape_.get<ndim>()[ndim - 1];
        }
        mxnet_op::Kernel<slice_forward<ndim, Req, xpu>, xpu>::Launch(s, num_threads,
            out.dptr<DType>(), data.dptr<DType>(),
            data.shape_.get<ndim>(), out.shape_.get<ndim>(), begin, step);
      })
    })
  })
}
// Scatter kernel for slice assignment (the inverse addressing of
// slice_forward): writes `val` into the sliced region of `out`.
template<int ndim, int req, typename xpu>
struct slice_assign;
template<int ndim, int req>
struct slice_assign<ndim, req, cpu> {
  // i is the i-th row after flattening out into 2D tensor; the whole last
  // dimension of that row is scattered inside the j-loop.
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* val,
                                  const mshadow::Shape<ndim> oshape,
                                  const mshadow::Shape<ndim> vshape,
                                  const common::StaticArray<index_t, ndim> begin,
                                  const common::StaticArray<index_t, ndim> step) {
    const index_t data_last_dim_size = oshape[ndim-1];
    const index_t out_last_dim_size = vshape[ndim-1];
    const index_t step_last_dim = step[ndim-1];
    const index_t begin_last_dim = begin[ndim-1];
    index_t offset = i * out_last_dim_size;
    for (index_t j = 0; j < out_last_dim_size; ++j) {
      index_t irow = 0;  // row id of flattend 2D out
      index_t stride = 1;
      index_t idx = i;
      #pragma unroll
      for (int k = ndim - 2; k >= 0; --k) {
        // Map the value coordinate on axis k to the destination coordinate.
        irow += stride * ((idx % vshape[k]) * step[k] + begin[k]);
        idx /= vshape[k];
        stride *= oshape[k];
      }
      KERNEL_ASSIGN(out[irow * data_last_dim_size + j * step_last_dim + begin_last_dim],
                    req, val[offset++]);
    }
  }
};
template<int ndim, int req>
struct slice_assign<ndim, req, gpu> {
  // i is the flat index of one value element (last dim included)
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* val,
                                  const mshadow::Shape<ndim> oshape,
                                  const mshadow::Shape<ndim> vshape,
                                  const common::StaticArray<index_t, ndim> begin,
                                  const common::StaticArray<index_t, ndim> step) {
    const index_t data_last_dim_size = oshape[ndim-1];
    const index_t out_last_dim_size = vshape[ndim-1];
    const index_t step_last_dim = step[ndim-1];
    const index_t begin_last_dim = begin[ndim-1];
    // Split i into (row index, position j within the last dim).
    const index_t j = i % out_last_dim_size;
    index_t irow = 0;  // row id of flattend 2D out
    index_t stride = 1;
    index_t idx = i / out_last_dim_size;
    #pragma unroll
    for (int k = ndim - 2; k >= 0; --k) {
      // Map the value coordinate on axis k to the destination coordinate.
      irow += stride * ((idx % vshape[k]) * step[k] + begin[k]);
      idx /= vshape[k];
      stride *= oshape[k];
    }
    KERNEL_ASSIGN(out[irow * data_last_dim_size + j * step_last_dim + begin_last_dim],
                  req, val[i]);
  }
};
// Backward compute for slice: scatters the output gradient back into the
// (zero-filled or accumulated) input gradient via the slice_assign kernel.
template<typename xpu>
void SliceOpBackward(const nnvm::NodeAttrs& attrs,
                     const OpContext& ctx,
                     const std::vector<TBlob>& inputs,
                     const std::vector<OpReqType>& req,
                     const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  if (req[0] == kNullOp) return;
  using namespace mshadow;
  Stream<xpu>* s = ctx.get_stream<xpu>();
  const TBlob& ograd = inputs[0];
  const TBlob& igrad = outputs[0];
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  if (req[0] == kWriteTo) {
    // Positions outside the slice receive zero gradient.
    Fill(s, igrad, req[0], 0);
  } else if (req[0] == kWriteInplace) {
    LOG(FATAL) << "_slice_backward does not support kWriteInplace";
  }
  if (ograd.Size() == 0) return;  // empty slice: nothing to scatter
  MXNET_NDIM_SWITCH(ograd.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(igrad.shape_, param.begin, param.end, param.step, &begin, &end, &step);
    MSHADOW_TYPE_SWITCH(ograd.type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        // index_t (not int): the GPU thread count is rows * last-dim and can
        // overflow 32 bits; also matches SliceOpForward/SliceAssignOpForward.
        index_t num_threads = ograd.shape_.FlatTo2D()[0];
        if (std::is_same<xpu, gpu>::value) {
          num_threads *= ograd.shape_.get<ndim>()[ndim - 1];
        }
        mxnet_op::Kernel<slice_assign<ndim, Req, xpu>, xpu>::Launch(s, num_threads,
            igrad.dptr<DType>(), ograd.dptr<DType>(),
            igrad.shape_.get<ndim>(), ograd.shape_.get<ndim>(), begin, step);
      })
    })
  })
}
// Shape inference for slice assignment: output matches the data input;
// the value input (RHS) must match the sliced region's shape.
inline bool SliceAssignOpShape(const nnvm::NodeAttrs& attrs,
                               mxnet::ShapeVector *in_attrs,
                               mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(in_attrs->size(), 2U);
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape& dshape = (*in_attrs)[0];
  if (!mxnet::ndim_is_known(dshape)) return false;
  mxnet::TShape vshape = dshape;  // vshape is the value shape on the right hand side
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  MXNET_NDIM_SWITCH(dshape.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(dshape, param.begin, param.end, param.step, &begin, &end, &step);
    for (int i = 0; i < param.begin.ndim(); ++i) {
      const index_t b = begin[i], e = end[i], s = step[i];
      SetSliceOpOutputDimSize(dshape, i, b, e, s, &vshape);
    }
  })
  SHAPE_ASSIGN_CHECK(*in_attrs, 1, vshape);
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape);
  return true;
}
// Forward compute for slice assignment (data[slice] = val): copies data to
// the output (unless writing in place) and then scatters `val` into the
// sliced region via the slice_assign kernel.
template<typename xpu>
void SliceAssignOpForward(const nnvm::NodeAttrs& attrs,
                          const OpContext& ctx,
                          const std::vector<TBlob>& inputs,
                          const std::vector<OpReqType>& req,
                          const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  CHECK_EQ(inputs.size(), 2U);  // data[index] = val, data and val are two inputs
  CHECK_EQ(outputs.size(), 1U);
  if (req[0] == kNullOp) return;
  Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& data = inputs[0];
  const TBlob& val = inputs[1];
  const TBlob& out = outputs[0];
  if (req[0] == kWriteTo) {
    // Copy the untouched parts of `data` into the fresh output first.
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      Tensor<xpu, 1, DType> in = inputs[0].FlatTo1D<xpu, DType>(s);
      Tensor<xpu, 1, DType> out = outputs[0].FlatTo1D<xpu, DType>(s);
      Copy(out, in, s);
    });
  } else if (req[0] != kWriteInplace) {
    LOG(FATAL) << "_slice_assign only supports kWriteTo and kWriteInplace";
  }
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  MXNET_NDIM_SWITCH(data.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    bool zero_size_shape = GetIndexRange(data.shape_, param.begin, param.end, param.step,
                                         &begin, &end, &step);
    if (zero_size_shape) {
      return;  // slice_assign of zero-sized subspace needs no operation.
    }
    MSHADOW_TYPE_SWITCH(out.type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        // CPU kernel works one row of `val` at a time; GPU one element.
        index_t num_threads = val.shape_.FlatTo2D()[0];
        if (std::is_same<xpu, gpu>::value) {
          num_threads *= val.shape_.get<ndim>()[ndim - 1];
        }
        mxnet_op::Kernel<slice_assign<ndim, Req, xpu>, xpu>::Launch(s, num_threads,
            out.dptr<DType>(), val.dptr<DType>(),
            out.shape_.get<ndim>(), val.shape_.get<ndim>(), begin, step);
      })
    })
  })
}
// Parameter for _slice_assign_scalar: assigns a scalar value to the region
// selected by begin/end/step.
struct SliceAssignScalarParam : public dmlc::Parameter<SliceAssignScalarParam> {
  double scalar;  // value written into the sliced region (cast to the output dtype)
  mxnet::Tuple<dmlc::optional<index_t>> begin, end;
  mxnet::Tuple<dmlc::optional<index_t>> step;
  DMLC_DECLARE_PARAMETER(SliceAssignScalarParam) {
    DMLC_DECLARE_FIELD(scalar)
    .set_default(0)
    .describe("The scalar value for assignment.");
    DMLC_DECLARE_FIELD(begin)
    .describe("starting indices for the slice operation, supports negative indices.");
    DMLC_DECLARE_FIELD(end)
    .describe("ending indices for the slice operation, supports negative indices.");
    DMLC_DECLARE_FIELD(step)
    .set_default(mxnet::Tuple<dmlc::optional<index_t>>())
    .describe("step for the slice operation, supports negative values.");
  }
};
// Shape inference for _slice_assign_scalar: the output shape equals the
// (fully known) input shape; the slice region itself does not affect it.
inline bool SliceAssignScalarOpShape(const nnvm::NodeAttrs& attrs,
                                     mxnet::ShapeVector *in_attrs,
                                     mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape& dshape = (*in_attrs)[0];
  if (!shape_is_known(dshape)) return false;
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape);
  return true;
}
// Kernel writing a single scalar value into every element of the sliced
// region; one thread per row of the (virtual) value shape.
template<int ndim>
struct slice_assign_scalar {
  // i is the i-th row after flattening out into 2D tensor
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType val,
                                  const OpReqType req,
                                  const mshadow::Shape<ndim> oshape,
                                  const mshadow::Shape<ndim> vshape,
                                  const common::StaticArray<index_t, ndim> begin,
                                  const common::StaticArray<index_t, ndim> step) {
    const index_t data_last_dim_size = oshape[ndim-1];
    const index_t out_last_dim_size = vshape[ndim-1];
    const index_t step_last_dim = step[ndim-1];
    const index_t begin_last_dim = begin[ndim-1];
    for (index_t j = 0; j < out_last_dim_size; ++j) {
      index_t irow = 0;  // row id of flattend 2D out
      index_t stride = 1;
      index_t idx = i;
      #pragma unroll
      for (int k = ndim - 2; k >= 0; --k) {
        // Map the slice coordinate on axis k to the destination coordinate.
        irow += stride * ((idx % vshape[k]) * step[k] + begin[k]);
        idx /= vshape[k];
        stride *= oshape[k];
      }
      KERNEL_ASSIGN(out[irow * data_last_dim_size + j * step_last_dim + begin_last_dim], req, val);
    }
  }
};
// Forward compute for _slice_assign_scalar: copies the input to the output
// (unless in place) and fills the sliced region with the scalar.
template<typename xpu>
void SliceAssignScalarOpForward(const nnvm::NodeAttrs& attrs,
                                const OpContext& ctx,
                                const std::vector<TBlob>& inputs,
                                const std::vector<OpReqType>& req,
                                const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  using namespace mshadow;
  Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& data = inputs[0];
  const TBlob& out = outputs[0];
  if (req[0] == kWriteTo) {
    // Copy the untouched parts of the input into the fresh output first.
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      Tensor<xpu, 1, DType> in = inputs[0].FlatTo1D<xpu, DType>(s);
      Tensor<xpu, 1, DType> out = outputs[0].FlatTo1D<xpu, DType>(s);
      Copy(out, in, s);
    });
  } else if (req[0] != kWriteInplace) {
    LOG(FATAL) << "_crop_assign_scalar only supports kWriteTo and kWriteInplace";
  }
  // vshape is the shape of the sliced region to be filled.
  mxnet::TShape vshape = data.shape_;
  const SliceAssignScalarParam& param = nnvm::get<SliceAssignScalarParam>(attrs.parsed);
  MXNET_NDIM_SWITCH(data.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    bool zero_size_shape = GetIndexRange(data.shape_, param.begin, param.end, param.step,
                                         &begin, &end, &step);
    if (zero_size_shape) {
      return;  // slice_assign of zero-sized subspaced needs no operation.
    }
    for (index_t i = 0; i < param.begin.ndim(); ++i) {
      const index_t b = begin[i], e = end[i], s = step[i];
      SetSliceOpOutputDimSize(data.shape_, i, b, e, s, &vshape);
    }
    MSHADOW_TYPE_SWITCH_WITH_BOOL(out.type_flag_, DType, {
      mxnet_op::Kernel<slice_assign_scalar<ndim>, xpu>::Launch(s, vshape.FlatTo2D()[0],
          out.dptr<DType>(), static_cast<DType>(param.scalar), req[0],
          out.shape_.get<ndim>(), vshape.get<ndim>(), begin, step);
    })
  })
}
// Parameter for slice_axis: slices [begin, end) along a single axis.
struct SliceAxisParam : public dmlc::Parameter<SliceAxisParam> {
  int axis;                     // axis to slice; negative counts from the end
  index_t begin;                // start index; negative counts from the end
  dmlc::optional<index_t> end;  // exclusive end; unset means "to the end of the axis"
  DMLC_DECLARE_PARAMETER(SliceAxisParam) {
    DMLC_DECLARE_FIELD(axis)
      .describe("Axis along which to be sliced, supports negative indexes.");
    DMLC_DECLARE_FIELD(begin)
      .describe("The beginning index along the axis to be sliced, "
                " supports negative indexes.");
    DMLC_DECLARE_FIELD(end)
      .describe("The ending index along the axis to be sliced, "
                " supports negative indexes.");
  }
};
/*!
 * \brief Normalize slice_axis parameters against an input shape.
 * Resolves negative axis / begin / end values, substitutes the axis size for a
 * missing end, and validates the resulting range.
 * \param param  user-supplied slice_axis parameters
 * \param ishape shape of the input array
 * \param axis   out: non-negative axis in [0, ishape.ndim())
 * \param begin  out: non-negative inclusive start index
 * \param end    out: non-negative exclusive end index (begin < end unless the
 *               axis has size 0, in which case both are 0)
 */
inline void GetSliceAxisParams(const SliceAxisParam& param, const mxnet::TShape& ishape,
                               int* axis, index_t* begin, index_t* end) {
  *axis = param.axis;
  if (*axis < 0) {
    *axis += ishape.ndim();
  }
  // NOTE: the check accepts axis == 0; the message previously claimed the axis
  // had to be "larger than zero", which contradicted the code.
  CHECK(*axis < ishape.ndim() && *axis >= 0) <<
    "Transformed axis must be smaller than the source ndim and non-negative! Received axis=" <<
    param.axis << ", src_ndim=" << ishape.ndim() << ", transformed axis=" << *axis;
  index_t axis_size = static_cast<index_t>(ishape[*axis]);
  *begin = param.begin;
  *end = -1;
  if (*begin < 0) {
    *begin += axis_size;
  }
  if (axis_size > 0) {
    if (!static_cast<bool>(param.end)) {
      *end = axis_size;  // unset end means "slice to the end of the axis"
    } else {
      *end = param.end.value();
      if (*end < 0) {
        *end += axis_size;
      }
    }
    CHECK(*end <= axis_size) << "Invalid end for end=" << *end << " as axis_size is " << axis_size;
    CHECK((*begin < *end))
      << "Invalid begin, end, get begin=" << param.begin << ", end=" << param.end;
  } else {
    // Zero-sized axis: the only valid slice is the empty one.
    *begin = 0;
    *end = 0;
  }
  CHECK(*end >= 0)
    << "Invalid begin, end, get begin=" << param.begin << ", end=" << param.end;
  CHECK(*begin >= 0) << "Invalid begin for begin=" << param.begin;
}
/*!
 * \brief Shape inference for slice_axis: output equals the input shape with the
 * sliced axis shrunk to (end - begin).
 * \return true when the output shape is fully known.
 */
inline bool SliceAxisShape(const nnvm::NodeAttrs& attrs,
                           mxnet::ShapeVector *in_attrs,
                           mxnet::ShapeVector *out_attrs) {
  const SliceAxisParam& param = nnvm::get<SliceAxisParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  mxnet::TShape& ishape = (*in_attrs)[0];
  if (!mxnet::ndim_is_known(ishape)) return false;
  int axis;
  index_t begin, end;
  GetSliceAxisParams(param, ishape, &axis, &begin, &end);
  if (!mxnet::dim_size_is_known(ishape, axis)) {
    // The sliced dim size is unknown; propagate what we have and defer.
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, ishape);
    return false;
  }
  mxnet::TShape shape(ishape.ndim(), -1);
  for (int i = 0; i < ishape.ndim(); ++i) {
    if (i == axis) {
      shape[i] = static_cast<index_t>(end - begin);
    } else {
      shape[i] = ishape[i];
    }
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
  return shape_is_known(shape);
}
/*!
 * \brief Forward pass of slice_axis. Flattens the input so that the sliced
 * axis is isolated (2D when it is the last axis, 3D otherwise) and applies the
 * mshadow slice expression along the middle/last dimension.
 */
template<typename xpu>
void SliceAxis(const nnvm::NodeAttrs& attrs,
               const OpContext& ctx,
               const std::vector<TBlob>& inputs,
               const std::vector<OpReqType>& req,
               const std::vector<TBlob>& outputs) {
  using namespace mshadow::expr;
  const SliceAxisParam& param = nnvm::get<SliceAxisParam>(attrs.parsed);
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  int axis;
  index_t begin, end;
  GetSliceAxisParams(param, inputs[0].shape_, &axis, &begin, &end);
  int ndim = outputs[0].ndim();
  if (axis + 1 == ndim) {
    // Slicing the trailing axis: 2D view, slice dimension 1.
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
        mshadow::Tensor<xpu, 2, DType> in =
            inputs[0].FlatTo2D<xpu, DType>(s);
        mshadow::Tensor<xpu, 2, DType> out =
            outputs[0].FlatTo2D<xpu, DType>(s);
        ASSIGN_DISPATCH(out, req[0], slice<1>(in, begin, end));
      });
  } else {
    // Interior axis: 3D view with the sliced axis in the middle.
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
        mshadow::Tensor<xpu, 3, DType> in =
            inputs[0].FlatTo3D<xpu, DType>(axis, s);
        mshadow::Tensor<xpu, 3, DType> out =
            outputs[0].FlatTo3D<xpu, DType>(axis, s);
        ASSIGN_DISPATCH(out, req[0], slice<1>(in, begin, end));
      });
  }
}
// Backward pass of slice_axis: scatter the output gradient back into the sliced region
/*!
 * \brief Backward pass of slice_axis: writes (or accumulates) the incoming
 * gradient into the sliced region of the input gradient, zeroing the rest for
 * kWriteTo. Mirrors the 2D/3D flattening used by the forward pass.
 */
template<typename xpu>
void SliceAxisGrad_(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  if (outputs[0].shape_.Size() == 0) {
    return;  // nothing to propagate into an empty gradient
  }
  const SliceAxisParam& param = nnvm::get<SliceAxisParam>(attrs.parsed);
  using namespace mshadow::op;
  using namespace mshadow::expr;
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  int axis;
  index_t begin, end;
  GetSliceAxisParams(param, outputs[0].shape_, &axis, &begin, &end);
  int ndim = outputs[0].shape_.ndim();
  if (axis + 1 == ndim) {
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
        mshadow::Tensor<xpu, 2, DType> ograd =
            inputs[0].FlatTo2D<xpu, DType>(s);
        mshadow::Tensor<xpu, 2, DType> igrad =
            outputs[0].FlatTo2D<xpu, DType>(s);
        if (req[0] == kAddTo) {
          slice<1>(igrad, begin, end) += F<identity>(ograd);
        } else if (req[0] == kWriteTo) {
          // kWriteTo must also clear the un-sliced part of the gradient.
          igrad = 0.0f;
          slice<1>(igrad, begin, end) = F<identity>(ograd);
        } else {
          CHECK_EQ(req[0], kNullOp);
        }
      });
  } else {
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
        mshadow::Tensor<xpu, 3, DType> ograd =
            inputs[0].FlatTo3D<xpu, DType>(axis, s);
        mshadow::Tensor<xpu, 3, DType> igrad =
            outputs[0].FlatTo3D<xpu, DType>(axis, s);
        if (req[0] == kAddTo) {
          slice<1>(igrad, begin, end) += F<identity>(ograd);
        } else if (req[0] == kWriteTo) {
          igrad = 0.0f;
          slice<1>(igrad, begin, end) = F<identity>(ograd);
        } else {
          CHECK_EQ(req[0], kNullOp);
        }
      });
  }
}
/*! \brief Parameters of slice_like: the set of axes on which the first input
 * is sliced down to the second input's size. Empty means "all axes". */
struct SliceLikeParam : public dmlc::Parameter<SliceLikeParam> {
  mxnet::Tuple<int> axes;  // axes to slice on; negative values allowed
  DMLC_DECLARE_PARAMETER(SliceLikeParam) {
    DMLC_DECLARE_FIELD(axes).set_default(mxnet::Tuple<int>())
      .describe("List of axes on which input data will be sliced according to the "
                "corresponding size of the second input. By default will slice on "
                "all axes. Negative axes are supported.");
  }
};
/*!
 * \brief Shape inference for slice_like. On the requested axes (all axes when
 * param.axes is empty), the output takes the second input's dim size; on the
 * remaining axes it keeps the first input's dim size. Each sliced dim of the
 * first input must be at least as large as the second input's.
 */
inline bool SliceLikeShape(const nnvm::NodeAttrs& attrs,
                           mxnet::ShapeVector *in_attrs,
                           mxnet::ShapeVector *out_attrs) {
  const SliceLikeParam& param = nnvm::get<SliceLikeParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 2U);
  CHECK_EQ(out_attrs->size(), 1U);
  mxnet::TShape& ishape = (*in_attrs)[0];
  mxnet::TShape& from_shape = (*in_attrs)[1];
  if (!mxnet::ndim_is_known(ishape) || !mxnet::ndim_is_known(from_shape)) {
    return false;
  }
  if (param.axes.ndim() == 0) {
    // Message previously said "slice_axis"; this operator is slice_like.
    CHECK_EQ(ishape.ndim(), from_shape.ndim())
      << "By default slice_like performs slice on all axes, but ndim mismatch "
         "for inputs: " << ishape.ndim() << " vs. " << from_shape.ndim();
    for (int i = 0; i < ishape.ndim(); ++i) {
      CHECK_GE(ishape[i], from_shape[i])
        << "Slice axis " << i << " with size " << from_shape[i]
        << " exceeds limit of input with size " << ishape[i];
    }
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, from_shape);
  } else {
    mxnet::TShape shape(ishape);
    for (int i = 0; i < param.axes.ndim(); ++i) {
      int axis = param.axes[i];
      if (axis < 0) {
        axis += ishape.ndim();
      }
      CHECK_GE(axis, 0)
        << "Slice axis: " << param.axes[i] << " too small";
      CHECK_GT(ishape.ndim(), axis)
        << "Slice axis: " << axis << " exceeds first input: " << ishape.ndim();
      CHECK_GT(from_shape.ndim(), axis)
        << "Slice axis: " << axis << " exceeds second input: " << from_shape.ndim();
      shape[axis] = from_shape[axis];
      CHECK_GE(ishape[axis], from_shape[axis])
        << "Slice axis " << axis << " with size " << from_shape[axis]
        << " exceeds limit of input with size " << ishape[axis];
    }
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
  }
  return true;
}
/*!
 * \brief Translate slice_like's axes parameter into the generic slice
 * begin/end/step tuples: [0, fshape[axis]) with step 1 on every sliced axis,
 * and unset (full-range) entries elsewhere.
 * \param dshape shape of the data input
 * \param fshape shape of the "like" (second) input
 * \param axes   axes to slice on; empty means all axes
 * \param param_begin/param_end/param_step out: per-axis slice ranges
 */
inline void SliceLikeInferRanges(const mxnet::TShape& dshape,
                                 const mxnet::TShape& fshape,
                                 const mxnet::Tuple<int>& axes,
                                 mxnet::Tuple<dmlc::optional<index_t>>* param_begin,
                                 mxnet::Tuple<dmlc::optional<index_t>>* param_end,
                                 mxnet::Tuple<dmlc::optional<index_t>>* param_step) {
  std::vector<dmlc::optional<index_t>> pb(dshape.ndim());
  std::vector<dmlc::optional<index_t>> pe(dshape.ndim());
  std::vector<dmlc::optional<index_t>> ps(dshape.ndim());
  if (axes.ndim() == 0) {
    for (int i = 0; i < dshape.ndim(); ++i) {
      pb[i] = 0;
      pe[i] = fshape[i];
      ps[i] = 1;
    }
  } else {
    for (int i = 0; i < axes.ndim(); ++i) {
      int axis = axes[i];
      if (axis < 0) {
        axis += dshape.ndim();
      }
      CHECK_GE(axis, 0)
        << "Slice axis: " << axes[i] << " too small";
      CHECK_LT(axis, dshape.ndim())
        << "Slice axis: " << axis << " exceeds first input: " << dshape.ndim();
      // This bound is against fshape, i.e. the second input (message was
      // wrongly copied from the previous check).
      CHECK_LT(axis, fshape.ndim())
        << "Slice axis: " << axis << " exceeds second input: " << fshape.ndim();
      pb[axis] = 0;
      pe[axis] = fshape[axis];
      ps[axis] = 1;
    }
  }
  *param_begin = mxnet::Tuple<dmlc::optional<index_t>>(pb.begin(), pb.end());
  *param_end = mxnet::Tuple<dmlc::optional<index_t>>(pe.begin(), pe.end());
  *param_step = mxnet::Tuple<dmlc::optional<index_t>>(ps.begin(), ps.end());
}
/*!
 * \brief Forward pass of slice_like: converts the axes parameter into generic
 * slice ranges and launches the shared slice_forward kernel.
 * Fixes mojibake in the SliceLikeInferRanges call where "&param_" had been
 * corrupted into the pilcrow character ("¶m_").
 */
template<typename xpu>
void SliceLikeForward(const nnvm::NodeAttrs& attrs,
                      const OpContext& ctx,
                      const std::vector<TBlob>& inputs,
                      const std::vector<OpReqType>& req,
                      const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 2U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  using namespace mshadow::expr;
  const SliceLikeParam& param = nnvm::get<SliceLikeParam>(attrs.parsed);
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& data = inputs[0];
  const TBlob& out = outputs[0];
  const mxnet::TShape& ishape = data.shape_;
  const mxnet::TShape& from_shape = inputs[1].shape_;
  mxnet::Tuple<dmlc::optional<index_t>> param_begin;
  mxnet::Tuple<dmlc::optional<index_t>> param_end;
  mxnet::Tuple<dmlc::optional<index_t>> param_step;
  SliceLikeInferRanges(ishape, from_shape, param.axes, &param_begin, &param_end, &param_step);
  MXNET_NDIM_SWITCH(data.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(data.shape_, param_begin, param_end, param_step, &begin, &end, &step);
    MSHADOW_TYPE_SWITCH(out.type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        // On GPU, parallelize over elements of the 2D-flattened output;
        // on CPU, one thread per flattened row.
        int num_threads = out.shape_.FlatTo2D()[0];
        if (std::is_same<xpu, gpu>::value) {
          num_threads *= out.shape_.get<ndim>()[ndim - 1];
        }
        mxnet_op::Kernel<slice_forward<ndim, Req, xpu>, xpu>::Launch(s,
            num_threads, out.dptr<DType>(), data.dptr<DType>(),
            data.shape_.get<ndim>(), out.shape_.get<ndim>(), begin, step);
      })
    })
  })
}
/*!
 * \brief Backward pass of slice_like: zeroes the gradient of the shape-only
 * second input and scatters the output gradient into the sliced region of the
 * first input's gradient via the shared slice_assign kernel.
 * Fixes mojibake in the SliceLikeInferRanges call where "&param_" had been
 * corrupted into the pilcrow character ("¶m_").
 */
template<typename xpu>
void SliceLikeBackward(const nnvm::NodeAttrs& attrs,
                       const OpContext& ctx,
                       const std::vector<TBlob>& inputs,
                       const std::vector<OpReqType>& req,
                       const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 2U);
  CHECK_EQ(req.size(), 2U);
  using namespace mshadow;
  Stream<xpu>* s = ctx.get_stream<xpu>();
  if (req[1] != kNullOp && req[1] != kAddTo) {
    Fill(s, outputs[1], req[1], 0);  // Second input not relevant to gradients.
  }
  if (req[0] == kNullOp) return;
  const TBlob& ograd = inputs[0];
  const TBlob& igrad = outputs[0];
  const SliceLikeParam& param = nnvm::get<SliceLikeParam>(attrs.parsed);
  if (req[0] == kWriteTo) {
    // Clear the full gradient first; the kernel only writes the sliced region.
    Fill(s, igrad, req[0], 0);
  } else if (req[0] == kWriteInplace) {
    LOG(FATAL) << "_slice_like_backward does not support kWriteInplace";
  }
  const mxnet::TShape& ishape = ograd.shape_;
  const mxnet::TShape& from_shape = outputs[1].shape_;
  mxnet::Tuple<dmlc::optional<index_t>> param_begin;
  mxnet::Tuple<dmlc::optional<index_t>> param_end;
  mxnet::Tuple<dmlc::optional<index_t>> param_step;
  SliceLikeInferRanges(ishape, from_shape, param.axes, &param_begin, &param_end, &param_step);
  MXNET_NDIM_SWITCH(ograd.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(ograd.shape_, param_begin, param_end, param_step, &begin, &end, &step);
    MSHADOW_TYPE_SWITCH(ograd.type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        int num_threads = ograd.shape_.FlatTo2D()[0];
        if (std::is_same<xpu, gpu>::value) {
          num_threads *= ograd.shape_.get<ndim>()[ndim - 1];
        }
        mxnet_op::Kernel<slice_assign<ndim, Req, xpu>, xpu>::Launch(s, num_threads,
            igrad.dptr<DType>(), ograd.dptr<DType>(),
            igrad.shape_.get<ndim>(), ograd.shape_.get<ndim>(), begin, step);
      })
    })
  })
}
/*! \brief Parameters of the clip operator: the inclusive [a_min, a_max]
 * clamping interval. */
struct ClipParam : public dmlc::Parameter<ClipParam> {
  real_t a_min, a_max;  // lower / upper clamp bounds
  DMLC_DECLARE_PARAMETER(ClipParam) {
    DMLC_DECLARE_FIELD(a_min)
    .describe("Minimum value");
    DMLC_DECLARE_FIELD(a_max)
    .describe("Maximum value");
  }
  // Serialize the parameters back into their string form (used for op recording).
  void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
    std::ostringstream a_min_s, a_max_s;
    a_min_s << a_min;
    a_max_s << a_max;
    (*dict)["a_min"] = a_min_s.str();
    (*dict)["a_max"] = a_max_s.str();
  }
};
/*! \brief Element-wise kernel: clamp datas[i] into [a_min, a_max] and store in out[i]. */
struct clip {
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* datas,
                                  const float a_min, const float a_max) {
    const DType value = datas[i];
    out[i] = (value > a_max) ? static_cast<DType>(a_max)
                             : ((value < a_min) ? static_cast<DType>(a_min) : value);
  }
};
/*! \brief Element-wise kernel for clip's gradient: pass grad[i] through where
 * datas[i] lies inside [a_min, a_max], zero outside. */
struct clip_grad {
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* grad, const DType* datas,
                                  const float a_min, const float a_max) {
    const DType value = datas[i];
    const bool clipped = (value > a_max) || (value < a_min);
    out[i] = clipped ? static_cast<DType>(0) : grad[i];
  }
};
/*!
 * \brief Forward pass of clip on dense tensors: launches the element-wise
 * clip kernel over every output element.
 */
template<typename xpu>
void Clip(const nnvm::NodeAttrs& attrs,
          const OpContext& ctx,
          const std::vector<TBlob>& inputs,
          const std::vector<OpReqType>& req,
          const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  const ClipParam& param = nnvm::get<ClipParam>(attrs.parsed);
  CHECK_EQ(inputs[0].type_flag_, outputs[0].type_flag_);
  Stream<xpu> *s = ctx.get_stream<xpu>();
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    mxnet_op::Kernel<mxnet::op::clip, xpu>::Launch(s, outputs[0].Size(), outputs[0].dptr<DType>(),
                                                   inputs[0].dptr<DType>(),
                                                   param.a_min, param.a_max);
  });
}
/*!
 * \brief Forward pass of clip on sparse storage (row_sparse/CSR): delegates to
 * the dense Clip kernel over the stored values only. Note this assumes the
 * clipped value of zero is zero (a_min <= 0 <= a_max) for the result to be
 * exact on unstored entries — NOTE(review): not checked here, presumably
 * enforced by the operator registration; confirm at call site.
 */
template<typename xpu>
void ClipEx(const nnvm::NodeAttrs& attrs,
            const OpContext& ctx,
            const std::vector<NDArray>& inputs,
            const std::vector<OpReqType>& req,
            const std::vector<NDArray>& outputs) {
  CHECK_EQ(inputs[0].dtype(), outputs[0].dtype());
  CHECK_EQ(inputs[0].storage_type(), outputs[0].storage_type());
  CHECK_NE(inputs[0].storage_type(), kDefaultStorage);
  UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Clip<xpu>);
}
/*!
 * \brief Backward pass of clip: inputs[0] is the output gradient, inputs[1]
 * the original data; gradient passes through only where the data was inside
 * the clamp interval.
 */
template<typename xpu>
void ClipGrad_(const nnvm::NodeAttrs& attrs,
               const OpContext& ctx,
               const std::vector<TBlob>& inputs,
               const std::vector<OpReqType>& req,
               const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mxnet_op;
  const ClipParam& param = nnvm::get<ClipParam>(attrs.parsed);
  CHECK_EQ(inputs[0].type_flag_, outputs[0].type_flag_);
  Stream<xpu> *s = ctx.get_stream<xpu>();
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    Kernel<clip_grad, xpu>::Launch(s, outputs[0].Size(), outputs[0].dptr<DType>(),
                                   inputs[0].dptr<DType>(), inputs[1].dptr<DType>(), param.a_min, param.a_max);
  });
}
/*!
 * \brief The parameters of the repeat operator:
 * the number of repetitions and an optional axis.
 * These parameters are later used to deduce the
 * output ndarray shape in the RepeatShape() function.
 */
struct RepeatParam : public dmlc::Parameter<RepeatParam> {
  int repeats = 1;            // how many times each element is repeated
  dmlc::optional<int> axis;   // axis to repeat along; unset => flatten first
  DMLC_DECLARE_PARAMETER(RepeatParam) {
    DMLC_DECLARE_FIELD(repeats)
      .describe("The number of repetitions for each element.");
    DMLC_DECLARE_FIELD(axis)
      .set_default(dmlc::optional<int>())
      .describe("The axis along which to repeat values."
                " The negative numbers are interpreted counting from the backward."
                " By default, use the flattened input array,"
                " and return a flat output array.");
  }
  // Serialize the parameters back into their string form (used for op recording).
  void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
    std::ostringstream repeats_s, axis_s;
    repeats_s << repeats;
    axis_s << axis;
    (*dict)["repeats"] = repeats_s.str();
    (*dict)["axis"] = axis_s.str();
  }
};
/*!
* \brief Helper function for getting user input params for the operator repeat.
* Sanity check the user input values.
*/
/*!
 * \brief Extract and sanity-check the repeat operator's parameters.
 * \param param   user-supplied parameters
 * \param ishape  input shape, used to validate a (possibly negative) axis
 * \param repeats out: non-negative repetition count
 * \param axisOpt out: the (still possibly negative) optional axis
 */
inline void GetRepeatParams(const RepeatParam& param, const mxnet::TShape& ishape,
                            int* repeats, dmlc::optional<int>* axisOpt) {
  *repeats = param.repeats;
  CHECK_GE(*repeats, 0) << "repeats cannot be a negative number";
  *axisOpt = param.axis;
  if (!static_cast<bool>(*axisOpt)) return;  // no axis given: nothing to validate
  const int ndims = ishape.ndim();
  int axis = axisOpt->value();
  if (axis < 0) {
    axis += ndims;  // resolve negative axis against the input rank
  }
  CHECK(axis >= 0 && axis < ndims) << "axis = " << axisOpt->value() << " out of bounds";
}
/*!
 * \brief Shape inference for repeat. With an axis, that dim is multiplied by
 * repeats; without one, the output is a flat array of size input_size*repeats.
 * repeats == 0 produces a 1-d zero-size array.
 */
inline bool RepeatOpShape(const nnvm::NodeAttrs& attrs,
                          mxnet::ShapeVector *in_attrs,
                          mxnet::ShapeVector *out_attrs) {
  const RepeatParam& param = nnvm::get<RepeatParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape& ishape = (*in_attrs)[0];
  if (!mxnet::ndim_is_known(ishape)) {
    return false;
  }
  int repeats = 0;
  dmlc::optional<int> axisOpt;
  GetRepeatParams(param, ishape, &repeats, &axisOpt);
  // If 0 repeats, return an empty 1-dim, 0-size array
  if (0 == repeats) {
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape(1, 0));
    return true;
  }
  // If repeats > 0, multiply the size of the corresponding axis by repeats
  if (static_cast<bool>(axisOpt)) {
    int ndims = ishape.ndim();
    int axis = axisOpt.value();
    if (axis < 0) {
      axis += ndims;
    }
    mxnet::TShape shape(ishape.ndim(), -1);
    for (int i = 0; i < ishape.ndim(); ++i) {
      if (i == axis) {
        shape[i] = repeats * ishape[i];
      } else {
        shape[i] = ishape[i];
      }
    }
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
  } else {  // If axis is not input by user, return a flat 1D array of size = in.size*repeats
    mxnet::TShape shape(1, ishape.Size() * repeats);
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
  }
  return shape_is_known(out_attrs->at(0));
}
/*!
 * \brief Type inference for repeat: the output type equals the input type,
 * propagated in whichever direction is already known (-1 means unknown).
 */
inline bool RepeatOpType(const nnvm::NodeAttrs& attrs,
                         std::vector<int>* in_attrs,
                         std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  const int in_type = (*in_attrs)[0];
  if (in_type != -1) {
    TYPE_ASSIGN_CHECK(*out_attrs, 0, in_type);
    return true;
  }
  const int out_type = (*out_attrs)[0];
  if (out_type != -1) {
    TYPE_ASSIGN_CHECK(*in_attrs, 0, out_type);
  }
  return true;
}
/*!
 * \brief Reshape the input and output tensors for
 * using broadcast_to to achieve the functionality
 * of operator repeat.
 * \return a pair of mxnet::TShape's, first is the reshaped
 * input shape, second is the reshaped output shape.
 */
inline std::pair<mxnet::TShape, mxnet::TShape> ReshapeInputOutputForRepeatOp(
    const mxnet::TShape& ishape,
    const dmlc::optional<int>& axisOpt,
    const int repeats) {
  if (static_cast<bool>(axisOpt)) {
    int axis = axisOpt.value();
    int ndim = ishape.ndim();
    if (axis < 0) {
      axis += ndim;
    }
    CHECK(axis >= 0 && axis < ishape.ndim()) << "Invalid input of axis";
    // reshape the input tensor by adding a dim at the (axis+1)-th dim
    mxnet::TShape rshape(ishape.ndim()+1, 1);
    // the shape we want to broadcast to
    mxnet::TShape bshape(rshape.ndim(), 1);
    int i = 0;
    // Dims up to and including `axis` are copied verbatim into both shapes.
    while (i <= axis) {
      rshape[i] = bshape[i] = ishape[i];
      ++i;
    }
    // The inserted dim is size 1 on the input side and `repeats` on the
    // broadcast side; broadcasting along it realizes the repetition.
    rshape[i] = 1;
    bshape[i] = repeats;
    // Remaining input dims shift right by one to make room for the new dim.
    while (i < ishape.ndim()) {
      rshape[i+1] = ishape[i];
      bshape[i+1] = ishape[i];
      ++i;
    }
    return std::make_pair(rshape, bshape);
  } else {
    // axis is not input by user
    // reshape the tensor into shape (ishape.Size(), 1)
    // then add one dim at axis = 1 and broadcast to
    // shape (ishape.Size(), repeats)
    mxnet::TShape rshape(2, 1);
    rshape[0] = ishape.Size();
    rshape[1] = 1;
    mxnet::TShape bshape(2, 1);
    bshape[0] = rshape[0];
    bshape[1] = repeats;
    return std::make_pair(rshape, bshape);
  }
}
/*!
 * \brief Forward pass of repeat, implemented by reshaping input/output and
 * delegating to broadcast_to over the inserted size-`repeats` dimension.
 */
template<typename xpu>
void RepeatOpForward(const nnvm::NodeAttrs& attrs,
                     const OpContext& ctx,
                     const std::vector<TBlob>& inputs,
                     const std::vector<OpReqType>& req,
                     const std::vector<TBlob>& outputs) {
  const TBlob& iTBlob = inputs[0];
  const mxnet::TShape& ishape = iTBlob.shape_;
  if (!shape_is_known(ishape)) return;
  int repeats = 0;
  dmlc::optional<int> axisOpt;
  const RepeatParam& param = nnvm::get<RepeatParam>(attrs.parsed);
  GetRepeatParams(param, ishape, &repeats, &axisOpt);
  if (0 == repeats) return;  // zero repeats => empty output, nothing to compute
  std::pair<mxnet::TShape, mxnet::TShape> rshapes = \
      ReshapeInputOutputForRepeatOp(ishape, axisOpt, repeats);
  // reshaped input tblob (same data pointer, broadcast-compatible shape)
  TBlob iblob(inputs[0].dptr_, rshapes.first, inputs[0].dev_mask(),
              inputs[0].type_flag_, inputs[0].dev_id());
  std::vector<TBlob> newInputs = {iblob};
  // reshaped output tblob
  TBlob oblob(outputs[0].dptr_, rshapes.second, outputs[0].dev_mask(),
              outputs[0].type_flag_, outputs[0].dev_id());
  std::vector<TBlob> newOutputs = {oblob};
  BroadcastCompute<xpu>(attrs, ctx, newInputs, req, newOutputs);
}
/*!
* \brief Compute the gradient of the loss function
* with respect to the input of the operator.
* Backpropagation is employed to implement the
* chain rule.
* \param inputs the gradient of the loss function
* with respect to the outputs of the operator
* \param outputs the gradient of the loss function
* with respect to the inputs of the operator
*/
/*!
 * \brief Backward pass of repeat: sum-reduces the output gradient over the
 * repeated dimension (the inverse of the forward's broadcast).
 * inputs[0] is the gradient w.r.t. the op's output; outputs[0] receives the
 * gradient w.r.t. the op's input.
 */
template<typename xpu>
void RepeatOpBackward(const nnvm::NodeAttrs& attrs,
                      const OpContext& ctx,
                      const std::vector<TBlob>& inputs,
                      const std::vector<OpReqType>& req,
                      const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  const mxnet::TShape& oshape = outputs[0].shape_;
  if (!shape_is_known(oshape)) return;
  int repeats = 0;
  dmlc::optional<int> axisOpt;
  const RepeatParam& param = nnvm::get<RepeatParam>(attrs.parsed);
  GetRepeatParams(param, oshape, &repeats, &axisOpt);
  if (0 == repeats) return;
  std::pair<mxnet::TShape, mxnet::TShape> rshapes =
      ReshapeInputOutputForRepeatOp(oshape, axisOpt, repeats);
  // reshaped output grad tblob (reduction target, shape rshapes.first)
  TBlob oblob(outputs[0].dptr_, rshapes.first, outputs[0].dev_mask(),
              outputs[0].type_flag_, outputs[0].dev_id());
  std::vector<TBlob> newOutputs = {oblob};
  // reshaped input grad tblob (reduction source, shape rshapes.second)
  TBlob iblob(inputs[0].dptr_, rshapes.second, inputs[0].dev_mask(),
              inputs[0].type_flag_, inputs[0].dev_id());
  std::vector<TBlob> newInputs = {iblob};
  ReduceAxesComputeImpl<xpu, mshadow::red::sum, false, false>(
      ctx, newInputs, req, newOutputs, rshapes.first);
}
/*! \brief Parameters of the tile operator: per-dimension repetition counts. */
struct TileParam : public dmlc::Parameter<TileParam> {
  mxnet::Tuple<int> reps;  // repetition count per dim; broadcast against input rank
  DMLC_DECLARE_PARAMETER(TileParam) {
    DMLC_DECLARE_FIELD(reps)
      .describe("The number of times for repeating the tensor a. Each dim size of reps"
                " must be a positive integer."
                " If reps has length d, the result will have dimension of max(d, a.ndim);"
                " If a.ndim < d, a is promoted to be d-dimensional by prepending new axes."
                " If a.ndim > d, reps is promoted to a.ndim by pre-pending 1's to it.");
  }
  // Serialize the parameters back into their string form (used for op recording).
  void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
    std::ostringstream reps_s;
    reps_s << reps;
    (*dict)["reps"] = reps_s.str();
  }
};
/*!
 * \brief Shape inference for tile. Aligns ishape and reps at the trailing
 * dims (shorter one implicitly padded with 1's at the front) and multiplies
 * matching entries.
 */
inline bool TileOpShape(const nnvm::NodeAttrs& attrs,
                        mxnet::ShapeVector *in_attrs,
                        mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  const TileParam& param = nnvm::get<TileParam>(attrs.parsed);
  const mxnet::TShape& ishape = (*in_attrs)[0];
  if (!shape_is_known(ishape)) {
    return false;
  }
  const mxnet::Tuple<int>& reps = param.reps;
  // If reps is empty, the output is identical to the input
  if (reps.ndim() == 0) {
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, ishape);
    return true;
  }
  mxnet::TShape oshape(std::max(ishape.ndim(), reps.ndim()), -1);
  // Walk both shapes from their last dims toward the front.
  int i1 = ishape.ndim() - 1;
  int i2 = reps.ndim() - 1;
  for (int i = oshape.ndim() - 1; i >= 0; --i) {
    if (i1 >= 0 && i2 >= 0) {
      oshape[i] = ishape[i1--] * reps[i2--];
    } else if (i1 >= 0) {
      oshape[i] = ishape[i1--];
    } else if (i2 >= 0) {
      oshape[i] = reps[i2--];
    }
  }
  // If reps contains 0s, oshape is a zero-size shape.
  // Need to distinguish between np_shape mode and legacy mode.
  if (!Imperative::Get()->is_np_shape()) {
    common::ConvertToNumpyShape(&oshape);
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
  return shape_is_known(oshape);
}
/*!
 * \brief Type inference for tile: output dtype mirrors the input dtype,
 * propagating backward from the output when only it is known (-1 = unknown).
 */
inline bool TileOpType(const nnvm::NodeAttrs& attrs,
                       std::vector<int>* in_attrs,
                       std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  const int in_type = (*in_attrs)[0];
  const int out_type = (*out_attrs)[0];
  if (in_type != -1) {
    TYPE_ASSIGN_CHECK(*out_attrs, 0, in_type);
  } else if (out_type != -1) {
    TYPE_ASSIGN_CHECK(*in_attrs, 0, out_type);
  }
  return true;
}
/*!
* \brief Reshape the input and output tensors for
* using broadcast_to to achieve the functionality
* of operator tile.
* \return a pair of mxnet::TShape's, first is the reshaped
* input shape, second is the reshaped output shape.
*/
inline std::pair<mxnet::TShape, mxnet::TShape> ReshapeInputOutputForTileOp(
    const mxnet::TShape& ishape,
    const mxnet::Tuple<int>& reps) {
  if (reps.ndim() == 0) {
    // Empty reps: tile is the identity; no reshape needed.
    return std::make_pair(ishape, ishape);
  }
  // The shape we want to broadcast to
  mxnet::TShape bshape(std::max(ishape.ndim(), reps.ndim()) * 2, 1);
  // The shape of the input tensor after adding new axes before each dim
  mxnet::TShape rshape(bshape.ndim(), 1);
  // Interleave: even positions hold the rep counts (size-1 axes on the input
  // side), odd positions hold the original dims. Walk from the back so the
  // shorter of ishape/reps is implicitly padded with 1's at the front.
  int i1 = ishape.ndim() - 1;
  int i2 = reps.ndim() - 1;
  for (int i = bshape.ndim() - 1; i >= 0; --i) {
    if (0 == (i & 1)) {
      bshape[i] = (i2 >= 0? reps[i2--] : 1);
      rshape[i] = 1;
    } else {
      rshape[i] = bshape[i] = (i1 >= 0? ishape[i1--] : 1);
    }
  }
  return std::make_pair(rshape, bshape);
}
/*!
* \brief Implementation of tiling the input tensor a based
* on the user-input shape, reps.
* If a.ndim < reps.ndim, new axes are pre-pended to a. For example,
* the input tensor has shape (3,), and the reps is (2, 4); the input
* tensor would be reshaped to (1, 3).
* If a.ndim > reps.ndim, pre-pending 1's to reps. For example,
* the input tensor has shape (2, 3, 4, 5), and reps is (2, 2);
* the reps would be changed to (1, 1, 2, 2).
* Suppose we have a.ndim = reps.ndim now. To achieve tiling,
* we utilize the operator broadcast_to. For example, for a tensor
* of shape (2, 3, 4, 5) and reps (2, 8, 9, 3), we first reshape
* the tensor to the shape (1, 2, 1, 3, 1, 4, 1, 5) by adding
* one axis before each dimension. Then, we want to broadcast
* the new tensor to shape (2, 2, 8, 3, 9, 4, 3, 5). The final
* output tensor would have shape (2*2, 8*3, 9*4, 3*5).
*/
/*!
 * \brief Forward pass of tile, implemented by reshaping input/output into the
 * interleaved forms from ReshapeInputOutputForTileOp and delegating to
 * broadcast_to.
 */
template<typename xpu>
void TileOpForward(const nnvm::NodeAttrs& attrs,
                   const OpContext& ctx,
                   const std::vector<TBlob>& inputs,
                   const std::vector<OpReqType>& req,
                   const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  if (inputs[0].Size() == 0) return;
  const mxnet::TShape& ishape = inputs[0].shape_;
  const mxnet::Tuple<int>& reps = nnvm::get<TileParam>(attrs.parsed).reps;
  // If any one of the number in reps is zero, return immediately
  for (int i = 0; i < reps.ndim(); ++i) {
    if (0 == reps[i]) return;
  }
  std::pair<mxnet::TShape, mxnet::TShape> rshapes = ReshapeInputOutputForTileOp(ishape, reps);
  // reshaped input tblob (same data pointer, broadcast-compatible shape)
  TBlob iblob(inputs[0].dptr_, rshapes.first, inputs[0].dev_mask(),
              inputs[0].type_flag_, inputs[0].dev_id());
  std::vector<TBlob> newInputs = {iblob};
  // reshaped output tblob
  TBlob oblob(outputs[0].dptr_, rshapes.second, outputs[0].dev_mask(),
              outputs[0].type_flag_, outputs[0].dev_id());
  std::vector<TBlob> newOutputs = {oblob};
  BroadcastCompute<xpu>(attrs, ctx, newInputs, req, newOutputs);
}
/*!
* \brief Compute the gradient of the loss function
* with respect to the input of the operator.
* Backpropagation is employed to implement the
* chain rule.
* \param inputs the gradient of the loss function
* with respect to the outputs of the operator
* \param outputs the gradient of the loss function
* with respect to the inputs of the operator
*/
/*!
 * \brief Backward pass of tile: sum-reduces the output gradient over the
 * inserted repetition axes (the inverse of the forward's broadcast).
 * inputs[0] is the gradient w.r.t. the op's output; outputs[0] receives the
 * gradient w.r.t. the op's input.
 */
template<typename xpu>
void TileOpBackward(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  if (inputs[0].Size() == 0) return;
  const mxnet::TShape& oshape = outputs[0].shape_;
  const mxnet::Tuple<int>& reps = nnvm::get<TileParam>(attrs.parsed).reps;
  // If any one of the number in reps is zero, return immediately
  for (int i = 0; i < reps.ndim(); ++i) {
    if (0 == reps[i]) return;
  }
  std::pair<mxnet::TShape, mxnet::TShape> rshapes = ReshapeInputOutputForTileOp(oshape, reps);
  // reshaped output grad tblob (reduction target, shape rshapes.first)
  TBlob oblob(outputs[0].dptr_, rshapes.first, outputs[0].dev_mask(),
              outputs[0].type_flag_, outputs[0].dev_id());
  std::vector<TBlob> newOutputs = {oblob};
  // reshaped input grad tblob (reduction source, shape rshapes.second)
  TBlob iblob(inputs[0].dptr_, rshapes.second, inputs[0].dev_mask(),
              inputs[0].type_flag_, inputs[0].dev_id());
  std::vector<TBlob> newInputs = {iblob};
  ReduceAxesComputeImpl<xpu, mshadow::red::sum, false, false>(
      ctx, newInputs, req, newOutputs, rshapes.first);
}
/*! \brief Parameters of the reverse operator: the axes to flip. */
struct ReverseParam : public dmlc::Parameter<ReverseParam> {
  mxnet::Tuple<int> axis;  // axes along which elements are reversed
  DMLC_DECLARE_PARAMETER(ReverseParam) {
    DMLC_DECLARE_FIELD(axis)
    .describe("The axis which to reverse elements.");
  }
};
#define REVERSE_MAX_DIM 10U
/*! \brief Element-wise kernel for reverse. For each flat source index it
 * computes the destination index with the chosen axes mirrored, using
 * per-reversed-axis stride (axis size) and trailing (product of later dims)
 * tables. The CUDA path stages those tables in shared memory first. */
struct reverse {
  // Map flat index `idx` to its mirrored counterpart across `nreversedim` axes.
  MSHADOW_XINLINE static index_t ReverseIndex(index_t idx,
                                              index_t nreversedim,
                                              const index_t * stride_,
                                              const index_t * trailing_) {
    index_t outputIndex = idx;
    for (index_t i = 0; i < nreversedim; ++i) {
      // Decompose the index around axis i, flip the coordinate, recompose.
      const index_t low = outputIndex % trailing_[i];
      index_t high = outputIndex / trailing_[i];
      const index_t x = high%stride_[i];
      high /= stride_[i];
      outputIndex = (high*stride_[i] + stride_[i] - 1 - x)*trailing_[i] + low;
    }
    return outputIndex;
  }
#ifdef __CUDACC__
  template<typename DType>
  __device__  static void Map(index_t index, index_t nreversedim, const DType *src, DType *dst,
                              const index_t * stride_,
                              const index_t * trailing_) {
    // Cache the small stride/trailing tables in shared memory; the first
    // REVERSE_MAX_DIM threads of the block perform the copy.
    __shared__ index_t stride_share[REVERSE_MAX_DIM];
    __shared__ index_t trailing_share[REVERSE_MAX_DIM];
    if (threadIdx.x < REVERSE_MAX_DIM) {
      stride_share[threadIdx.x] = stride_[threadIdx.x];
      trailing_share[threadIdx.x] = trailing_[threadIdx.x];
    }
    __syncthreads();
    index_t new_idx = ReverseIndex(index, nreversedim, stride_share, trailing_share);
    dst[new_idx] = src[index];
  }
#else
  template<typename DType>
  MSHADOW_XINLINE  static void Map(index_t index, index_t nreversedim, const DType *src, DType *dst,
                                   const index_t * stride_,
                                   const index_t * trailing_) {
    index_t new_idx = ReverseIndex(index, nreversedim, stride_, trailing_);
    dst[new_idx] = src[index];
  }
#endif
};
/*!
 * \brief Forward pass of reverse: builds per-axis stride/trailing tables on
 * the host, copies them to device scratch space when compiling for GPU, and
 * launches the element-wise reverse kernel.
 */
template<typename xpu>
void ReverseOpForward(const nnvm::NodeAttrs& attrs,
                      const OpContext& ctx,
                      const std::vector<TBlob>& inputs,
                      const std::vector<OpReqType>& req,
                      const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mxnet_op;
  const ReverseParam& param = nnvm::get<ReverseParam>(attrs.parsed);
  CHECK_EQ(inputs[0].type_flag_, outputs[0].type_flag_);
  CHECK_LT(param.axis.ndim(), REVERSE_MAX_DIM);
  Stream<xpu> *s = ctx.get_stream<xpu>();
  const mxnet::TShape& ishape = inputs[0].shape_;
  // For each reversed axis: stride_ = size of that axis,
  // trailing_ = product of all later dims (flat-index granularity of the axis).
  std::vector<index_t> stride_(param.axis.ndim());
  std::vector<index_t> trailing_(param.axis.ndim());
  index_t reverse_index = 0;
  for (int axis : param.axis) {
    CHECK_LT(axis, ishape.ndim());
    stride_[reverse_index] = ishape[axis];
    trailing_[reverse_index] = 1;
    for (int i2 = axis + 1; i2 < ishape.ndim(); ++i2) {
      trailing_[reverse_index] *= ishape[i2];
    }
    reverse_index++;
  }
#ifdef __CUDACC__
  // Copy the host-side tables into requested workspace so the kernel can read
  // them from device memory.
  mshadow::Tensor<xpu, 1, uint8_t> workspace =
      ctx.requested[0].get_space_typed<xpu, 1, uint8_t>(
          mshadow::Shape1(reverse_index * sizeof(index_t) * 2), s);
  auto stride_workspace = workspace.dptr_;
  auto trailing_workspace = workspace.dptr_ + reverse_index * sizeof(index_t);
  cudaMemcpyAsync(stride_workspace, thrust::raw_pointer_cast(stride_.data()),
                  stride_.size() * sizeof(index_t),
                  cudaMemcpyHostToDevice, mshadow::Stream<gpu>::GetStream(s));
  cudaMemcpyAsync(trailing_workspace, thrust::raw_pointer_cast(trailing_.data()),
                  trailing_.size() * sizeof(index_t),
                  cudaMemcpyHostToDevice, mshadow::Stream<gpu>::GetStream(s));
#endif
#ifdef __CUDACC__
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    Kernel<reverse, xpu>::Launch(s, inputs[0].Size(), reverse_index,
                                 inputs[0].dptr<DType>(), outputs[0].dptr<DType>(),
                                 reinterpret_cast<index_t*>(stride_workspace), reinterpret_cast<index_t*>(trailing_workspace));
  });
#else
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    Kernel<reverse, xpu>::Launch(s, inputs[0].Size(), reverse_index,
                                 inputs[0].dptr<DType>(), outputs[0].dptr<DType>(),
                                 stride_.data(), trailing_.data());
  });
#endif
}
/*! \brief Parameters of the stack operator: number of inputs and the output
 * axis along which they are stacked. */
struct StackParam : public dmlc::Parameter<StackParam> {
  int axis;      // axis in the result along which inputs are stacked
  int num_args;  // number of input arrays
  DMLC_DECLARE_PARAMETER(StackParam) {
    DMLC_DECLARE_FIELD(axis)
    .set_default(0)
    .describe("The axis in the result array along which the input arrays are stacked.");
    DMLC_DECLARE_FIELD(num_args).set_lower_bound(1)
    .describe("Number of inputs to be stacked.");
  }
  // Serialize the parameters back into their string form (used for op recording).
  void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
    std::ostringstream axis_s, num_args_s;
    axis_s << axis;
    num_args_s << num_args;
    (*dict)["axis"] = axis_s.str();
    (*dict)["num_args"] = num_args_s.str();
  }
};
/*!
 * \brief Shape inference for stack: all inputs must share one shape; the
 * output inserts a new axis of size num_args at the (normalized) stack axis.
 */
inline bool StackOpShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector *in_attrs,
                         mxnet::ShapeVector *out_attrs) {
  const StackParam& param = dmlc::get<StackParam>(attrs.parsed);
  // Unify all input shapes into dshape (they must agree).
  mxnet::TShape dshape;
  for (const mxnet::TShape& i : (*in_attrs)) {
    shape_assign(&dshape, i);
  }
  if (!shape_is_known(dshape)) return false;
  mxnet::TShape oshape(dshape.ndim() + 1, -1);
  int axis = CheckAxis(param.axis, oshape.ndim());
  for (int i = 0; i < axis; ++i) {
    oshape[i] = dshape[i];
  }
  oshape[axis] = param.num_args;  // new axis holds one slot per input
  for (index_t i = axis + 1; i < oshape.ndim(); ++i) {
    oshape[i] = dshape[i-1];
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
  return shape_is_known(oshape);
}
/*!
 * \brief Forward pass of stack: views the output as (leading, num_args,
 * trailing) and each input as (leading, 1, trailing), then concatenates the
 * inputs along the middle axis.
 */
template<typename xpu>
void StackOpForward(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mshadow::expr;
  const StackParam& param = dmlc::get<StackParam>(attrs.parsed);
  int axis = CheckAxis(param.axis, outputs[0].ndim());
  Stream<xpu> *s = ctx.get_stream<xpu>();
  MSHADOW_TYPE_SWITCH_WITH_BOOL(outputs[0].type_flag_, DType, {
    std::vector<Tensor<xpu, 3, DType> > data(inputs.size());
    Tensor<xpu, 3, DType> out;
    // leading/trailing = products of output dims before/after the stack axis.
    size_t leading = 1, trailing = 1;
    for (int i = 0; i < axis; ++i) {
      leading *= outputs[0].shape_[i];
    }
    for (int i = axis + 1; i < outputs[0].ndim(); ++i) {
      trailing *= outputs[0].shape_[i];
    }
    size_t mid = outputs[0].shape_[axis];
    Shape<3> oshape = Shape3(leading, mid, trailing);
    out = outputs[0].get_with_shape<xpu, 3, DType>(oshape, s);
    for (size_t i = 0; i < inputs.size(); ++i) {
      Shape<3> dshape = Shape3(leading, 1, trailing);
      data[i] = inputs[i].get_with_shape<xpu, 3, DType>(dshape, s);
    }
    Concatenate(data, &out, 1, req[0]);
  })
}
// Backward pass of `stack`: splits the incoming gradient, viewed as
// (leading, num_args, trailing), into one (leading, 1, trailing) slice per
// input along the middle axis.
template<typename xpu>
void StackOpBackward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
using namespace mshadow::expr;
const StackParam& param = dmlc::get<StackParam>(attrs.parsed);
int axis = CheckAxis(param.axis, inputs[0].ndim());
Stream<xpu> *s = ctx.get_stream<xpu>();
MSHADOW_TYPE_SWITCH_WITH_BOOL(inputs[0].type_flag_, DType, {
std::vector<Tensor<xpu, 3, DType> > grad_in(outputs.size());
Tensor<xpu, 3, DType> grad;
size_t leading = 1, trailing = 1;
// Product of gradient dims strictly before / after the stacking axis.
for (int i = 0; i < axis; ++i) {
leading *= inputs[0].shape_[i];
}
for (int i = axis + 1; i < inputs[0].ndim(); ++i) {
trailing *= inputs[0].shape_[i];
}
size_t mid = inputs[0].shape_[axis];
Shape<3> oshape = Shape3(leading, mid, trailing);
grad = inputs[0].get_with_shape<xpu, 3, DType>(oshape, s);
for (size_t i = 0; i < outputs.size(); ++i) {
Shape<3> dshape = Shape3(leading, 1, trailing);
grad_in[i] = outputs[i].get_with_shape<xpu, 3, DType>(dshape, s);
}
Split(grad, &grad_in, 1, req);
})
}
// Parameters of the `squeeze` operator.
struct SqueezeParam : public dmlc::Parameter<SqueezeParam> {
// Optional list of axes to remove; when unset, every size-1 axis is removed.
dmlc::optional<mxnet::Tuple<int>> axis;
DMLC_DECLARE_PARAMETER(SqueezeParam) {
DMLC_DECLARE_FIELD(axis)
.set_default(dmlc::optional<mxnet::Tuple<int>>())
.describe("Selects a subset of the single-dimensional entries in the shape."
" If an axis is selected with shape entry greater than one, an error is raised.");
}
// Serializes the parsed field back into string form for attribute round-tripping.
void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
std::ostringstream axis_s;
axis_s << axis;
(*dict)["axis"] = axis_s.str();
}
};
// Given a shape in which squeezed-out axes have been marked with -1,
// move all the -1 entries to the end of the shape array while keeping
// the relative order of the remaining (kept) dimensions.
// Returns the number of kept dimensions (the new shape size).
// NOTE: despite what an older comment said, the sentinel is -1, not 0.
inline size_t SqueezeShapeHelper(mxnet::TShape* shape) {
CHECK(shape != nullptr);
size_t count = 0;
for (int i = 0; i < shape->ndim(); ++i) {
if ((*shape)[i] == -1) {
++count;
} else {
// Shift each kept dimension left past the -1s seen so far.
std::swap((*shape)[i], (*shape)[i-count]);
}
}
return shape->ndim() - count;
}
// Shape inference for `squeeze`: removes axes of size 1 — either all of
// them, or only those listed in `param.axis`. Axes to drop are first marked
// with -1, then compacted away by SqueezeShapeHelper.
inline bool SqueezeShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
const SqueezeParam& param = nnvm::get<SqueezeParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]";
CHECK_EQ(out_attrs->size(), 1U);
const mxnet::TShape& dshape = in_attrs->at(0);
const int dndim = dshape.ndim();
if (!shape_is_known(dshape)) return false;
mxnet::TShape oshape = dshape;
if (param.axis.has_value()) {
// preprocess axis: normalize negatives, validate range and size-1 constraint
mxnet::Tuple<int> axes = param.axis.value();
for (int i = 0; i < axes.ndim(); ++i) {
if (axes[i] < 0) {
axes[i] += dndim;
CHECK_GE(axes[i], 0)
<< "axis " << axes[i] - dndim << " is out of bounds for array of dimension " << dndim;
}
CHECK_LT(axes[i], dndim)
<< "axis " << axes[i] << " is out of bounds for array of dimension " << dndim;
CHECK_EQ(dshape[axes[i]], 1)
<< "cannot select an axis to squeeze out which has size="
<< dshape[axes[i]] << " not equal to one";
// An axis already marked -1 means it was listed twice.
CHECK_NE(oshape[axes[i]], -1) << "duplicate value in axis";
oshape[axes[i]] = -1;
}
} else {
// No explicit axes: mark every size-1 dimension for removal.
for (int i = 0; i < oshape.ndim(); ++i) {
if (oshape[i] == 1) oshape[i] = -1;
}
}
size_t oshape_size = SqueezeShapeHelper(&oshape);
if (oshape_size == 0) { // corner case when dshape is (1, 1, 1, 1)
oshape[0] = 1;
oshape_size = 1;
}
SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape(oshape.data(), oshape.data()+oshape_size));
return true;
}
// Parameters shared by depth_to_space and space_to_depth.
struct DepthToSpaceParam : public dmlc::Parameter<DepthToSpaceParam> {
  // Side length of the square block moved into/out of the depth dimension.
  int block_size;
  DMLC_DECLARE_PARAMETER(DepthToSpaceParam) {
    DMLC_DECLARE_FIELD(block_size)
    // Fixed typo in the user-visible help string ("." -> ",").
    .describe("Blocks of [block_size, block_size] are moved");
  }
};
// Shape inference for depth_to_space on an NCHW tensor: depth shrinks by
// block^2 while both spatial dimensions grow by block.
inline bool DepthToSpaceOpShape(const nnvm::NodeAttrs& attrs,
                                mxnet::ShapeVector* in_attrs,
                                mxnet::ShapeVector* out_attrs) {
  const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  mxnet::TShape& in_shape = in_attrs->at(0);
  // Check rank-known FIRST: a not-yet-inferred shape must return false
  // ("try again later") instead of tripping the 4-D assertion below.
  if (!mxnet::ndim_is_known(in_shape)) {
    return false;
  }
  CHECK_EQ(in_shape.ndim(), 4) << "Operation Depth To Space requires exactly 4D tensor";
  int block = param.block_size;
  // Reject zero AND negative values; the previous CHECK_NE only rejected 0
  // despite the message promising a positive integer.
  CHECK_GT(block, 0) << "block_size must be a positive integer value";
  CHECK_NE(in_shape[1], 0) << "Depth dimension:1 cannot be 0";
  CHECK_EQ(in_shape[1] % (block * block), 0)
    << "Cannot perform Depth To Space operation on the specified tensor."
       " Dimension:1(depth dimension) should be a multiple of 'block^2'";
  CHECK_NE(in_shape[0], 0)
    << "Operation requires a 4D tensor. Size of dimension:0 cannot be 0";
  CHECK_NE(in_shape[2], 0)
    << "Operation requires a 4D tensor. Size of dimension:2 cannot be 0";
  CHECK_NE(in_shape[3], 0)
    << "Operation requires a 4D tensor. Size of dimension:3 cannot be 0";
  mxnet::TShape expected_out(4, -1);
  expected_out[0] = in_shape[0];
  expected_out[1] = in_shape[1] / (block * block);
  // Spatial dimensions (2 and 3) each expand by a factor of block.
  for (int i = 2; i < expected_out.ndim(); ++i) {
    expected_out[i] = in_shape[i] * block;
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, expected_out);
  return shape_is_known(expected_out);
}
// Type inference for depth_to_space: the single output shares the input's
// dtype (propagated in both directions so either side can pin it).
inline bool DepthToSpaceOpType(const nnvm::NodeAttrs& attrs,
                               std::vector<int>* in_attrs,
                               std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  const int in_dtype = in_attrs->at(0);
  TYPE_ASSIGN_CHECK(*out_attrs, 0, in_dtype);
  TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
  return out_attrs->at(0) != -1;
}
/*!
 * \brief Peels one coordinate off a flattened output index and accumulates
 * the corresponding contribution to the flattened input index.
 * \param index_position index within the offset array for this dimension
 * \param dim_size size of the current dimension
 * \param idx remaining (still-folded) output index; updated in place
 * \param inp_index accumulated linear input index; updated in place
 * \param offset_arr linear strides of the input tensor
 */
MSHADOW_XINLINE void update_index(index_t index_position, index_t dim_size, index_t *idx,
                                  index_t *inp_index, const index_t* offset_arr) {
  const index_t quotient = *idx / dim_size;
  const index_t coord = *idx - quotient * dim_size;  // i.e. *idx % dim_size
  *inp_index += coord * offset_arr[index_position];
  *idx = quotient;
}
/*!
 * \brief This function performs the tensor transpose (0, 1, 2, 3, 4, 5) ->
 * (0, 3, 4, 1, 5, 2) by computing linear index within input tensor to be mapped
 * to the ith index of output tensor
 * \param i tensor index
 * \param out_data output tensor
 * \param in_data input tensor
 * \param block size of chunks to be moved out of depth dimension
 * \param size array containing the size of each dimension of input tensor
 * \param offset_arr array containing the linear offset of input tensor
 */
template<int req>
struct depth_to_space_forward {
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType* out_data, const DType* in_data,
const int block, const index_t* size, const index_t* offset_arr) {
// Peel output coordinates off `i` from the innermost dimension outwards;
// each update_index call folds one coordinate into the linear input index.
// The offset positions (2, 5, 1, 4, 3, 0) realize the permuted 6-D view.
index_t inp_index = 0, idx = i, dim_size;
dim_size = block;
update_index(2, dim_size, &idx, &inp_index, offset_arr);
dim_size = size[3];
update_index(5, dim_size, &idx, &inp_index, offset_arr);
dim_size = block;
update_index(1, dim_size, &idx, &inp_index, offset_arr);
dim_size = size[2];
update_index(4, dim_size, &idx, &inp_index, offset_arr);
dim_size = size[1] / (block * block);
update_index(3, dim_size, &idx, &inp_index, offset_arr);
dim_size = size[0];
update_index(0, dim_size, &idx, &inp_index, offset_arr);
// Honor the write request type (write/add/inplace) for the output element.
KERNEL_ASSIGN(out_data[i], req, in_data[inp_index]);
}
};
/*!
 * \brief This function calculates the linear offset for each dimension of
 * input tensor and stores them in an array, which is later used in
 * performing depth_to_space operation
 * \param i global thread id
 * \param offset_arr array to be populated with offset values
 * \param size array to be populated with size of each dimension of input tensor
 * \param block size of chunks to be moved out of depth dimension
 * \param size0 size of Dim 0 of input tensor
 * \param size1 size of Dim 1 of input tensor
 * \param size2 size of Dim 2 of input tensor
 * \param size3 size of Dim 3 of input tensor
 */
template<int req>
struct compute_offset_for_depth_to_space {
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType* offset_arr, DType* size, const int block,
const index_t size0, const index_t size1, const index_t size2,
const index_t size3) {
// Launched with a single work item (i == 0): records the four input dims
// and the six strides of the virtual 6-D view used by the forward kernel.
size[0] = size0;
size[1] = size1;
size[2] = size2;
size[3] = size3;
// Strides built innermost-out for the (N, C/b^2, b, b, H, W) decomposition.
offset_arr[5] = 1;
offset_arr[4] = offset_arr[5] * size[3];
offset_arr[3] = offset_arr[4] * size[2];
offset_arr[2] = offset_arr[3] * size[1] / (block * block);
offset_arr[1] = offset_arr[2] * block;
offset_arr[0] = offset_arr[1] * block;
}
};
// Forward pass of depth_to_space: stages a 10-element index_t workspace
// (6 strides followed by 4 dimension sizes), fills it on-device with a
// single-item kernel, then maps every output element from the input.
template<typename xpu>
void DepthToSpaceOpForward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
const TBlob& in_data = inputs[0];
const TBlob& out_data = outputs[0];
const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
using namespace mxnet_op;
int block = param.block_size;
// Workspace layout: [0..5] linear offsets, [6..9] input dimension sizes.
mshadow::Tensor<xpu, 1, char> workspace =
ctx.requested[0].get_space_typed<xpu, 1, char>(mshadow::Shape1(sizeof(index_t) * 10), s);
char* workspace_curr_ptr = workspace.dptr_;
index_t* offset_arr = reinterpret_cast<index_t*>(workspace_curr_ptr);
index_t* size = reinterpret_cast<index_t*>(workspace_curr_ptr + sizeof(index_t) * 6);
MSHADOW_TYPE_SWITCH(out_data.type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
// Single-item kernel populates the offset/size arrays on the device.
Kernel<compute_offset_for_depth_to_space<req_type>, xpu>::Launch(
s, 1, offset_arr, size, block, in_data.shape_[0], in_data.shape_[1],
in_data.shape_[2], in_data.shape_[3]);
Kernel<depth_to_space_forward<req_type>, xpu>::Launch(
s, out_data.Size(), out_data.dptr<DType>(), in_data.dptr<DType>(),
block, size, offset_arr);
});
});
}
// Shape inference for space_to_depth on an NCHW tensor: depth grows by
// block^2 while both spatial dimensions shrink by block.
inline bool SpaceToDepthOpShape(const nnvm::NodeAttrs& attrs,
                                mxnet::ShapeVector* in_attrs,
                                mxnet::ShapeVector* out_attrs) {
  const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  mxnet::TShape& in_shape = in_attrs->at(0);
  // Check rank-known FIRST: a not-yet-inferred shape must return false
  // ("try again later") instead of tripping the 4-D assertion below.
  if (!mxnet::ndim_is_known(in_shape)) {
    return false;
  }
  CHECK_EQ(in_shape.ndim(), 4) << "Operation Space To Depth requires exactly 4D tensor";
  int block = param.block_size;
  // Reject zero AND negative values; the previous CHECK_NE only rejected 0
  // despite the message promising a positive integer.
  CHECK_GT(block, 0) << "block_size must be a positive integer value";
  CHECK_NE(in_shape[0], 0)
    << "Operation requires a 4D tensor. Size of dimension:0 cannot be 0";
  CHECK_NE(in_shape[1], 0) << "Depth dimension:1 cannot be 0";
  CHECK_NE(in_shape[2], 0)
    << "Operation requires a 4D tensor. Size of dimension:2 cannot be 0";
  // Messages corrected: they previously said "Depth To Space" although this
  // function validates Space To Depth.
  CHECK_EQ(in_shape[2] % block, 0)
    << "Cannot perform Space To Depth operation on the specified tensor."
       " Dimension:2(1st Space dimension) should be a multiple of 'block' ";
  CHECK_NE(in_shape[3], 0)
    << "Operation requires a 4D tensor. Size of dimension:3 cannot be 0";
  CHECK_EQ(in_shape[3] % block, 0)
    << "Cannot perform Space To Depth operation on the specified tensor."
       " Dimension:3(2nd space dimension) should be a multiple of 'block' ";
  mxnet::TShape expected_out(in_shape.ndim(), -1);
  expected_out[0] = in_shape[0];
  expected_out[1] = in_shape[1] * block * block;
  // Spatial dimensions (2 and 3) each contract by a factor of block.
  for (int i = 2; i < expected_out.ndim(); ++i) {
    expected_out[i] = in_shape[i] / block;
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, expected_out);
  return shape_is_known(expected_out);
}
// Type inference for space_to_depth: the single output shares the input's
// dtype (propagated in both directions so either side can pin it).
inline bool SpaceToDepthOpType(const nnvm::NodeAttrs& attrs,
                               std::vector<int>* in_attrs,
                               std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  const int in_dtype = in_attrs->at(0);
  TYPE_ASSIGN_CHECK(*out_attrs, 0, in_dtype);
  TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
  return out_attrs->at(0) != -1;
}
/*!
 * \brief This function performs the tensor transpose (0, 1, 2, 3, 4, 5) ->
 * (0, 3, 5, 1, 2, 4) by computing linear index within input tensor to be mapped
 * to the ith index of output tensor
 * \param i tensor index
 * \param out_data output tensor
 * \param in_data input tensor
 * \param block size of chunks to be moved out of depth dimension
 * \param size array containing the size of each dimension of input tensor
 * \param offset_arr array containing the linear offset of input tensor
 */
template<int req>
struct space_to_depth_forward {
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType* out_data, const DType* in_data, const int block,
const index_t* size, const index_t* offset_arr) {
// Peel output coordinates off `i` from the innermost dimension outwards;
// each update_index call folds one coordinate into the linear input index.
// The offset positions (4, 2, 1, 5, 3, 0) realize the permuted 6-D view.
index_t inp_index = 0, idx = i, dim_size;
dim_size = size[3] / block;
update_index(4, dim_size, &idx, &inp_index, offset_arr);
dim_size = size[2] / block;
update_index(2, dim_size, &idx, &inp_index, offset_arr);
dim_size = size[1];
update_index(1, dim_size, &idx, &inp_index, offset_arr);
dim_size = block;
update_index(5, dim_size, &idx, &inp_index, offset_arr);
dim_size = block;
update_index(3, dim_size, &idx, &inp_index, offset_arr);
dim_size = size[0];
update_index(0, dim_size, &idx, &inp_index, offset_arr);
// Honor the write request type (write/add/inplace) for the output element.
KERNEL_ASSIGN(out_data[i], req, in_data[inp_index]);
}
};
/*!
 * \brief This function calculates the linear offset for each dimension of
 * input tensor and stores them in an array, which is later used in
 * performing space_to_depth operation
 * \param i global thread id
 * \param offset_arr array to be populated with offset values
 * \param size array to be populated with size of each dimension of input tensor
 * \param block size of chunks to be moved out of depth dimension
 * \param size0 size of Dim 0 of input tensor
 * \param size1 size of Dim 1 of input tensor
 * \param size2 size of Dim 2 of input tensor
 * \param size3 size of Dim 3 of input tensor
 */
template<int req>
struct compute_offset_for_space_to_depth {
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType* offset_arr, DType* size, const int block,
const index_t size0, const index_t size1,
const index_t size2, const index_t size3) {
// Launched with a single work item (i == 0): records the four input dims
// and the six strides of the virtual 6-D view used by the forward kernel.
size[0] = size0;
size[1] = size1;
size[2] = size2;
size[3] = size3;
// Strides built innermost-out for the (N, C, H/b, b, W/b, b) decomposition.
offset_arr[5] = 1;
offset_arr[4] = offset_arr[5] * block;
offset_arr[3] = offset_arr[4] * size[3] / block;
offset_arr[2] = offset_arr[3] * block;
offset_arr[1] = offset_arr[2] * size[2] / block;
offset_arr[0] = offset_arr[1] * size[1];
}
};
// Forward pass of space_to_depth: stages a 10-element index_t workspace
// (6 strides followed by 4 dimension sizes), fills it on-device with a
// single-item kernel, then maps every output element from the input.
template<typename xpu>
void SpaceToDepthOpForward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
const TBlob& in_data = inputs[0];
const TBlob& out_data = outputs[0];
const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
using namespace mxnet_op;
int block = param.block_size;
// Workspace layout: [0..5] linear offsets, [6..9] input dimension sizes.
mshadow::Tensor<xpu, 1, char> workspace =
ctx.requested[0].get_space_typed<xpu, 1, char>(mshadow::Shape1(sizeof(index_t) * 10), s);
char* workspace_curr_ptr = workspace.dptr_;
index_t* offset_arr = reinterpret_cast<index_t*>(workspace_curr_ptr);
index_t* size = reinterpret_cast<index_t*>(workspace_curr_ptr + sizeof(index_t) * 6);
MSHADOW_TYPE_SWITCH(out_data.type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
// Single-item kernel populates the offset/size arrays on the device.
Kernel<compute_offset_for_space_to_depth<req_type>, xpu>::Launch(
s, 1, offset_arr, size, block, in_data.shape_[0], in_data.shape_[1],
in_data.shape_[2], in_data.shape_[3]);
Kernel<space_to_depth_forward<req_type>, xpu>::Launch(
s, out_data.Size(), out_data.dptr<DType>(), in_data.dptr<DType>(),
block, size, offset_arr);
});
});
}
// Input-argument indices of the split operator (kData = the data blob).
namespace split_enum {
enum SplitOpInputs {kData};
} // namespace split_enum
// Parameters of the split / split_v2 operator. Splitting is driven either
// by explicit boundary `indices` or by a count of equal `sections`.
struct SplitParam : public dmlc::Parameter<SplitParam> {
// Explicit split boundaries along `axis` (used when sections == 0).
mxnet::TShape indices;
// Axis to split along; may be negative.
int axis;
// When true, drop the (length-1) split axis from each output.
bool squeeze_axis;
// Number of equal sections; 0 means "use `indices` instead".
int sections;
DMLC_DECLARE_PARAMETER(SplitParam) {
DMLC_DECLARE_FIELD(indices)
.describe("Indices of splits. The elements should denote the boundaries of at which split"
" is performed along the `axis`.");
DMLC_DECLARE_FIELD(axis).set_default(1)
.describe("Axis along which to split.");
DMLC_DECLARE_FIELD(squeeze_axis).set_default(0)
.describe("If true, Removes the axis with length 1 from the shapes of the output arrays."
" **Note** that setting `squeeze_axis` to ``true`` removes axis with length 1"
" only along the `axis` which it is split."
" Also `squeeze_axis` can be set to ``true``"
" only if ``input.shape[axis] == num_outputs``.");
DMLC_DECLARE_FIELD(sections).set_default(0)
.describe("Number of sections if equally splitted. Default to 0 which means split by indices.");
}
// Serializes the parsed fields back into string form for attribute round-tripping.
void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
std::ostringstream indices_s, axis_s, squeeze_axis_s, sections_s;
indices_s << indices;
axis_s << axis;
squeeze_axis_s << squeeze_axis;
sections_s << sections;
(*dict)["indices"] = indices_s.str();
(*dict)["axis"] = axis_s.str();
(*dict)["squeeze_axis"] = squeeze_axis_s.str();
(*dict)["sections"] = sections_s.str();
}
}; // struct SplitParam
// Computes the start boundary of each of `sections` nearly-equal chunks of
// axis `axis`, plus a leading 0 (indices[i] is the start of chunk i; the
// array has sections+1 entries). When the axis length is not evenly
// divisible, the first `len % sections` chunks get one extra element.
inline mxnet::TShape GetSplitIndices(const mxnet::TShape& ishape, int axis, int sections) {
  // Guard against division/modulo by zero: callers are expected to pass
  // sections > 0, but fail loudly here rather than invoking UB.
  CHECK_GT(sections, 0) << "sections must be a positive integer, got " << sections;
  mxnet::TShape indices(sections + 1, -1);
  indices[0] = 0;
  const int64_t small_chunk = static_cast<int64_t>(ishape[axis] / sections);
  const int64_t big_chunk = small_chunk + 1;
  const int num_big = ishape[axis] % sections;
  for (int i = 0; i < sections; ++i) {
    if (i < num_big) {
      indices[i + 1] = big_chunk * (i + 1);
    } else {
      indices[i + 1] = indices[i] + small_chunk;
    }
  }
  return indices;
}
// Type inference for split: every output inherits the dtype of the single
// input, which must already be known.
inline bool SplitOpType(const nnvm::NodeAttrs& attrs,
                        std::vector<int>* in_attrs,
                        std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  const int dtype = (*in_attrs)[0];
  CHECK_NE(dtype, -1) << "First input must have specified type";
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  const int num_outputs = (param.sections > 0) ? param.sections : param.indices.ndim();
  // Replace any previous contents with `num_outputs` copies of the dtype.
  out_attrs->assign(num_outputs, dtype);
  return true;
}
// Shape inference workhorse for split, given an already-canonicalized
// (non-negative) axis. Assigns each output's shape from the section
// boundaries, then back-propagates a reconstructed input shape.
inline bool SplitOpShapeImpl(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector* in_attrs,
mxnet::ShapeVector* out_attrs,
const int real_axis) {
using namespace mshadow;
const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
mxnet::TShape dshape = in_attrs->at(split_enum::kData);
mxnet::TShape ishape = in_attrs->at(split_enum::kData);
// Section boundaries: computed from `sections` or taken verbatim.
const mxnet::TShape indices =
(param.sections > 0) ? GetSplitIndices(ishape, real_axis, param.sections) : param.indices;
int num_outputs = (param.sections > 0) ? indices.ndim() - 1 : indices.ndim();
// Pre-compute squeezed output shape for future usage
mxnet::TShape squeezed_dshape = dshape;
// Shift dims after real_axis left by one, then drop the last entry.
for (int d = real_axis; d < squeezed_dshape.ndim() - 1; ++d) {
squeezed_dshape[d] = squeezed_dshape[d+1];
}
squeezed_dshape = mxnet::TShape(&squeezed_dshape[0], &squeezed_dshape[squeezed_dshape.ndim()-1]);
// Assign shape to every output
for (int i = 0; i < num_outputs; ++i) {
index_t start = indices[i];
// The last section extends to the end of the axis.
index_t end = (i < num_outputs - 1) ? indices[i + 1] : ishape[real_axis];
if (ishape[real_axis] == 0U) {
end = start;
} else {
CHECK(start <= end)
<< "start " << start << " is not less than end " << end << "for subarray " << i;
CHECK(end <= ishape[real_axis])
<< "end " << end << " is no less than the size of the axis " << ishape[real_axis];
}
dshape[real_axis] = (end - start);
if (param.squeeze_axis) {
// Squeezing is only legal when every section has extent exactly 1.
CHECK_EQ(end - start, 1U) << "expected axis size of 1 but got " << end - start;
SHAPE_ASSIGN_CHECK(*out_attrs, i, squeezed_dshape);
} else {
SHAPE_ASSIGN_CHECK(*out_attrs, i, dshape);
}
}
// Reconstruct the input shape from the (possibly refined) output shapes
// and push it back so inference can flow output -> input too.
mxnet::TShape back_calculate_dshape = ishape;
back_calculate_dshape[real_axis] = 0;
for (int d = 0; d < real_axis; ++d) {
back_calculate_dshape[d] = (*out_attrs)[0][d];
}
if (param.squeeze_axis) {
back_calculate_dshape[real_axis] = num_outputs;
} else {
// Split-axis length is the sum of the sections' lengths.
for (int i = 0; i < num_outputs; ++i) {
back_calculate_dshape[real_axis] += (*out_attrs)[i][real_axis];
}
}
for (int d = real_axis + 1; d < ishape.ndim(); ++d) {
if (param.squeeze_axis) {
back_calculate_dshape[d] = (*out_attrs)[0][d - 1];
} else {
back_calculate_dshape[d] = (*out_attrs)[0][d];
}
}
SHAPE_ASSIGN_CHECK(*in_attrs, split_enum::kData, back_calculate_dshape);
return true;
}
// Shape inference entry point for split: validates the axis (positive or
// negative form), canonicalizes it, and delegates to SplitOpShapeImpl.
inline bool SplitOpShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector* in_attrs,
                         mxnet::ShapeVector* out_attrs) {
  using namespace mshadow;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  mxnet::TShape dshape = in_attrs->at(split_enum::kData);
  if (!mxnet::ndim_is_known(dshape)) return false;
  if (param.axis >= 0) {
    CHECK_LT(param.axis, dshape.ndim());
  } else {
    // A negative axis counts from the end and is valid iff axis + ndim >= 0.
    // (The previous check `axis + ndim < ndim` was vacuously true for every
    // negative axis, so out-of-range values such as -ndim-1 slipped through.)
    CHECK_GE(param.axis + dshape.ndim(), 0);
  }
  int real_axis = param.axis;
  if (real_axis < 0) {
    real_axis += dshape.ndim();
  }
  return SplitOpShapeImpl(attrs, in_attrs, out_attrs, real_axis);
}
struct SplitKernel {
/*!
 * \brief Map function for forward split_v2 operator: scatters element i of
 * the flattened input into the output section that owns its axis coordinate.
 * \param i global thread id
 * \param in_data ptr to input buffer
 * \param out_data ptr to ptr of outputs buffer
 * \param indices ptr to indices buffer (section start boundaries, plus end)
 * \param num_sections # of sections after split
 * \param axis_size size of axis to be splitted on
 * \param trailing_size step size within the data buffer of the axis to be splitted on
 */
template<typename DType>
static MSHADOW_XINLINE void Map(size_t i,
const DType *in_data, DType** out_data, const size_t* indices,
const size_t num_sections, const size_t axis_size,
const size_t trailing_size) {
// Coordinate of element i along the split axis.
size_t idx = i / trailing_size % axis_size;
size_t target = 0;
// Linear scan over section boundaries: ends with `target` = last section
// whose start boundary is <= idx.
for (size_t section = 0;
section < num_sections && indices[section] <= idx;
target = section++) {}
DType* target_data = out_data[target];
// Decompose i into (head, mid, tail) and rebuild the linear index inside
// the (smaller) target section.
const size_t mid_idx = idx - indices[target];
const size_t head_idx = i / (trailing_size * axis_size);
const size_t tail_idx = i % trailing_size;
const size_t section_size = indices[target + 1] - indices[target];
const size_t target_idx =
head_idx * trailing_size * section_size + mid_idx * trailing_size + tail_idx;
target_data[target_idx] = in_data[i];
}
};
struct ConcatenateKernel {
/*!
 * \brief Map function for backward split_v2 operator: gathers element i of
 * the flattened input gradient from the output-grad section that owns its
 * axis coordinate (inverse of SplitKernel).
 * \param i global thread id
 * \param out_grad ptr to ptr of out grads buffer
 * \param in_grad ptr to input grad buffer
 * \param indices ptr to indices buffer (section start boundaries, plus end)
 * \param num_sections # of sections after split
 * \param axis_size size of axis to be splitted on
 * \param trailing_size step size within the data buffer of the axis to be splitted on
 */
template<typename DType>
static MSHADOW_XINLINE void Map(size_t i,
DType** out_grad, DType* in_grad, const size_t* indices,
const size_t num_sections, const size_t axis_size,
const size_t trailing_size) {
// Coordinate of element i along the split axis.
size_t idx = i / trailing_size % axis_size;
size_t src = 0;
// Linear scan: ends with `src` = last section whose start is <= idx.
for (size_t section = 0;
section < num_sections && indices[section] <= idx;
src = section++) {}
DType* src_grad = out_grad[src];
// Rebuild the linear index of this element inside the source section.
const size_t mid_idx = idx - indices[src];
const size_t head_idx = i / (trailing_size * axis_size);
const size_t tail_idx = i % trailing_size;
const size_t section_size = indices[src + 1] - indices[src];
const size_t src_idx =
head_idx * trailing_size * section_size + mid_idx * trailing_size + tail_idx;
in_grad[i] = src_grad[src_idx];
}
};
// Forward implementation of split for a canonical (non-negative) axis:
// stages the section boundaries and output pointers in a device workspace
// (layout: [indices as size_t...][output DType* pointers...]), copies them
// over, then scatters every input element with SplitKernel.
template<typename xpu>
inline void SplitOpForwardImpl(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs,
const int real_axis) {
using namespace mshadow;
using namespace mshadow::expr;
using namespace mxnet_op;
const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
Stream<xpu> *s = ctx.get_stream<xpu>();
const TBlob& input_data = inputs[split_enum::kData];
size_t leading = 1, trailing = 1;
CHECK_LT(real_axis, input_data.ndim());
size_t mid = input_data.shape_[real_axis];
// NOTE(review): `leading` is computed here but never used below (only
// `mid` and `trailing` feed the kernel launch).
for (int i = 0; i < real_axis; ++i) {
leading *= input_data.shape_[i];
}
for (int i = real_axis + 1; i < input_data.ndim(); ++i) {
trailing *= input_data.shape_[i];
}
size_t workspace_size = 0;
const mxnet::TShape& ishape = input_data.shape_;
const mxnet::TShape split_pts =
(param.sections > 0) ? GetSplitIndices(ishape, real_axis, param.sections) : param.indices;
std::vector<size_t> indices;
for (const auto& section : split_pts) {
indices.push_back(section);
}
// With explicit indices there is no trailing boundary; append the axis end.
if (param.sections == 0) {
indices.push_back(ishape[real_axis]);
}
workspace_size += indices.size() * sizeof(size_t);
MSHADOW_TYPE_SWITCH(input_data.type_flag_, DType, {
std::vector<DType*> output_data;
for (const TBlob& data : outputs) {
output_data.push_back(data.dptr<DType>());
}
workspace_size += output_data.size() * sizeof(DType*);
// Workspace holds the boundary array followed by the output pointers.
Tensor<xpu, 1, char> workspace =
ctx.requested[0].get_space_typed<xpu, 1, char>(Shape1(workspace_size), s);
Tensor<cpu, 1, size_t> indices_cpu_tensor(indices.data(), Shape1(indices.size()));
Tensor<xpu, 1, size_t> indices_xpu_tensor(
reinterpret_cast<size_t*>(workspace.dptr_), Shape1(indices.size()));
Tensor<cpu, 1, DType*> ptrs_cpu_tensor(output_data.data(), Shape1(output_data.size()));
Tensor<xpu, 1, DType*> ptrs_xpu_tensor(
reinterpret_cast<DType**>(workspace.dptr_ + indices.size() * sizeof(size_t)),
Shape1(output_data.size()));
mshadow::Copy(indices_xpu_tensor, indices_cpu_tensor, s);
mshadow::Copy(ptrs_xpu_tensor, ptrs_cpu_tensor, s);
Kernel<SplitKernel, xpu>::Launch(
s, input_data.Size(), input_data.dptr<DType>(), ptrs_xpu_tensor.dptr_,
indices_xpu_tensor.dptr_, indices.size() - 1, mid, trailing);
});
}
// Forward entry point of split: validates arity, canonicalizes a possibly
// negative axis, and delegates the real work to SplitOpForwardImpl.
template<typename xpu>
inline void SplitOpForward(const nnvm::NodeAttrs& attrs,
                           const OpContext& ctx,
                           const std::vector<TBlob>& inputs,
                           const std::vector<OpReqType>& req,
                           const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mshadow::expr;
  using namespace mxnet_op;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  CHECK_EQ(inputs.size(), 1U);
  const int expected_outputs =
      (param.sections > 0) ? param.sections : param.indices.ndim();
  CHECK_EQ(outputs.size(), expected_outputs);
  const TBlob& input_data = inputs[split_enum::kData];
  const int canonical_axis =
      (param.axis < 0) ? param.axis + input_data.ndim() : param.axis;
  SplitOpForwardImpl<xpu>(attrs, ctx, inputs, req, outputs, canonical_axis);
}
// Backward implementation of split for a canonical (non-negative) axis:
// stages the section boundaries and output-gradient pointers in a device
// workspace (layout: [indices as size_t...][DType* pointers...]), copies
// them over, then gathers every input-grad element with ConcatenateKernel.
template<typename xpu>
inline void SplitOpBackwardImpl(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs,
const int real_axis) {
using namespace mshadow;
using namespace mshadow::expr;
using namespace mxnet_op;
const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
Stream<xpu> *s = ctx.get_stream<xpu>();
TBlob input_grad = outputs[split_enum::kData];
size_t leading = 1, trailing = 1;
CHECK_LT(real_axis, input_grad.ndim());
size_t mid = input_grad.shape_[real_axis];
// NOTE(review): `leading` is computed here but never used below (only
// `mid` and `trailing` feed the kernel launch).
for (int i = 0; i < real_axis; ++i) {
leading *= input_grad.shape_[i];
}
for (int i = real_axis + 1; i < input_grad.ndim(); ++i) {
trailing *= input_grad.shape_[i];
}
size_t workspace_size = 0;
const mxnet::TShape& ishape = input_grad.shape_;
const mxnet::TShape split_pts =
(param.sections > 0) ? GetSplitIndices(ishape, real_axis, param.sections) : param.indices;
std::vector<size_t> indices;
for (const auto& section : split_pts) {
indices.push_back(section);
}
// With explicit indices there is no trailing boundary; append the axis end.
if (param.sections == 0) {
indices.push_back(ishape[real_axis]);
}
workspace_size += indices.size() * sizeof(size_t);
MSHADOW_TYPE_SWITCH(input_grad.type_flag_, DType, {
std::vector<DType*> out_grads;
for (const TBlob& output_grad : inputs) {
out_grads.push_back(output_grad.dptr<DType>());
}
workspace_size += out_grads.size() * sizeof(DType*);
// Workspace holds the boundary array followed by the out-grad pointers.
Tensor<xpu, 1, char> workspace =
ctx.requested[0].get_space_typed<xpu, 1, char>(Shape1(workspace_size), s);
Tensor<cpu, 1, size_t> indices_cpu_tensor(indices.data(), Shape1(indices.size()));
Tensor<xpu, 1, size_t> indices_xpu_tensor(
reinterpret_cast<size_t*>(workspace.dptr_), Shape1(indices.size()));
Tensor<cpu, 1, DType*> ptrs_cpu_tensor(out_grads.data(), Shape1(inputs.size()));
Tensor<xpu, 1, DType*> ptrs_xpu_tensor(
reinterpret_cast<DType**>(workspace.dptr_ + indices.size() * sizeof(size_t)),
Shape1(inputs.size()));
mshadow::Copy(indices_xpu_tensor, indices_cpu_tensor, s);
mshadow::Copy(ptrs_xpu_tensor, ptrs_cpu_tensor, s);
Kernel<ConcatenateKernel, xpu>::Launch(
s, input_grad.Size(), ptrs_xpu_tensor.dptr_, input_grad.dptr<DType>(),
indices_xpu_tensor.dptr_, indices.size() - 1, mid, trailing);
});
}
// Backward entry point of split: expects one incoming gradient per forward
// output and a single outgoing gradient blob; canonicalizes the axis and
// delegates to SplitOpBackwardImpl.
template<typename xpu>
inline void SplitOpBackward(const nnvm::NodeAttrs& attrs,
                            const OpContext& ctx,
                            const std::vector<TBlob>& inputs,
                            const std::vector<OpReqType>& req,
                            const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mshadow::expr;
  using namespace mxnet_op;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  const int expected_grads =
      (param.sections > 0) ? param.sections : param.indices.ndim();
  CHECK_EQ(inputs.size(), expected_grads)
      << "out grad vector size mush match the output size";
  CHECK_EQ(outputs.size(), 1U);
  int canonical_axis = param.axis;
  if (canonical_axis < 0) {
    canonical_axis += outputs[split_enum::kData].ndim();
  }
  SplitOpBackwardImpl<xpu>(attrs, ctx, inputs, req, outputs, canonical_axis);
}
// Number of outputs of the split operator: `sections` when splitting evenly,
// otherwise the number of explicit split indices.
inline uint32_t SplitNumOutputs(const NodeAttrs& attrs) {
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  if (param.sections > 0) {
    return param.sections;
  }
  return param.indices.ndim();
}
} // namespace op
} // namespace mxnet
namespace std {
template<>
struct hash<mxnet::op::TransposeParam> {
  // Hashes a TransposeParam by its axes tuple. The call operator is const,
  // as required by the C++ Hash named requirement (std::hash functors must
  // be invocable on a const instance, e.g. inside unordered containers).
  size_t operator()(const mxnet::op::TransposeParam& val) const {
    size_t ret = 0;
    ret = dmlc::HashCombine(ret, val.axes);
    return ret;
  }
};
template<>
struct hash<mxnet::op::ReshapeParam> {
  // Hashes a ReshapeParam by combining all of its fields. The call operator
  // is const, as required by the C++ Hash named requirement.
  size_t operator()(const mxnet::op::ReshapeParam& val) const {
    size_t ret = 0;
    ret = dmlc::HashCombine(ret, val.target_shape);
    ret = dmlc::HashCombine(ret, val.keep_highest);
    ret = dmlc::HashCombine(ret, val.shape);
    ret = dmlc::HashCombine(ret, val.reverse);
    return ret;
  }
};
template<>
struct hash<mxnet::op::ExpandDimParam> {
  // Hashes an ExpandDimParam by its axis. The call operator is const, as
  // required by the C++ Hash named requirement.
  size_t operator()(const mxnet::op::ExpandDimParam& val) const {
    size_t ret = 0;
    ret = dmlc::HashCombine(ret, val.axis);
    return ret;
  }
};
} // namespace std
#endif // MXNET_OPERATOR_TENSOR_MATRIX_OP_INL_H_
|
omp_schedule.c | #include <unistd.h>
#include <stdlib.h>
#include <omp.h>
#include <stdio.h>
#define THREADS 4
#define CHUNK 3
#define N 20
/* Demonstrates OpenMP static scheduling: N iterations are dealt to THREADS
 * threads in round-robin chunks of CHUNK iterations each. */
int main(void) {
  unsigned int i;
  #pragma omp parallel for schedule(static, CHUNK) num_threads(THREADS)
  for (i = 0; i < N; i++) {
    /* wait for i seconds so longer iterations land on visible threads */
    sleep(i);
    /* %u matches the unsigned loop counter; the old %d was a
     * printf format/argument mismatch (undefined behavior). */
    printf("Thread %d has completed iteration %u.\n", omp_get_thread_num(), i);
  }
  /* all threads done */
  printf("All done!\n");
  return 0;
}
|
surfmorph_codegen.h |
////////////////////////////////////////////////////////////////////////////////
// This code has been automatically generated with SymPy ///////////////////////
////////////////////////////////////////////////////////////////////////////////
#ifndef SURFMORPH_CODEGEN_H
#define SURFMORPH_CODEGEN_H
namespace surfmorph
{
template<typename ScalarT,
typename IdxT,
typename PointT,
typename VectorT,
typename MatrixT,
typename CoeffsT,
typename IndepsT>
void updateInterpolationSystem
(
const PointT &,
const PointT &,
const PointT &,
const PointT &p3,
const IdxT p0Idx,
const IdxT p1Idx,
const IdxT p2Idx,
const IdxT p3Idx,
const MatrixT &uInv,
const MatrixT &m,
const VectorT &t,
const ScalarT Ai,
const ScalarT alpha,
CoeffsT &coeffs,
IndepsT &indeps
)
{
// Make sure coeff and indeps data is allocated
#ifdef SURFMORPH_PARALLELIZE_INTERPOLATION
#pragma omp single
#endif
{
coeffs[{3 * p0Idx + 0, 3 * p0Idx + 0}] = coeffs[{3 * p0Idx + 0, 3 * p0Idx + 0}];
coeffs[{3 * p0Idx + 0, 3 * p1Idx + 0}] = coeffs[{3 * p0Idx + 0, 3 * p1Idx + 0}];
coeffs[{3 * p0Idx + 0, 3 * p2Idx + 0}] = coeffs[{3 * p0Idx + 0, 3 * p2Idx + 0}];
coeffs[{3 * p0Idx + 0, 3 * p3Idx + 0}] = coeffs[{3 * p0Idx + 0, 3 * p3Idx + 0}];
indeps[3 * p0Idx + 0] = indeps[3 * p0Idx + 0];
coeffs[{3 * p0Idx + 1, 3 * p0Idx + 1}] = coeffs[{3 * p0Idx + 1, 3 * p0Idx + 1}];
coeffs[{3 * p0Idx + 1, 3 * p1Idx + 1}] = coeffs[{3 * p0Idx + 1, 3 * p1Idx + 1}];
coeffs[{3 * p0Idx + 1, 3 * p2Idx + 1}] = coeffs[{3 * p0Idx + 1, 3 * p2Idx + 1}];
coeffs[{3 * p0Idx + 1, 3 * p3Idx + 1}] = coeffs[{3 * p0Idx + 1, 3 * p3Idx + 1}];
indeps[3 * p0Idx + 1] = indeps[3 * p0Idx + 1];
coeffs[{3 * p0Idx + 2, 3 * p0Idx + 2}] = coeffs[{3 * p0Idx + 2, 3 * p0Idx + 2}];
coeffs[{3 * p0Idx + 2, 3 * p1Idx + 2}] = coeffs[{3 * p0Idx + 2, 3 * p1Idx + 2}];
coeffs[{3 * p0Idx + 2, 3 * p2Idx + 2}] = coeffs[{3 * p0Idx + 2, 3 * p2Idx + 2}];
coeffs[{3 * p0Idx + 2, 3 * p3Idx + 2}] = coeffs[{3 * p0Idx + 2, 3 * p3Idx + 2}];
indeps[3 * p0Idx + 2] = indeps[3 * p0Idx + 2];
coeffs[{3 * p1Idx + 0, 3 * p0Idx + 0}] = coeffs[{3 * p1Idx + 0, 3 * p0Idx + 0}];
coeffs[{3 * p1Idx + 0, 3 * p1Idx + 0}] = coeffs[{3 * p1Idx + 0, 3 * p1Idx + 0}];
coeffs[{3 * p1Idx + 0, 3 * p2Idx + 0}] = coeffs[{3 * p1Idx + 0, 3 * p2Idx + 0}];
coeffs[{3 * p1Idx + 0, 3 * p3Idx + 0}] = coeffs[{3 * p1Idx + 0, 3 * p3Idx + 0}];
indeps[3 * p1Idx + 0] = indeps[3 * p1Idx + 0];
coeffs[{3 * p1Idx + 1, 3 * p0Idx + 1}] = coeffs[{3 * p1Idx + 1, 3 * p0Idx + 1}];
coeffs[{3 * p1Idx + 1, 3 * p1Idx + 1}] = coeffs[{3 * p1Idx + 1, 3 * p1Idx + 1}];
coeffs[{3 * p1Idx + 1, 3 * p2Idx + 1}] = coeffs[{3 * p1Idx + 1, 3 * p2Idx + 1}];
coeffs[{3 * p1Idx + 1, 3 * p3Idx + 1}] = coeffs[{3 * p1Idx + 1, 3 * p3Idx + 1}];
indeps[3 * p1Idx + 1] = indeps[3 * p1Idx + 1];
coeffs[{3 * p1Idx + 2, 3 * p0Idx + 2}] = coeffs[{3 * p1Idx + 2, 3 * p0Idx + 2}];
coeffs[{3 * p1Idx + 2, 3 * p1Idx + 2}] = coeffs[{3 * p1Idx + 2, 3 * p1Idx + 2}];
coeffs[{3 * p1Idx + 2, 3 * p2Idx + 2}] = coeffs[{3 * p1Idx + 2, 3 * p2Idx + 2}];
coeffs[{3 * p1Idx + 2, 3 * p3Idx + 2}] = coeffs[{3 * p1Idx + 2, 3 * p3Idx + 2}];
indeps[3 * p1Idx + 2] = indeps[3 * p1Idx + 2];
coeffs[{3 * p2Idx + 0, 3 * p0Idx + 0}] = coeffs[{3 * p2Idx + 0, 3 * p0Idx + 0}];
coeffs[{3 * p2Idx + 0, 3 * p1Idx + 0}] = coeffs[{3 * p2Idx + 0, 3 * p1Idx + 0}];
coeffs[{3 * p2Idx + 0, 3 * p2Idx + 0}] = coeffs[{3 * p2Idx + 0, 3 * p2Idx + 0}];
coeffs[{3 * p2Idx + 0, 3 * p3Idx + 0}] = coeffs[{3 * p2Idx + 0, 3 * p3Idx + 0}];
indeps[3 * p2Idx + 0] = indeps[3 * p2Idx + 0];
coeffs[{3 * p2Idx + 1, 3 * p0Idx + 1}] = coeffs[{3 * p2Idx + 1, 3 * p0Idx + 1}];
coeffs[{3 * p2Idx + 1, 3 * p1Idx + 1}] = coeffs[{3 * p2Idx + 1, 3 * p1Idx + 1}];
coeffs[{3 * p2Idx + 1, 3 * p2Idx + 1}] = coeffs[{3 * p2Idx + 1, 3 * p2Idx + 1}];
coeffs[{3 * p2Idx + 1, 3 * p3Idx + 1}] = coeffs[{3 * p2Idx + 1, 3 * p3Idx + 1}];
indeps[3 * p2Idx + 1] = indeps[3 * p2Idx + 1];
coeffs[{3 * p2Idx + 2, 3 * p0Idx + 2}] = coeffs[{3 * p2Idx + 2, 3 * p0Idx + 2}];
coeffs[{3 * p2Idx + 2, 3 * p1Idx + 2}] = coeffs[{3 * p2Idx + 2, 3 * p1Idx + 2}];
coeffs[{3 * p2Idx + 2, 3 * p2Idx + 2}] = coeffs[{3 * p2Idx + 2, 3 * p2Idx + 2}];
coeffs[{3 * p2Idx + 2, 3 * p3Idx + 2}] = coeffs[{3 * p2Idx + 2, 3 * p3Idx + 2}];
indeps[3 * p2Idx + 2] = indeps[3 * p2Idx + 2];
coeffs[{3 * p3Idx + 0, 3 * p0Idx + 0}] = coeffs[{3 * p3Idx + 0, 3 * p0Idx + 0}];
coeffs[{3 * p3Idx + 0, 3 * p1Idx + 0}] = coeffs[{3 * p3Idx + 0, 3 * p1Idx + 0}];
coeffs[{3 * p3Idx + 0, 3 * p2Idx + 0}] = coeffs[{3 * p3Idx + 0, 3 * p2Idx + 0}];
coeffs[{3 * p3Idx + 0, 3 * p3Idx + 0}] = coeffs[{3 * p3Idx + 0, 3 * p3Idx + 0}];
indeps[3 * p3Idx + 0] = indeps[3 * p3Idx + 0];
coeffs[{3 * p3Idx + 1, 3 * p0Idx + 1}] = coeffs[{3 * p3Idx + 1, 3 * p0Idx + 1}];
coeffs[{3 * p3Idx + 1, 3 * p1Idx + 1}] = coeffs[{3 * p3Idx + 1, 3 * p1Idx + 1}];
coeffs[{3 * p3Idx + 1, 3 * p2Idx + 1}] = coeffs[{3 * p3Idx + 1, 3 * p2Idx + 1}];
coeffs[{3 * p3Idx + 1, 3 * p3Idx + 1}] = coeffs[{3 * p3Idx + 1, 3 * p3Idx + 1}];
indeps[3 * p3Idx + 1] = indeps[3 * p3Idx + 1];
coeffs[{3 * p3Idx + 2, 3 * p0Idx + 2}] = coeffs[{3 * p3Idx + 2, 3 * p0Idx + 2}];
coeffs[{3 * p3Idx + 2, 3 * p1Idx + 2}] = coeffs[{3 * p3Idx + 2, 3 * p1Idx + 2}];
coeffs[{3 * p3Idx + 2, 3 * p2Idx + 2}] = coeffs[{3 * p3Idx + 2, 3 * p2Idx + 2}];
coeffs[{3 * p3Idx + 2, 3 * p3Idx + 2}] = coeffs[{3 * p3Idx + 2, 3 * p3Idx + 2}];
indeps[3 * p3Idx + 2] = indeps[3 * p3Idx + 2];
}
#ifdef SURFMORPH_PARALLELIZE_INTERPOLATION
#pragma omp sections
#endif
{
#ifdef SURFMORPH_PARALLELIZE_INTERPOLATION
#pragma omp section
#endif
{
// Equation 1
coeffs[{3 * p0Idx + 0, 3 * p0Idx + 0}] += 2*Ai*(alpha*(p3(0)*p3(0))*(uInv(0, 0)*uInv(0, 0)) + 2*alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(0, 1) + 2*alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(0, 2) + alpha*(p3(1)*p3(1))*(uInv(0, 1)*uInv(0, 1)) + 2*alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(0, 2) + alpha*(p3(2)*p3(2))*(uInv(0, 2)*uInv(0, 2)) + (uInv(0, 0)*uInv(0, 0)) + (uInv(0, 1)*uInv(0, 1)) + (uInv(0, 2)*uInv(0, 2)));
coeffs[{3 * p0Idx + 0, 3 * p1Idx + 0}] += 2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(1, 0) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(1, 0) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(1, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(1, 1) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(1, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(1, 2) + uInv(0, 0)*uInv(1, 0) + uInv(0, 1)*uInv(1, 1) + uInv(0, 2)*uInv(1, 2));
coeffs[{3 * p0Idx + 0, 3 * p2Idx + 0}] += 2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(2, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(2, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(2, 2) + uInv(0, 0)*uInv(2, 0) + uInv(0, 1)*uInv(2, 1) + uInv(0, 2)*uInv(2, 2));
coeffs[{3 * p0Idx + 0, 3 * p3Idx + 0}] += -2*Ai*(alpha*(p3(0)*p3(0))*(uInv(0, 0)*uInv(0, 0)) + alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(1, 0) + alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(2, 0) + 2*alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(0, 1) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(1, 0) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(2, 0) + 2*alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(0, 2) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(1, 0) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(2, 0) + alpha*p3(0)*uInv(0, 0) + alpha*(p3(1)*p3(1))*(uInv(0, 1)*uInv(0, 1)) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(1, 1) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(2, 1) + 2*alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(0, 2) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(1, 1) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(2, 1) + alpha*p3(1)*uInv(0, 1) + alpha*(p3(2)*p3(2))*(uInv(0, 2)*uInv(0, 2)) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(1, 2) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(2, 2) + alpha*p3(2)*uInv(0, 2) + (uInv(0, 0)*uInv(0, 0)) + uInv(0, 0)*uInv(1, 0) + uInv(0, 0)*uInv(2, 0) + (uInv(0, 1)*uInv(0, 1)) + uInv(0, 1)*uInv(1, 1) + uInv(0, 1)*uInv(2, 1) + (uInv(0, 2)*uInv(0, 2)) + uInv(0, 2)*uInv(1, 2) + uInv(0, 2)*uInv(2, 2));
indeps[3 * p0Idx + 0] += -2*Ai*(alpha*p3(0)*t(0)*uInv(0, 0) + alpha*p3(1)*t(0)*uInv(0, 1) + alpha*p3(2)*t(0)*uInv(0, 2) - m(0, 0)*uInv(0, 0) - m(0, 1)*uInv(0, 1) - m(0, 2)*uInv(0, 2));
}
#ifdef SURFMORPH_PARALLELIZE_INTERPOLATION
#pragma omp section
#endif
{
// Equation 2
coeffs[{3 * p0Idx + 1, 3 * p0Idx + 1}] += 2*Ai*(alpha*(p3(0)*p3(0))*(uInv(0, 0)*uInv(0, 0)) + 2*alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(0, 1) + 2*alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(0, 2) + alpha*(p3(1)*p3(1))*(uInv(0, 1)*uInv(0, 1)) + 2*alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(0, 2) + alpha*(p3(2)*p3(2))*(uInv(0, 2)*uInv(0, 2)) + (uInv(0, 0)*uInv(0, 0)) + (uInv(0, 1)*uInv(0, 1)) + (uInv(0, 2)*uInv(0, 2)));
coeffs[{3 * p0Idx + 1, 3 * p1Idx + 1}] += 2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(1, 0) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(1, 0) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(1, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(1, 1) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(1, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(1, 2) + uInv(0, 0)*uInv(1, 0) + uInv(0, 1)*uInv(1, 1) + uInv(0, 2)*uInv(1, 2));
coeffs[{3 * p0Idx + 1, 3 * p2Idx + 1}] += 2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(2, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(2, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(2, 2) + uInv(0, 0)*uInv(2, 0) + uInv(0, 1)*uInv(2, 1) + uInv(0, 2)*uInv(2, 2));
coeffs[{3 * p0Idx + 1, 3 * p3Idx + 1}] += -2*Ai*(alpha*(p3(0)*p3(0))*(uInv(0, 0)*uInv(0, 0)) + alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(1, 0) + alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(2, 0) + 2*alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(0, 1) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(1, 0) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(2, 0) + 2*alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(0, 2) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(1, 0) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(2, 0) + alpha*p3(0)*uInv(0, 0) + alpha*(p3(1)*p3(1))*(uInv(0, 1)*uInv(0, 1)) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(1, 1) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(2, 1) + 2*alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(0, 2) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(1, 1) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(2, 1) + alpha*p3(1)*uInv(0, 1) + alpha*(p3(2)*p3(2))*(uInv(0, 2)*uInv(0, 2)) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(1, 2) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(2, 2) + alpha*p3(2)*uInv(0, 2) + (uInv(0, 0)*uInv(0, 0)) + uInv(0, 0)*uInv(1, 0) + uInv(0, 0)*uInv(2, 0) + (uInv(0, 1)*uInv(0, 1)) + uInv(0, 1)*uInv(1, 1) + uInv(0, 1)*uInv(2, 1) + (uInv(0, 2)*uInv(0, 2)) + uInv(0, 2)*uInv(1, 2) + uInv(0, 2)*uInv(2, 2));
indeps[3 * p0Idx + 1] += -2*Ai*(alpha*p3(0)*t(1)*uInv(0, 0) + alpha*p3(1)*t(1)*uInv(0, 1) + alpha*p3(2)*t(1)*uInv(0, 2) - m(1, 0)*uInv(0, 0) - m(1, 1)*uInv(0, 1) - m(1, 2)*uInv(0, 2));
}
#ifdef SURFMORPH_PARALLELIZE_INTERPOLATION
#pragma omp section
#endif
{
// Equation 3
coeffs[{3 * p0Idx + 2, 3 * p0Idx + 2}] += 2*Ai*(alpha*(p3(0)*p3(0))*(uInv(0, 0)*uInv(0, 0)) + 2*alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(0, 1) + 2*alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(0, 2) + alpha*(p3(1)*p3(1))*(uInv(0, 1)*uInv(0, 1)) + 2*alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(0, 2) + alpha*(p3(2)*p3(2))*(uInv(0, 2)*uInv(0, 2)) + (uInv(0, 0)*uInv(0, 0)) + (uInv(0, 1)*uInv(0, 1)) + (uInv(0, 2)*uInv(0, 2)));
coeffs[{3 * p0Idx + 2, 3 * p1Idx + 2}] += 2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(1, 0) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(1, 0) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(1, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(1, 1) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(1, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(1, 2) + uInv(0, 0)*uInv(1, 0) + uInv(0, 1)*uInv(1, 1) + uInv(0, 2)*uInv(1, 2));
coeffs[{3 * p0Idx + 2, 3 * p2Idx + 2}] += 2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(2, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(2, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(2, 2) + uInv(0, 0)*uInv(2, 0) + uInv(0, 1)*uInv(2, 1) + uInv(0, 2)*uInv(2, 2));
coeffs[{3 * p0Idx + 2, 3 * p3Idx + 2}] += -2*Ai*(alpha*(p3(0)*p3(0))*(uInv(0, 0)*uInv(0, 0)) + alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(1, 0) + alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(2, 0) + 2*alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(0, 1) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(1, 0) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(2, 0) + 2*alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(0, 2) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(1, 0) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(2, 0) + alpha*p3(0)*uInv(0, 0) + alpha*(p3(1)*p3(1))*(uInv(0, 1)*uInv(0, 1)) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(1, 1) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(2, 1) + 2*alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(0, 2) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(1, 1) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(2, 1) + alpha*p3(1)*uInv(0, 1) + alpha*(p3(2)*p3(2))*(uInv(0, 2)*uInv(0, 2)) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(1, 2) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(2, 2) + alpha*p3(2)*uInv(0, 2) + (uInv(0, 0)*uInv(0, 0)) + uInv(0, 0)*uInv(1, 0) + uInv(0, 0)*uInv(2, 0) + (uInv(0, 1)*uInv(0, 1)) + uInv(0, 1)*uInv(1, 1) + uInv(0, 1)*uInv(2, 1) + (uInv(0, 2)*uInv(0, 2)) + uInv(0, 2)*uInv(1, 2) + uInv(0, 2)*uInv(2, 2));
indeps[3 * p0Idx + 2] += -2*Ai*(alpha*p3(0)*t(2)*uInv(0, 0) + alpha*p3(1)*t(2)*uInv(0, 1) + alpha*p3(2)*t(2)*uInv(0, 2) - m(2, 0)*uInv(0, 0) - m(2, 1)*uInv(0, 1) - m(2, 2)*uInv(0, 2));
}
#ifdef SURFMORPH_PARALLELIZE_INTERPOLATION
#pragma omp section
#endif
{
// Equation 4
coeffs[{3 * p1Idx + 0, 3 * p0Idx + 0}] += 2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(1, 0) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(1, 0) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(1, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(1, 1) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(1, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(1, 2) + uInv(0, 0)*uInv(1, 0) + uInv(0, 1)*uInv(1, 1) + uInv(0, 2)*uInv(1, 2));
coeffs[{3 * p1Idx + 0, 3 * p1Idx + 0}] += 2*Ai*(alpha*(p3(0)*p3(0))*(uInv(1, 0)*uInv(1, 0)) + 2*alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(1, 1) + 2*alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(1, 2) + alpha*(p3(1)*p3(1))*(uInv(1, 1)*uInv(1, 1)) + 2*alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(1, 2) + alpha*(p3(2)*p3(2))*(uInv(1, 2)*uInv(1, 2)) + (uInv(1, 0)*uInv(1, 0)) + (uInv(1, 1)*uInv(1, 1)) + (uInv(1, 2)*uInv(1, 2)));
coeffs[{3 * p1Idx + 0, 3 * p2Idx + 0}] += 2*Ai*(alpha*(p3(0)*p3(0))*uInv(1, 0)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(1, 1)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(1, 2)*uInv(2, 0) + alpha*(p3(1)*p3(1))*uInv(1, 1)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(1, 2)*uInv(2, 1) + alpha*(p3(2)*p3(2))*uInv(1, 2)*uInv(2, 2) + uInv(1, 0)*uInv(2, 0) + uInv(1, 1)*uInv(2, 1) + uInv(1, 2)*uInv(2, 2));
coeffs[{3 * p1Idx + 0, 3 * p3Idx + 0}] += -2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(1, 0) + alpha*(p3(0)*p3(0))*(uInv(1, 0)*uInv(1, 0)) + alpha*(p3(0)*p3(0))*uInv(1, 0)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(1, 0) + 2*alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(1, 1)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(1, 0) + 2*alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(1, 2)*uInv(2, 0) + alpha*p3(0)*uInv(1, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(1, 1) + alpha*(p3(1)*p3(1))*(uInv(1, 1)*uInv(1, 1)) + alpha*(p3(1)*p3(1))*uInv(1, 1)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(1, 1) + 2*alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(1, 2)*uInv(2, 1) + alpha*p3(1)*uInv(1, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(1, 2) + alpha*(p3(2)*p3(2))*(uInv(1, 2)*uInv(1, 2)) + alpha*(p3(2)*p3(2))*uInv(1, 2)*uInv(2, 2) + alpha*p3(2)*uInv(1, 2) + uInv(0, 0)*uInv(1, 0) + uInv(0, 1)*uInv(1, 1) + uInv(0, 2)*uInv(1, 2) + (uInv(1, 0)*uInv(1, 0)) + uInv(1, 0)*uInv(2, 0) + (uInv(1, 1)*uInv(1, 1)) + uInv(1, 1)*uInv(2, 1) + (uInv(1, 2)*uInv(1, 2)) + uInv(1, 2)*uInv(2, 2));
indeps[3 * p1Idx + 0] += -2*Ai*(alpha*p3(0)*t(0)*uInv(1, 0) + alpha*p3(1)*t(0)*uInv(1, 1) + alpha*p3(2)*t(0)*uInv(1, 2) - m(0, 0)*uInv(1, 0) - m(0, 1)*uInv(1, 1) - m(0, 2)*uInv(1, 2));
}
#ifdef SURFMORPH_PARALLELIZE_INTERPOLATION
#pragma omp section
#endif
{
// Equation 5
coeffs[{3 * p1Idx + 1, 3 * p0Idx + 1}] += 2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(1, 0) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(1, 0) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(1, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(1, 1) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(1, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(1, 2) + uInv(0, 0)*uInv(1, 0) + uInv(0, 1)*uInv(1, 1) + uInv(0, 2)*uInv(1, 2));
coeffs[{3 * p1Idx + 1, 3 * p1Idx + 1}] += 2*Ai*(alpha*(p3(0)*p3(0))*(uInv(1, 0)*uInv(1, 0)) + 2*alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(1, 1) + 2*alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(1, 2) + alpha*(p3(1)*p3(1))*(uInv(1, 1)*uInv(1, 1)) + 2*alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(1, 2) + alpha*(p3(2)*p3(2))*(uInv(1, 2)*uInv(1, 2)) + (uInv(1, 0)*uInv(1, 0)) + (uInv(1, 1)*uInv(1, 1)) + (uInv(1, 2)*uInv(1, 2)));
coeffs[{3 * p1Idx + 1, 3 * p2Idx + 1}] += 2*Ai*(alpha*(p3(0)*p3(0))*uInv(1, 0)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(1, 1)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(1, 2)*uInv(2, 0) + alpha*(p3(1)*p3(1))*uInv(1, 1)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(1, 2)*uInv(2, 1) + alpha*(p3(2)*p3(2))*uInv(1, 2)*uInv(2, 2) + uInv(1, 0)*uInv(2, 0) + uInv(1, 1)*uInv(2, 1) + uInv(1, 2)*uInv(2, 2));
coeffs[{3 * p1Idx + 1, 3 * p3Idx + 1}] += -2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(1, 0) + alpha*(p3(0)*p3(0))*(uInv(1, 0)*uInv(1, 0)) + alpha*(p3(0)*p3(0))*uInv(1, 0)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(1, 0) + 2*alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(1, 1)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(1, 0) + 2*alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(1, 2)*uInv(2, 0) + alpha*p3(0)*uInv(1, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(1, 1) + alpha*(p3(1)*p3(1))*(uInv(1, 1)*uInv(1, 1)) + alpha*(p3(1)*p3(1))*uInv(1, 1)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(1, 1) + 2*alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(1, 2)*uInv(2, 1) + alpha*p3(1)*uInv(1, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(1, 2) + alpha*(p3(2)*p3(2))*(uInv(1, 2)*uInv(1, 2)) + alpha*(p3(2)*p3(2))*uInv(1, 2)*uInv(2, 2) + alpha*p3(2)*uInv(1, 2) + uInv(0, 0)*uInv(1, 0) + uInv(0, 1)*uInv(1, 1) + uInv(0, 2)*uInv(1, 2) + (uInv(1, 0)*uInv(1, 0)) + uInv(1, 0)*uInv(2, 0) + (uInv(1, 1)*uInv(1, 1)) + uInv(1, 1)*uInv(2, 1) + (uInv(1, 2)*uInv(1, 2)) + uInv(1, 2)*uInv(2, 2));
indeps[3 * p1Idx + 1] += -2*Ai*(alpha*p3(0)*t(1)*uInv(1, 0) + alpha*p3(1)*t(1)*uInv(1, 1) + alpha*p3(2)*t(1)*uInv(1, 2) - m(1, 0)*uInv(1, 0) - m(1, 1)*uInv(1, 1) - m(1, 2)*uInv(1, 2));
}
#ifdef SURFMORPH_PARALLELIZE_INTERPOLATION
#pragma omp section
#endif
{
// Equation 6
coeffs[{3 * p1Idx + 2, 3 * p0Idx + 2}] += 2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(1, 0) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(1, 0) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(1, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(1, 1) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(1, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(1, 2) + uInv(0, 0)*uInv(1, 0) + uInv(0, 1)*uInv(1, 1) + uInv(0, 2)*uInv(1, 2));
coeffs[{3 * p1Idx + 2, 3 * p1Idx + 2}] += 2*Ai*(alpha*(p3(0)*p3(0))*(uInv(1, 0)*uInv(1, 0)) + 2*alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(1, 1) + 2*alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(1, 2) + alpha*(p3(1)*p3(1))*(uInv(1, 1)*uInv(1, 1)) + 2*alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(1, 2) + alpha*(p3(2)*p3(2))*(uInv(1, 2)*uInv(1, 2)) + (uInv(1, 0)*uInv(1, 0)) + (uInv(1, 1)*uInv(1, 1)) + (uInv(1, 2)*uInv(1, 2)));
coeffs[{3 * p1Idx + 2, 3 * p2Idx + 2}] += 2*Ai*(alpha*(p3(0)*p3(0))*uInv(1, 0)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(1, 1)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(1, 2)*uInv(2, 0) + alpha*(p3(1)*p3(1))*uInv(1, 1)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(1, 2)*uInv(2, 1) + alpha*(p3(2)*p3(2))*uInv(1, 2)*uInv(2, 2) + uInv(1, 0)*uInv(2, 0) + uInv(1, 1)*uInv(2, 1) + uInv(1, 2)*uInv(2, 2));
coeffs[{3 * p1Idx + 2, 3 * p3Idx + 2}] += -2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(1, 0) + alpha*(p3(0)*p3(0))*(uInv(1, 0)*uInv(1, 0)) + alpha*(p3(0)*p3(0))*uInv(1, 0)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(1, 0) + 2*alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(1, 1)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(1, 0) + 2*alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(1, 2)*uInv(2, 0) + alpha*p3(0)*uInv(1, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(1, 1) + alpha*(p3(1)*p3(1))*(uInv(1, 1)*uInv(1, 1)) + alpha*(p3(1)*p3(1))*uInv(1, 1)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(1, 1) + 2*alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(1, 2)*uInv(2, 1) + alpha*p3(1)*uInv(1, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(1, 2) + alpha*(p3(2)*p3(2))*(uInv(1, 2)*uInv(1, 2)) + alpha*(p3(2)*p3(2))*uInv(1, 2)*uInv(2, 2) + alpha*p3(2)*uInv(1, 2) + uInv(0, 0)*uInv(1, 0) + uInv(0, 1)*uInv(1, 1) + uInv(0, 2)*uInv(1, 2) + (uInv(1, 0)*uInv(1, 0)) + uInv(1, 0)*uInv(2, 0) + (uInv(1, 1)*uInv(1, 1)) + uInv(1, 1)*uInv(2, 1) + (uInv(1, 2)*uInv(1, 2)) + uInv(1, 2)*uInv(2, 2));
indeps[3 * p1Idx + 2] += -2*Ai*(alpha*p3(0)*t(2)*uInv(1, 0) + alpha*p3(1)*t(2)*uInv(1, 1) + alpha*p3(2)*t(2)*uInv(1, 2) - m(2, 0)*uInv(1, 0) - m(2, 1)*uInv(1, 1) - m(2, 2)*uInv(1, 2));
}
#ifdef SURFMORPH_PARALLELIZE_INTERPOLATION
#pragma omp section
#endif
{
// Equation 7
coeffs[{3 * p2Idx + 0, 3 * p0Idx + 0}] += 2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(2, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(2, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(2, 2) + uInv(0, 0)*uInv(2, 0) + uInv(0, 1)*uInv(2, 1) + uInv(0, 2)*uInv(2, 2));
coeffs[{3 * p2Idx + 0, 3 * p1Idx + 0}] += 2*Ai*(alpha*(p3(0)*p3(0))*uInv(1, 0)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(1, 1)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(1, 2)*uInv(2, 0) + alpha*(p3(1)*p3(1))*uInv(1, 1)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(1, 2)*uInv(2, 1) + alpha*(p3(2)*p3(2))*uInv(1, 2)*uInv(2, 2) + uInv(1, 0)*uInv(2, 0) + uInv(1, 1)*uInv(2, 1) + uInv(1, 2)*uInv(2, 2));
coeffs[{3 * p2Idx + 0, 3 * p2Idx + 0}] += 2*Ai*(alpha*(p3(0)*p3(0))*(uInv(2, 0)*uInv(2, 0)) + 2*alpha*p3(0)*p3(1)*uInv(2, 0)*uInv(2, 1) + 2*alpha*p3(0)*p3(2)*uInv(2, 0)*uInv(2, 2) + alpha*(p3(1)*p3(1))*(uInv(2, 1)*uInv(2, 1)) + 2*alpha*p3(1)*p3(2)*uInv(2, 1)*uInv(2, 2) + alpha*(p3(2)*p3(2))*(uInv(2, 2)*uInv(2, 2)) + (uInv(2, 0)*uInv(2, 0)) + (uInv(2, 1)*uInv(2, 1)) + (uInv(2, 2)*uInv(2, 2)));
coeffs[{3 * p2Idx + 0, 3 * p3Idx + 0}] += -2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(2, 0) + alpha*(p3(0)*p3(0))*uInv(1, 0)*uInv(2, 0) + alpha*(p3(0)*p3(0))*(uInv(2, 0)*uInv(2, 0)) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(1, 1)*uInv(2, 0) + 2*alpha*p3(0)*p3(1)*uInv(2, 0)*uInv(2, 1) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(1, 2)*uInv(2, 0) + 2*alpha*p3(0)*p3(2)*uInv(2, 0)*uInv(2, 2) + alpha*p3(0)*uInv(2, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(2, 1) + alpha*(p3(1)*p3(1))*uInv(1, 1)*uInv(2, 1) + alpha*(p3(1)*p3(1))*(uInv(2, 1)*uInv(2, 1)) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(1, 2)*uInv(2, 1) + 2*alpha*p3(1)*p3(2)*uInv(2, 1)*uInv(2, 2) + alpha*p3(1)*uInv(2, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(2, 2) + alpha*(p3(2)*p3(2))*uInv(1, 2)*uInv(2, 2) + alpha*(p3(2)*p3(2))*(uInv(2, 2)*uInv(2, 2)) + alpha*p3(2)*uInv(2, 2) + uInv(0, 0)*uInv(2, 0) + uInv(0, 1)*uInv(2, 1) + uInv(0, 2)*uInv(2, 2) + uInv(1, 0)*uInv(2, 0) + uInv(1, 1)*uInv(2, 1) + uInv(1, 2)*uInv(2, 2) + (uInv(2, 0)*uInv(2, 0)) + (uInv(2, 1)*uInv(2, 1)) + (uInv(2, 2)*uInv(2, 2)));
indeps[3 * p2Idx + 0] += -2*Ai*(alpha*p3(0)*t(0)*uInv(2, 0) + alpha*p3(1)*t(0)*uInv(2, 1) + alpha*p3(2)*t(0)*uInv(2, 2) - m(0, 0)*uInv(2, 0) - m(0, 1)*uInv(2, 1) - m(0, 2)*uInv(2, 2));
}
#ifdef SURFMORPH_PARALLELIZE_INTERPOLATION
#pragma omp section
#endif
{
// Equation 8
coeffs[{3 * p2Idx + 1, 3 * p0Idx + 1}] += 2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(2, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(2, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(2, 2) + uInv(0, 0)*uInv(2, 0) + uInv(0, 1)*uInv(2, 1) + uInv(0, 2)*uInv(2, 2));
coeffs[{3 * p2Idx + 1, 3 * p1Idx + 1}] += 2*Ai*(alpha*(p3(0)*p3(0))*uInv(1, 0)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(1, 1)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(1, 2)*uInv(2, 0) + alpha*(p3(1)*p3(1))*uInv(1, 1)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(1, 2)*uInv(2, 1) + alpha*(p3(2)*p3(2))*uInv(1, 2)*uInv(2, 2) + uInv(1, 0)*uInv(2, 0) + uInv(1, 1)*uInv(2, 1) + uInv(1, 2)*uInv(2, 2));
coeffs[{3 * p2Idx + 1, 3 * p2Idx + 1}] += 2*Ai*(alpha*(p3(0)*p3(0))*(uInv(2, 0)*uInv(2, 0)) + 2*alpha*p3(0)*p3(1)*uInv(2, 0)*uInv(2, 1) + 2*alpha*p3(0)*p3(2)*uInv(2, 0)*uInv(2, 2) + alpha*(p3(1)*p3(1))*(uInv(2, 1)*uInv(2, 1)) + 2*alpha*p3(1)*p3(2)*uInv(2, 1)*uInv(2, 2) + alpha*(p3(2)*p3(2))*(uInv(2, 2)*uInv(2, 2)) + (uInv(2, 0)*uInv(2, 0)) + (uInv(2, 1)*uInv(2, 1)) + (uInv(2, 2)*uInv(2, 2)));
coeffs[{3 * p2Idx + 1, 3 * p3Idx + 1}] += -2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(2, 0) + alpha*(p3(0)*p3(0))*uInv(1, 0)*uInv(2, 0) + alpha*(p3(0)*p3(0))*(uInv(2, 0)*uInv(2, 0)) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(1, 1)*uInv(2, 0) + 2*alpha*p3(0)*p3(1)*uInv(2, 0)*uInv(2, 1) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(1, 2)*uInv(2, 0) + 2*alpha*p3(0)*p3(2)*uInv(2, 0)*uInv(2, 2) + alpha*p3(0)*uInv(2, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(2, 1) + alpha*(p3(1)*p3(1))*uInv(1, 1)*uInv(2, 1) + alpha*(p3(1)*p3(1))*(uInv(2, 1)*uInv(2, 1)) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(1, 2)*uInv(2, 1) + 2*alpha*p3(1)*p3(2)*uInv(2, 1)*uInv(2, 2) + alpha*p3(1)*uInv(2, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(2, 2) + alpha*(p3(2)*p3(2))*uInv(1, 2)*uInv(2, 2) + alpha*(p3(2)*p3(2))*(uInv(2, 2)*uInv(2, 2)) + alpha*p3(2)*uInv(2, 2) + uInv(0, 0)*uInv(2, 0) + uInv(0, 1)*uInv(2, 1) + uInv(0, 2)*uInv(2, 2) + uInv(1, 0)*uInv(2, 0) + uInv(1, 1)*uInv(2, 1) + uInv(1, 2)*uInv(2, 2) + (uInv(2, 0)*uInv(2, 0)) + (uInv(2, 1)*uInv(2, 1)) + (uInv(2, 2)*uInv(2, 2)));
indeps[3 * p2Idx + 1] += -2*Ai*(alpha*p3(0)*t(1)*uInv(2, 0) + alpha*p3(1)*t(1)*uInv(2, 1) + alpha*p3(2)*t(1)*uInv(2, 2) - m(1, 0)*uInv(2, 0) - m(1, 1)*uInv(2, 1) - m(1, 2)*uInv(2, 2));
}
#ifdef SURFMORPH_PARALLELIZE_INTERPOLATION
#pragma omp section
#endif
{
// Equation 9
coeffs[{3 * p2Idx + 2, 3 * p0Idx + 2}] += 2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(2, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(2, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(2, 2) + uInv(0, 0)*uInv(2, 0) + uInv(0, 1)*uInv(2, 1) + uInv(0, 2)*uInv(2, 2));
coeffs[{3 * p2Idx + 2, 3 * p1Idx + 2}] += 2*Ai*(alpha*(p3(0)*p3(0))*uInv(1, 0)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(1, 1)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(1, 2)*uInv(2, 0) + alpha*(p3(1)*p3(1))*uInv(1, 1)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(1, 2)*uInv(2, 1) + alpha*(p3(2)*p3(2))*uInv(1, 2)*uInv(2, 2) + uInv(1, 0)*uInv(2, 0) + uInv(1, 1)*uInv(2, 1) + uInv(1, 2)*uInv(2, 2));
coeffs[{3 * p2Idx + 2, 3 * p2Idx + 2}] += 2*Ai*(alpha*(p3(0)*p3(0))*(uInv(2, 0)*uInv(2, 0)) + 2*alpha*p3(0)*p3(1)*uInv(2, 0)*uInv(2, 1) + 2*alpha*p3(0)*p3(2)*uInv(2, 0)*uInv(2, 2) + alpha*(p3(1)*p3(1))*(uInv(2, 1)*uInv(2, 1)) + 2*alpha*p3(1)*p3(2)*uInv(2, 1)*uInv(2, 2) + alpha*(p3(2)*p3(2))*(uInv(2, 2)*uInv(2, 2)) + (uInv(2, 0)*uInv(2, 0)) + (uInv(2, 1)*uInv(2, 1)) + (uInv(2, 2)*uInv(2, 2)));
coeffs[{3 * p2Idx + 2, 3 * p3Idx + 2}] += -2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(2, 0) + alpha*(p3(0)*p3(0))*uInv(1, 0)*uInv(2, 0) + alpha*(p3(0)*p3(0))*(uInv(2, 0)*uInv(2, 0)) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(1, 1)*uInv(2, 0) + 2*alpha*p3(0)*p3(1)*uInv(2, 0)*uInv(2, 1) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(1, 2)*uInv(2, 0) + 2*alpha*p3(0)*p3(2)*uInv(2, 0)*uInv(2, 2) + alpha*p3(0)*uInv(2, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(2, 1) + alpha*(p3(1)*p3(1))*uInv(1, 1)*uInv(2, 1) + alpha*(p3(1)*p3(1))*(uInv(2, 1)*uInv(2, 1)) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(1, 2)*uInv(2, 1) + 2*alpha*p3(1)*p3(2)*uInv(2, 1)*uInv(2, 2) + alpha*p3(1)*uInv(2, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(2, 2) + alpha*(p3(2)*p3(2))*uInv(1, 2)*uInv(2, 2) + alpha*(p3(2)*p3(2))*(uInv(2, 2)*uInv(2, 2)) + alpha*p3(2)*uInv(2, 2) + uInv(0, 0)*uInv(2, 0) + uInv(0, 1)*uInv(2, 1) + uInv(0, 2)*uInv(2, 2) + uInv(1, 0)*uInv(2, 0) + uInv(1, 1)*uInv(2, 1) + uInv(1, 2)*uInv(2, 2) + (uInv(2, 0)*uInv(2, 0)) + (uInv(2, 1)*uInv(2, 1)) + (uInv(2, 2)*uInv(2, 2)));
indeps[3 * p2Idx + 2] += -2*Ai*(alpha*p3(0)*t(2)*uInv(2, 0) + alpha*p3(1)*t(2)*uInv(2, 1) + alpha*p3(2)*t(2)*uInv(2, 2) - m(2, 0)*uInv(2, 0) - m(2, 1)*uInv(2, 1) - m(2, 2)*uInv(2, 2));
}
#ifdef SURFMORPH_PARALLELIZE_INTERPOLATION
#pragma omp section
#endif
{
// Equation 10
coeffs[{3 * p3Idx + 0, 3 * p0Idx + 0}] += -2*Ai*(alpha*(p3(0)*p3(0))*(uInv(0, 0)*uInv(0, 0)) + alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(1, 0) + alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(2, 0) + 2*alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(0, 1) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(1, 0) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(2, 0) + 2*alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(0, 2) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(1, 0) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(2, 0) + alpha*p3(0)*uInv(0, 0) + alpha*(p3(1)*p3(1))*(uInv(0, 1)*uInv(0, 1)) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(1, 1) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(2, 1) + 2*alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(0, 2) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(1, 1) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(2, 1) + alpha*p3(1)*uInv(0, 1) + alpha*(p3(2)*p3(2))*(uInv(0, 2)*uInv(0, 2)) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(1, 2) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(2, 2) + alpha*p3(2)*uInv(0, 2) + (uInv(0, 0)*uInv(0, 0)) + uInv(0, 0)*uInv(1, 0) + uInv(0, 0)*uInv(2, 0) + (uInv(0, 1)*uInv(0, 1)) + uInv(0, 1)*uInv(1, 1) + uInv(0, 1)*uInv(2, 1) + (uInv(0, 2)*uInv(0, 2)) + uInv(0, 2)*uInv(1, 2) + uInv(0, 2)*uInv(2, 2));
coeffs[{3 * p3Idx + 0, 3 * p1Idx + 0}] += -2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(1, 0) + alpha*(p3(0)*p3(0))*(uInv(1, 0)*uInv(1, 0)) + alpha*(p3(0)*p3(0))*uInv(1, 0)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(1, 0) + 2*alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(1, 1)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(1, 0) + 2*alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(1, 2)*uInv(2, 0) + alpha*p3(0)*uInv(1, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(1, 1) + alpha*(p3(1)*p3(1))*(uInv(1, 1)*uInv(1, 1)) + alpha*(p3(1)*p3(1))*uInv(1, 1)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(1, 1) + 2*alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(1, 2)*uInv(2, 1) + alpha*p3(1)*uInv(1, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(1, 2) + alpha*(p3(2)*p3(2))*(uInv(1, 2)*uInv(1, 2)) + alpha*(p3(2)*p3(2))*uInv(1, 2)*uInv(2, 2) + alpha*p3(2)*uInv(1, 2) + uInv(0, 0)*uInv(1, 0) + uInv(0, 1)*uInv(1, 1) + uInv(0, 2)*uInv(1, 2) + (uInv(1, 0)*uInv(1, 0)) + uInv(1, 0)*uInv(2, 0) + (uInv(1, 1)*uInv(1, 1)) + uInv(1, 1)*uInv(2, 1) + (uInv(1, 2)*uInv(1, 2)) + uInv(1, 2)*uInv(2, 2));
coeffs[{3 * p3Idx + 0, 3 * p2Idx + 0}] += -2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(2, 0) + alpha*(p3(0)*p3(0))*uInv(1, 0)*uInv(2, 0) + alpha*(p3(0)*p3(0))*(uInv(2, 0)*uInv(2, 0)) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(1, 1)*uInv(2, 0) + 2*alpha*p3(0)*p3(1)*uInv(2, 0)*uInv(2, 1) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(1, 2)*uInv(2, 0) + 2*alpha*p3(0)*p3(2)*uInv(2, 0)*uInv(2, 2) + alpha*p3(0)*uInv(2, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(2, 1) + alpha*(p3(1)*p3(1))*uInv(1, 1)*uInv(2, 1) + alpha*(p3(1)*p3(1))*(uInv(2, 1)*uInv(2, 1)) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(1, 2)*uInv(2, 1) + 2*alpha*p3(1)*p3(2)*uInv(2, 1)*uInv(2, 2) + alpha*p3(1)*uInv(2, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(2, 2) + alpha*(p3(2)*p3(2))*uInv(1, 2)*uInv(2, 2) + alpha*(p3(2)*p3(2))*(uInv(2, 2)*uInv(2, 2)) + alpha*p3(2)*uInv(2, 2) + uInv(0, 0)*uInv(2, 0) + uInv(0, 1)*uInv(2, 1) + uInv(0, 2)*uInv(2, 2) + uInv(1, 0)*uInv(2, 0) + uInv(1, 1)*uInv(2, 1) + uInv(1, 2)*uInv(2, 2) + (uInv(2, 0)*uInv(2, 0)) + (uInv(2, 1)*uInv(2, 1)) + (uInv(2, 2)*uInv(2, 2)));
coeffs[{3 * p3Idx + 0, 3 * p3Idx + 0}] += 2*Ai*(alpha*(p3(0)*p3(0))*(uInv(0, 0)*uInv(0, 0)) + 2*alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(1, 0) + 2*alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(2, 0) + alpha*(p3(0)*p3(0))*(uInv(1, 0)*uInv(1, 0)) + 2*alpha*(p3(0)*p3(0))*uInv(1, 0)*uInv(2, 0) + alpha*(p3(0)*p3(0))*(uInv(2, 0)*uInv(2, 0)) + 2*alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(0, 1) + 2*alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(1, 1) + 2*alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(2, 1) + 2*alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(1, 0) + 2*alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(2, 0) + 2*alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(1, 1) + 2*alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(2, 1) + 2*alpha*p3(0)*p3(1)*uInv(1, 1)*uInv(2, 0) + 2*alpha*p3(0)*p3(1)*uInv(2, 0)*uInv(2, 1) + 2*alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(0, 2) + 2*alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(1, 2) + 2*alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(2, 2) + 2*alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(1, 0) + 2*alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(2, 0) + 2*alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(1, 2) + 2*alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(2, 2) + 2*alpha*p3(0)*p3(2)*uInv(1, 2)*uInv(2, 0) + 2*alpha*p3(0)*p3(2)*uInv(2, 0)*uInv(2, 2) + 2*alpha*p3(0)*uInv(0, 0) + 2*alpha*p3(0)*uInv(1, 0) + 2*alpha*p3(0)*uInv(2, 0) + alpha*(p3(1)*p3(1))*(uInv(0, 1)*uInv(0, 1)) + 2*alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(1, 1) + 2*alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(2, 1) + alpha*(p3(1)*p3(1))*(uInv(1, 1)*uInv(1, 1)) + 2*alpha*(p3(1)*p3(1))*uInv(1, 1)*uInv(2, 1) + alpha*(p3(1)*p3(1))*(uInv(2, 1)*uInv(2, 1)) + 2*alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(0, 2) + 2*alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(1, 2) + 2*alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(2, 2) + 2*alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(1, 1) + 2*alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(2, 1) + 2*alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(1, 2) + 2*alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(2, 2) + 2*alpha*p3(1)*p3(2)*uInv(1, 2)*uInv(2, 1) + 2*alpha*p3(1)*p3(2)*uInv(2, 1)*uInv(2, 2) + 2*alpha*p3(1)*uInv(0, 1) + 2*alpha*p3(1)*uInv(1, 1) + 2*alpha*p3(1)*uInv(2, 1) + alpha*(p3(2)*p3(2))*(uInv(0, 2)*uInv(0, 2)) + 
2*alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(1, 2) + 2*alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(2, 2) + alpha*(p3(2)*p3(2))*(uInv(1, 2)*uInv(1, 2)) + 2*alpha*(p3(2)*p3(2))*uInv(1, 2)*uInv(2, 2) + alpha*(p3(2)*p3(2))*(uInv(2, 2)*uInv(2, 2)) + 2*alpha*p3(2)*uInv(0, 2) + 2*alpha*p3(2)*uInv(1, 2) + 2*alpha*p3(2)*uInv(2, 2) + alpha + (uInv(0, 0)*uInv(0, 0)) + 2*uInv(0, 0)*uInv(1, 0) + 2*uInv(0, 0)*uInv(2, 0) + (uInv(0, 1)*uInv(0, 1)) + 2*uInv(0, 1)*uInv(1, 1) + 2*uInv(0, 1)*uInv(2, 1) + (uInv(0, 2)*uInv(0, 2)) + 2*uInv(0, 2)*uInv(1, 2) + 2*uInv(0, 2)*uInv(2, 2) + (uInv(1, 0)*uInv(1, 0)) + 2*uInv(1, 0)*uInv(2, 0) + (uInv(1, 1)*uInv(1, 1)) + 2*uInv(1, 1)*uInv(2, 1) + (uInv(1, 2)*uInv(1, 2)) + 2*uInv(1, 2)*uInv(2, 2) + (uInv(2, 0)*uInv(2, 0)) + (uInv(2, 1)*uInv(2, 1)) + (uInv(2, 2)*uInv(2, 2)));
indeps[3 * p3Idx + 0] += 2*Ai*(alpha*p3(0)*t(0)*uInv(0, 0) + alpha*p3(0)*t(0)*uInv(1, 0) + alpha*p3(0)*t(0)*uInv(2, 0) + alpha*p3(1)*t(0)*uInv(0, 1) + alpha*p3(1)*t(0)*uInv(1, 1) + alpha*p3(1)*t(0)*uInv(2, 1) + alpha*p3(2)*t(0)*uInv(0, 2) + alpha*p3(2)*t(0)*uInv(1, 2) + alpha*p3(2)*t(0)*uInv(2, 2) + alpha*t(0) - m(0, 0)*uInv(0, 0) - m(0, 0)*uInv(1, 0) - m(0, 0)*uInv(2, 0) - m(0, 1)*uInv(0, 1) - m(0, 1)*uInv(1, 1) - m(0, 1)*uInv(2, 1) - m(0, 2)*uInv(0, 2) - m(0, 2)*uInv(1, 2) - m(0, 2)*uInv(2, 2));
}
#ifdef SURFMORPH_PARALLELIZE_INTERPOLATION
#pragma omp section
#endif
{
// Equation 11
coeffs[{3 * p3Idx + 1, 3 * p0Idx + 1}] += -2*Ai*(alpha*(p3(0)*p3(0))*(uInv(0, 0)*uInv(0, 0)) + alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(1, 0) + alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(2, 0) + 2*alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(0, 1) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(1, 0) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(2, 0) + 2*alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(0, 2) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(1, 0) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(2, 0) + alpha*p3(0)*uInv(0, 0) + alpha*(p3(1)*p3(1))*(uInv(0, 1)*uInv(0, 1)) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(1, 1) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(2, 1) + 2*alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(0, 2) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(1, 1) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(2, 1) + alpha*p3(1)*uInv(0, 1) + alpha*(p3(2)*p3(2))*(uInv(0, 2)*uInv(0, 2)) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(1, 2) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(2, 2) + alpha*p3(2)*uInv(0, 2) + (uInv(0, 0)*uInv(0, 0)) + uInv(0, 0)*uInv(1, 0) + uInv(0, 0)*uInv(2, 0) + (uInv(0, 1)*uInv(0, 1)) + uInv(0, 1)*uInv(1, 1) + uInv(0, 1)*uInv(2, 1) + (uInv(0, 2)*uInv(0, 2)) + uInv(0, 2)*uInv(1, 2) + uInv(0, 2)*uInv(2, 2));
coeffs[{3 * p3Idx + 1, 3 * p1Idx + 1}] += -2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(1, 0) + alpha*(p3(0)*p3(0))*(uInv(1, 0)*uInv(1, 0)) + alpha*(p3(0)*p3(0))*uInv(1, 0)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(1, 0) + 2*alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(1, 1)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(1, 0) + 2*alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(1, 2)*uInv(2, 0) + alpha*p3(0)*uInv(1, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(1, 1) + alpha*(p3(1)*p3(1))*(uInv(1, 1)*uInv(1, 1)) + alpha*(p3(1)*p3(1))*uInv(1, 1)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(1, 1) + 2*alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(1, 2)*uInv(2, 1) + alpha*p3(1)*uInv(1, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(1, 2) + alpha*(p3(2)*p3(2))*(uInv(1, 2)*uInv(1, 2)) + alpha*(p3(2)*p3(2))*uInv(1, 2)*uInv(2, 2) + alpha*p3(2)*uInv(1, 2) + uInv(0, 0)*uInv(1, 0) + uInv(0, 1)*uInv(1, 1) + uInv(0, 2)*uInv(1, 2) + (uInv(1, 0)*uInv(1, 0)) + uInv(1, 0)*uInv(2, 0) + (uInv(1, 1)*uInv(1, 1)) + uInv(1, 1)*uInv(2, 1) + (uInv(1, 2)*uInv(1, 2)) + uInv(1, 2)*uInv(2, 2));
coeffs[{3 * p3Idx + 1, 3 * p2Idx + 1}] += -2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(2, 0) + alpha*(p3(0)*p3(0))*uInv(1, 0)*uInv(2, 0) + alpha*(p3(0)*p3(0))*(uInv(2, 0)*uInv(2, 0)) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(1, 1)*uInv(2, 0) + 2*alpha*p3(0)*p3(1)*uInv(2, 0)*uInv(2, 1) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(1, 2)*uInv(2, 0) + 2*alpha*p3(0)*p3(2)*uInv(2, 0)*uInv(2, 2) + alpha*p3(0)*uInv(2, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(2, 1) + alpha*(p3(1)*p3(1))*uInv(1, 1)*uInv(2, 1) + alpha*(p3(1)*p3(1))*(uInv(2, 1)*uInv(2, 1)) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(1, 2)*uInv(2, 1) + 2*alpha*p3(1)*p3(2)*uInv(2, 1)*uInv(2, 2) + alpha*p3(1)*uInv(2, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(2, 2) + alpha*(p3(2)*p3(2))*uInv(1, 2)*uInv(2, 2) + alpha*(p3(2)*p3(2))*(uInv(2, 2)*uInv(2, 2)) + alpha*p3(2)*uInv(2, 2) + uInv(0, 0)*uInv(2, 0) + uInv(0, 1)*uInv(2, 1) + uInv(0, 2)*uInv(2, 2) + uInv(1, 0)*uInv(2, 0) + uInv(1, 1)*uInv(2, 1) + uInv(1, 2)*uInv(2, 2) + (uInv(2, 0)*uInv(2, 0)) + (uInv(2, 1)*uInv(2, 1)) + (uInv(2, 2)*uInv(2, 2)));
coeffs[{3 * p3Idx + 1, 3 * p3Idx + 1}] += 2*Ai*(alpha*(p3(0)*p3(0))*(uInv(0, 0)*uInv(0, 0)) + 2*alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(1, 0) + 2*alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(2, 0) + alpha*(p3(0)*p3(0))*(uInv(1, 0)*uInv(1, 0)) + 2*alpha*(p3(0)*p3(0))*uInv(1, 0)*uInv(2, 0) + alpha*(p3(0)*p3(0))*(uInv(2, 0)*uInv(2, 0)) + 2*alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(0, 1) + 2*alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(1, 1) + 2*alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(2, 1) + 2*alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(1, 0) + 2*alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(2, 0) + 2*alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(1, 1) + 2*alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(2, 1) + 2*alpha*p3(0)*p3(1)*uInv(1, 1)*uInv(2, 0) + 2*alpha*p3(0)*p3(1)*uInv(2, 0)*uInv(2, 1) + 2*alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(0, 2) + 2*alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(1, 2) + 2*alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(2, 2) + 2*alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(1, 0) + 2*alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(2, 0) + 2*alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(1, 2) + 2*alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(2, 2) + 2*alpha*p3(0)*p3(2)*uInv(1, 2)*uInv(2, 0) + 2*alpha*p3(0)*p3(2)*uInv(2, 0)*uInv(2, 2) + 2*alpha*p3(0)*uInv(0, 0) + 2*alpha*p3(0)*uInv(1, 0) + 2*alpha*p3(0)*uInv(2, 0) + alpha*(p3(1)*p3(1))*(uInv(0, 1)*uInv(0, 1)) + 2*alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(1, 1) + 2*alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(2, 1) + alpha*(p3(1)*p3(1))*(uInv(1, 1)*uInv(1, 1)) + 2*alpha*(p3(1)*p3(1))*uInv(1, 1)*uInv(2, 1) + alpha*(p3(1)*p3(1))*(uInv(2, 1)*uInv(2, 1)) + 2*alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(0, 2) + 2*alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(1, 2) + 2*alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(2, 2) + 2*alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(1, 1) + 2*alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(2, 1) + 2*alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(1, 2) + 2*alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(2, 2) + 2*alpha*p3(1)*p3(2)*uInv(1, 2)*uInv(2, 1) + 2*alpha*p3(1)*p3(2)*uInv(2, 1)*uInv(2, 2) + 2*alpha*p3(1)*uInv(0, 1) + 2*alpha*p3(1)*uInv(1, 1) + 2*alpha*p3(1)*uInv(2, 1) + alpha*(p3(2)*p3(2))*(uInv(0, 2)*uInv(0, 2)) + 
2*alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(1, 2) + 2*alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(2, 2) + alpha*(p3(2)*p3(2))*(uInv(1, 2)*uInv(1, 2)) + 2*alpha*(p3(2)*p3(2))*uInv(1, 2)*uInv(2, 2) + alpha*(p3(2)*p3(2))*(uInv(2, 2)*uInv(2, 2)) + 2*alpha*p3(2)*uInv(0, 2) + 2*alpha*p3(2)*uInv(1, 2) + 2*alpha*p3(2)*uInv(2, 2) + alpha + (uInv(0, 0)*uInv(0, 0)) + 2*uInv(0, 0)*uInv(1, 0) + 2*uInv(0, 0)*uInv(2, 0) + (uInv(0, 1)*uInv(0, 1)) + 2*uInv(0, 1)*uInv(1, 1) + 2*uInv(0, 1)*uInv(2, 1) + (uInv(0, 2)*uInv(0, 2)) + 2*uInv(0, 2)*uInv(1, 2) + 2*uInv(0, 2)*uInv(2, 2) + (uInv(1, 0)*uInv(1, 0)) + 2*uInv(1, 0)*uInv(2, 0) + (uInv(1, 1)*uInv(1, 1)) + 2*uInv(1, 1)*uInv(2, 1) + (uInv(1, 2)*uInv(1, 2)) + 2*uInv(1, 2)*uInv(2, 2) + (uInv(2, 0)*uInv(2, 0)) + (uInv(2, 1)*uInv(2, 1)) + (uInv(2, 2)*uInv(2, 2)));
indeps[3 * p3Idx + 1] += 2*Ai*(alpha*p3(0)*t(1)*uInv(0, 0) + alpha*p3(0)*t(1)*uInv(1, 0) + alpha*p3(0)*t(1)*uInv(2, 0) + alpha*p3(1)*t(1)*uInv(0, 1) + alpha*p3(1)*t(1)*uInv(1, 1) + alpha*p3(1)*t(1)*uInv(2, 1) + alpha*p3(2)*t(1)*uInv(0, 2) + alpha*p3(2)*t(1)*uInv(1, 2) + alpha*p3(2)*t(1)*uInv(2, 2) + alpha*t(1) - m(1, 0)*uInv(0, 0) - m(1, 0)*uInv(1, 0) - m(1, 0)*uInv(2, 0) - m(1, 1)*uInv(0, 1) - m(1, 1)*uInv(1, 1) - m(1, 1)*uInv(2, 1) - m(1, 2)*uInv(0, 2) - m(1, 2)*uInv(1, 2) - m(1, 2)*uInv(2, 2));
}
#ifdef SURFMORPH_PARALLELIZE_INTERPOLATION
#pragma omp section
#endif
{
// Equation 12
coeffs[{3 * p3Idx + 2, 3 * p0Idx + 2}] += -2*Ai*(alpha*(p3(0)*p3(0))*(uInv(0, 0)*uInv(0, 0)) + alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(1, 0) + alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(2, 0) + 2*alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(0, 1) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(1, 0) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(2, 0) + 2*alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(0, 2) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(1, 0) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(2, 0) + alpha*p3(0)*uInv(0, 0) + alpha*(p3(1)*p3(1))*(uInv(0, 1)*uInv(0, 1)) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(1, 1) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(2, 1) + 2*alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(0, 2) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(1, 1) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(2, 1) + alpha*p3(1)*uInv(0, 1) + alpha*(p3(2)*p3(2))*(uInv(0, 2)*uInv(0, 2)) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(1, 2) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(2, 2) + alpha*p3(2)*uInv(0, 2) + (uInv(0, 0)*uInv(0, 0)) + uInv(0, 0)*uInv(1, 0) + uInv(0, 0)*uInv(2, 0) + (uInv(0, 1)*uInv(0, 1)) + uInv(0, 1)*uInv(1, 1) + uInv(0, 1)*uInv(2, 1) + (uInv(0, 2)*uInv(0, 2)) + uInv(0, 2)*uInv(1, 2) + uInv(0, 2)*uInv(2, 2));
coeffs[{3 * p3Idx + 2, 3 * p1Idx + 2}] += -2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(1, 0) + alpha*(p3(0)*p3(0))*(uInv(1, 0)*uInv(1, 0)) + alpha*(p3(0)*p3(0))*uInv(1, 0)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(1, 0) + 2*alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(1, 1) + alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(1, 1)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(1, 0) + 2*alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(1, 2) + alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(1, 2)*uInv(2, 0) + alpha*p3(0)*uInv(1, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(1, 1) + alpha*(p3(1)*p3(1))*(uInv(1, 1)*uInv(1, 1)) + alpha*(p3(1)*p3(1))*uInv(1, 1)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(1, 1) + 2*alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(1, 2) + alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(1, 2)*uInv(2, 1) + alpha*p3(1)*uInv(1, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(1, 2) + alpha*(p3(2)*p3(2))*(uInv(1, 2)*uInv(1, 2)) + alpha*(p3(2)*p3(2))*uInv(1, 2)*uInv(2, 2) + alpha*p3(2)*uInv(1, 2) + uInv(0, 0)*uInv(1, 0) + uInv(0, 1)*uInv(1, 1) + uInv(0, 2)*uInv(1, 2) + (uInv(1, 0)*uInv(1, 0)) + uInv(1, 0)*uInv(2, 0) + (uInv(1, 1)*uInv(1, 1)) + uInv(1, 1)*uInv(2, 1) + (uInv(1, 2)*uInv(1, 2)) + uInv(1, 2)*uInv(2, 2));
coeffs[{3 * p3Idx + 2, 3 * p2Idx + 2}] += -2*Ai*(alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(2, 0) + alpha*(p3(0)*p3(0))*uInv(1, 0)*uInv(2, 0) + alpha*(p3(0)*p3(0))*(uInv(2, 0)*uInv(2, 0)) + alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(2, 0) + alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(2, 1) + alpha*p3(0)*p3(1)*uInv(1, 1)*uInv(2, 0) + 2*alpha*p3(0)*p3(1)*uInv(2, 0)*uInv(2, 1) + alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(2, 0) + alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(2, 2) + alpha*p3(0)*p3(2)*uInv(1, 2)*uInv(2, 0) + 2*alpha*p3(0)*p3(2)*uInv(2, 0)*uInv(2, 2) + alpha*p3(0)*uInv(2, 0) + alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(2, 1) + alpha*(p3(1)*p3(1))*uInv(1, 1)*uInv(2, 1) + alpha*(p3(1)*p3(1))*(uInv(2, 1)*uInv(2, 1)) + alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(2, 1) + alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(2, 2) + alpha*p3(1)*p3(2)*uInv(1, 2)*uInv(2, 1) + 2*alpha*p3(1)*p3(2)*uInv(2, 1)*uInv(2, 2) + alpha*p3(1)*uInv(2, 1) + alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(2, 2) + alpha*(p3(2)*p3(2))*uInv(1, 2)*uInv(2, 2) + alpha*(p3(2)*p3(2))*(uInv(2, 2)*uInv(2, 2)) + alpha*p3(2)*uInv(2, 2) + uInv(0, 0)*uInv(2, 0) + uInv(0, 1)*uInv(2, 1) + uInv(0, 2)*uInv(2, 2) + uInv(1, 0)*uInv(2, 0) + uInv(1, 1)*uInv(2, 1) + uInv(1, 2)*uInv(2, 2) + (uInv(2, 0)*uInv(2, 0)) + (uInv(2, 1)*uInv(2, 1)) + (uInv(2, 2)*uInv(2, 2)));
coeffs[{3 * p3Idx + 2, 3 * p3Idx + 2}] += 2*Ai*(alpha*(p3(0)*p3(0))*(uInv(0, 0)*uInv(0, 0)) + 2*alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(1, 0) + 2*alpha*(p3(0)*p3(0))*uInv(0, 0)*uInv(2, 0) + alpha*(p3(0)*p3(0))*(uInv(1, 0)*uInv(1, 0)) + 2*alpha*(p3(0)*p3(0))*uInv(1, 0)*uInv(2, 0) + alpha*(p3(0)*p3(0))*(uInv(2, 0)*uInv(2, 0)) + 2*alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(0, 1) + 2*alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(1, 1) + 2*alpha*p3(0)*p3(1)*uInv(0, 0)*uInv(2, 1) + 2*alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(1, 0) + 2*alpha*p3(0)*p3(1)*uInv(0, 1)*uInv(2, 0) + 2*alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(1, 1) + 2*alpha*p3(0)*p3(1)*uInv(1, 0)*uInv(2, 1) + 2*alpha*p3(0)*p3(1)*uInv(1, 1)*uInv(2, 0) + 2*alpha*p3(0)*p3(1)*uInv(2, 0)*uInv(2, 1) + 2*alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(0, 2) + 2*alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(1, 2) + 2*alpha*p3(0)*p3(2)*uInv(0, 0)*uInv(2, 2) + 2*alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(1, 0) + 2*alpha*p3(0)*p3(2)*uInv(0, 2)*uInv(2, 0) + 2*alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(1, 2) + 2*alpha*p3(0)*p3(2)*uInv(1, 0)*uInv(2, 2) + 2*alpha*p3(0)*p3(2)*uInv(1, 2)*uInv(2, 0) + 2*alpha*p3(0)*p3(2)*uInv(2, 0)*uInv(2, 2) + 2*alpha*p3(0)*uInv(0, 0) + 2*alpha*p3(0)*uInv(1, 0) + 2*alpha*p3(0)*uInv(2, 0) + alpha*(p3(1)*p3(1))*(uInv(0, 1)*uInv(0, 1)) + 2*alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(1, 1) + 2*alpha*(p3(1)*p3(1))*uInv(0, 1)*uInv(2, 1) + alpha*(p3(1)*p3(1))*(uInv(1, 1)*uInv(1, 1)) + 2*alpha*(p3(1)*p3(1))*uInv(1, 1)*uInv(2, 1) + alpha*(p3(1)*p3(1))*(uInv(2, 1)*uInv(2, 1)) + 2*alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(0, 2) + 2*alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(1, 2) + 2*alpha*p3(1)*p3(2)*uInv(0, 1)*uInv(2, 2) + 2*alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(1, 1) + 2*alpha*p3(1)*p3(2)*uInv(0, 2)*uInv(2, 1) + 2*alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(1, 2) + 2*alpha*p3(1)*p3(2)*uInv(1, 1)*uInv(2, 2) + 2*alpha*p3(1)*p3(2)*uInv(1, 2)*uInv(2, 1) + 2*alpha*p3(1)*p3(2)*uInv(2, 1)*uInv(2, 2) + 2*alpha*p3(1)*uInv(0, 1) + 2*alpha*p3(1)*uInv(1, 1) + 2*alpha*p3(1)*uInv(2, 1) + alpha*(p3(2)*p3(2))*(uInv(0, 2)*uInv(0, 2)) + 
2*alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(1, 2) + 2*alpha*(p3(2)*p3(2))*uInv(0, 2)*uInv(2, 2) + alpha*(p3(2)*p3(2))*(uInv(1, 2)*uInv(1, 2)) + 2*alpha*(p3(2)*p3(2))*uInv(1, 2)*uInv(2, 2) + alpha*(p3(2)*p3(2))*(uInv(2, 2)*uInv(2, 2)) + 2*alpha*p3(2)*uInv(0, 2) + 2*alpha*p3(2)*uInv(1, 2) + 2*alpha*p3(2)*uInv(2, 2) + alpha + (uInv(0, 0)*uInv(0, 0)) + 2*uInv(0, 0)*uInv(1, 0) + 2*uInv(0, 0)*uInv(2, 0) + (uInv(0, 1)*uInv(0, 1)) + 2*uInv(0, 1)*uInv(1, 1) + 2*uInv(0, 1)*uInv(2, 1) + (uInv(0, 2)*uInv(0, 2)) + 2*uInv(0, 2)*uInv(1, 2) + 2*uInv(0, 2)*uInv(2, 2) + (uInv(1, 0)*uInv(1, 0)) + 2*uInv(1, 0)*uInv(2, 0) + (uInv(1, 1)*uInv(1, 1)) + 2*uInv(1, 1)*uInv(2, 1) + (uInv(1, 2)*uInv(1, 2)) + 2*uInv(1, 2)*uInv(2, 2) + (uInv(2, 0)*uInv(2, 0)) + (uInv(2, 1)*uInv(2, 1)) + (uInv(2, 2)*uInv(2, 2)));
indeps[3 * p3Idx + 2] += 2*Ai*(alpha*p3(0)*t(2)*uInv(0, 0) + alpha*p3(0)*t(2)*uInv(1, 0) + alpha*p3(0)*t(2)*uInv(2, 0) + alpha*p3(1)*t(2)*uInv(0, 1) + alpha*p3(1)*t(2)*uInv(1, 1) + alpha*p3(1)*t(2)*uInv(2, 1) + alpha*p3(2)*t(2)*uInv(0, 2) + alpha*p3(2)*t(2)*uInv(1, 2) + alpha*p3(2)*t(2)*uInv(2, 2) + alpha*t(2) - m(2, 0)*uInv(0, 0) - m(2, 0)*uInv(1, 0) - m(2, 0)*uInv(2, 0) - m(2, 1)*uInv(0, 1) - m(2, 1)*uInv(1, 1) - m(2, 1)*uInv(2, 1) - m(2, 2)*uInv(0, 2) - m(2, 2)*uInv(1, 2) - m(2, 2)*uInv(2, 2));
}
}
}
}
#endif
////////////////////////////////////////////////////////////////////////////////
|
DRB048-firstprivate-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
/*
Example use of firstprivate()
*/
#include <omp.h>
/* Add the scalar g to each of the first n elements of a, in parallel.
 * n and g are firstprivate (each thread gets its own copy) and the loop
 * index is private, so concurrent iterations touch disjoint a[] slots —
 * this kernel is intentionally race-free (DataRaceBench "no race" case). */
void foo(int *a, int n, int g)
{
    int idx;
#pragma omp parallel for private(idx) firstprivate(n, g)
    for (idx = 0; idx < n; idx++) {
        a[idx] += g;
    }
}
int a[100];

/* Driver: initialize a[k] = k in parallel, bump every element by 7 via
 * foo(), then print the results (expected output: 7, 8, ..., 106). */
int main()
{
    const int n = 100;
    int k;
#pragma omp parallel for private(k)
    for (k = 0; k < n; k++) {
        a[k] = k;
    }
    foo(a, 100, 7);
    for (k = 0; k < n; k++) {
        printf("%d\n", a[k]);
    }
    return 0;
}
|
pr59591-1.c | /* PR tree-optimization/59591 */
/* { dg-additional-options "-fopenmp-simd" } */
#ifndef CHECK_H
#include "tree-vect.h"
#endif
extern void abort (void);
int p[256], q[256], r[256], t[256];
/* Masked indirect load (gather) under "#pragma omp simd safelen(64)":
 * for each lane with r[i] > 32, set t[i] = p[q[i]*3 + 2]; other lanes
 * leave t[i] untouched.  This exact shape is what PR59591 mis-compiled,
 * so the code must not be restructured.  The 3L/2L suffixes force the
 * index arithmetic to be done in 64-bit (long). */
__attribute__((noinline, noclone)) void
foo (void)
{
int i;
#pragma omp simd safelen(64)
for (i = 0; i < 256; i++)
if (r[i] > 32)
t[i] = p[q[i] * 3L + 2L];
}
/* Driver: fill p/q/r/t with known patterns, run foo(), then verify that
 * exactly the "active" lanes were rewritten and every inactive lane kept
 * its original t[i] = i*13.  Calls abort() on any mismatch. */
__attribute__((noinline, noclone)) void
bar (void)
{
int i;
for (i = 0; i < 256; i++)
{
/* (i >> 2) & (1 << (i & 3)) selects an irregular subset of lanes.
 * Active lanes get r[i] > 32 and an in-range q index ((i*7) % 84);
 * inactive lanes get r[i] <= 32 and a deliberately out-of-pattern
 * q value (99 + i) that foo() must never dereference through. */
r[i] = ((i >> 2) & (1 << (i & 3))) ? 32 + i : 32 - i;
q[i] = r[i] > 32 ? ((i * 7) % 84) : 99 + i;
p[i] = i * 11;
t[i] = i * 13;
}
foo ();
for (i = 0; i < 256; i++)
if ((i >> 2) & (1 << (i & 3)))
{
/* Active lane: p[x] == x*11, so t[i] must equal (q[i]*3 + 2) * 11. */
if (t[i] != (((i * 7) % 84) * 3 + 2) * 11)
abort ();
}
else if (t[i] != i * 13)
abort ();
}
#ifndef CHECK_H
/* Standalone entry point; when CHECK_H is defined an external harness
 * supplies its own main() and this one is compiled out. */
int
main ()
{
/* From tree-vect.h: exits early/gracefully if the host CPU lacks the
   vector support the test needs. */
check_vect ();
bar ();
return 0;
}
#endif
|
ast-dump-openmp-declare-variant-extensions.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s | FileCheck %s
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s -x c++| FileCheck %s
// expected-no-diagnostics
int picked1(void) { return 0; }     // "picked*" variants all return 0; the CHECK lines below pin exact line:col positions, so comments here are trailing-only (no new lines)
int picked2(void) { return 0; }
int picked3(void);                  // defined after the declare-variant regions below
int picked4(void);
int picked5(void) { return 0; }
int picked6(void) { return 0; }
int picked7(void) { return 0; }
int not_picked1(void) { return 1; } // "not_picked*" return nonzero so a wrong variant selection makes test() != 0
int not_picked2(void) { return 2; }
int not_picked3(void);
int not_picked4(void);
int not_picked5(void);
int not_picked6(void);
#pragma omp declare variant(picked1) match(implementation={extension(match_any)}, device={kind(cpu, gpu)}) // match_any: one matching kind (cpu host) suffices -> picked1 replaces base1
int base1(void) { return 3; }
#pragma omp declare variant(picked2) match(implementation={extension(match_none)}, device={kind(gpu, fpga)}) // match_none: selected because neither gpu nor fpga matches the cpu host
int base2(void) { return 4; }
#pragma omp declare variant(picked3) match(implementation={vendor(pgi), extension(match_any)}, device={kind(cpu, gpu)}) // match_any: kind(cpu) matches even though vendor(pgi) does not
int base3(void) { return 5; }
#pragma omp declare variant(picked4) match(user={condition(0)}, implementation={extension(match_none)}, device={kind(gpu, fpga)}) // match_none: no trait (condition 0, gpu, fpga) matches -> selected
int base4(void) { return 6; }
#pragma omp declare variant(picked5) match(user={condition(1)}, implementation={extension(match_all)}, device={kind(cpu)}) // match_all: every trait matches on the cpu host -> selected
int base5(void) { return 7; }
#pragma omp declare variant(not_picked1) match(implementation={extension(match_any)}, device={kind(gpu, fpga)}) // not selected: no kind matches under match_any
int base6(void) { return 0; }
#pragma omp declare variant(not_picked2) match(implementation={extension(match_none)}, device={kind(gpu, cpu)}) // not selected: kind(cpu) matches, violating match_none
int base7(void) { return 0; }
#pragma omp declare variant(not_picked3) match(implementation={vendor(llvm), extension(match_any)}, device={kind(fpga, gpu)}) // NOTE(review): vendor(llvm) presumably matches under match_any, yet marked not_picked -- semantics pinned by the CHECK lines below
int base8(void) { return 0; }
#pragma omp declare variant(not_picked4) match(user={condition(1)}, implementation={extension(match_none)}, device={kind(gpu, fpga)}) // not selected: condition(1) matches, violating match_none
int base9(void) { return 0; }
#pragma omp declare variant(not_picked5) match(user={condition(1)}, implementation={extension(match_all)}, device={kind(cpu, gpu)}) // not selected: kind(gpu) fails, violating match_all
int base10(void) { return 0; }
#pragma omp declare variant(not_picked6) match(implementation={extension(match_any)}) // not selected: no trait besides the extension itself to match
int base11(void) { return 0; }
#pragma omp declare variant(picked6) match(implementation={extension(match_all)}) // selected: vacuous match_all holds
int base12(void) { return 8; }
#pragma omp declare variant(picked7) match(implementation={extension(match_none)}) // selected: vacuous match_none holds
int base13(void) { return 9; }
#pragma omp begin declare variant match(implementation={extension(match_any)}, device={kind(cpu, gpu)}) // region active on the cpu host (match_any)
int overloaded1(void) { return 0; }
#pragma omp end declare variant
int overloaded2(void) { return 1; } // base definition, shadowed by the variant-region definition below
#pragma omp begin declare variant match(implementation={extension(match_none)}, device={kind(fpga, gpu)}) // active: neither fpga nor gpu matches, satisfying match_none
int overloaded2(void) { return 0; }
#pragma omp end declare variant
#pragma omp begin declare variant match(implementation={extension(match_none)}, device={kind(cpu)}) // inactive: kind(cpu) matches, so match_none fails and the region body is skipped lexically
NOT PARSED
#pragma omp end declare variant
int picked3(void) { return 0; }
int picked4(void) { return 0; }
int not_picked3(void) { return 10; }
int not_picked4(void) { return 11; }
int not_picked5(void) { return 12; }
int not_picked6(void) { return 13; }
int test(void) { // calls every base/overloaded entry; variant selection must route each base* with a nonzero body to a picked* returning 0
// Should return 0.
return base1() + base2() + base3() + base4() + base5() + base6() + base7() +
base8() + base9() + base10() + base11() + base12() + base13() +
overloaded1() + overloaded2(); // all terms are 0 iff selection matched the picked/not_picked naming
}
// 1) All "picked" versions are called but none of the "non_picked" ones is.
// 2) The overloaded functions that return 0 are called.
// CHECK: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, col:31> col:5 referenced picked1 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_1:0x[a-z0-9]*]] <col:19, col:31>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_2:0x[a-z0-9]*]] <col:21, col:28>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_3:0x[a-z0-9]*]] <col:28> 'int' 0
// CHECK-NEXT: |-FunctionDecl [[ADDR_4:0x[a-z0-9]*]] <line:6:1, col:31> col:5 referenced picked2 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_5:0x[a-z0-9]*]] <col:19, col:31>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_6:0x[a-z0-9]*]] <col:21, col:28>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_7:0x[a-z0-9]*]] <col:28> 'int' 0
// CHECK-NEXT: |-FunctionDecl [[ADDR_8:0x[a-z0-9]*]] <line:7:1, col:17> col:5 referenced picked3 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_9:0x[a-z0-9]*]] <line:8:1, col:17> col:5 referenced picked4 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_10:0x[a-z0-9]*]] <line:9:1, col:31> col:5 referenced picked5 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_11:0x[a-z0-9]*]] <col:19, col:31>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_12:0x[a-z0-9]*]] <col:21, col:28>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_13:0x[a-z0-9]*]] <col:28> 'int' 0
// CHECK-NEXT: |-FunctionDecl [[ADDR_14:0x[a-z0-9]*]] <line:10:1, col:31> col:5 referenced picked6 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_15:0x[a-z0-9]*]] <col:19, col:31>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_16:0x[a-z0-9]*]] <col:21, col:28>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_17:0x[a-z0-9]*]] <col:28> 'int' 0
// CHECK-NEXT: |-FunctionDecl [[ADDR_18:0x[a-z0-9]*]] <line:11:1, col:31> col:5 referenced picked7 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_19:0x[a-z0-9]*]] <col:19, col:31>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_20:0x[a-z0-9]*]] <col:21, col:28>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_21:0x[a-z0-9]*]] <col:28> 'int' 0
// CHECK-NEXT: |-FunctionDecl [[ADDR_22:0x[a-z0-9]*]] <line:12:1, col:35> col:5 referenced not_picked1 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_23:0x[a-z0-9]*]] <col:23, col:35>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_24:0x[a-z0-9]*]] <col:25, col:32>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_25:0x[a-z0-9]*]] <col:32> 'int' 1
// CHECK-NEXT: |-FunctionDecl [[ADDR_26:0x[a-z0-9]*]] <line:13:1, col:35> col:5 referenced not_picked2 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_27:0x[a-z0-9]*]] <col:23, col:35>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_28:0x[a-z0-9]*]] <col:25, col:32>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_29:0x[a-z0-9]*]] <col:32> 'int' 2
// CHECK-NEXT: |-FunctionDecl [[ADDR_30:0x[a-z0-9]*]] <line:14:1, col:21> col:5 referenced not_picked3 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_31:0x[a-z0-9]*]] <line:15:1, col:21> col:5 referenced not_picked4 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_32:0x[a-z0-9]*]] <line:16:1, col:21> col:5 referenced not_picked5 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_33:0x[a-z0-9]*]] <line:17:1, col:21> col:5 referenced not_picked6 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_34:0x[a-z0-9]*]] <line:20:1, col:29> col:5 used base1 'int ({{.*}})'
// CHECK-NEXT: | |-CompoundStmt [[ADDR_35:0x[a-z0-9]*]] <col:17, col:29>
// CHECK-NEXT: | | `-ReturnStmt [[ADDR_36:0x[a-z0-9]*]] <col:19, col:26>
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_37:0x[a-z0-9]*]] <col:26> 'int' 3
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_38:0x[a-z0-9]*]] <line:19:1, col:107> Implicit implementation={extension(match_any)}, device={kind(cpu, gpu)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_39:0x[a-z0-9]*]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'picked1' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: |-FunctionDecl [[ADDR_40:0x[a-z0-9]*]] <line:23:1, col:29> col:5 used base2 'int ({{.*}})'
// CHECK-NEXT: | |-CompoundStmt [[ADDR_41:0x[a-z0-9]*]] <col:17, col:29>
// CHECK-NEXT: | | `-ReturnStmt [[ADDR_42:0x[a-z0-9]*]] <col:19, col:26>
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_43:0x[a-z0-9]*]] <col:26> 'int' 4
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_44:0x[a-z0-9]*]] <line:22:1, col:109> Implicit implementation={extension(match_none)}, device={kind(gpu, fpga)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_45:0x[a-z0-9]*]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_4]] 'picked2' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: |-FunctionDecl [[ADDR_46:0x[a-z0-9]*]] <line:26:1, col:29> col:5 used base3 'int ({{.*}})'
// CHECK-NEXT: | |-CompoundStmt [[ADDR_47:0x[a-z0-9]*]] <col:17, col:29>
// CHECK-NEXT: | | `-ReturnStmt [[ADDR_48:0x[a-z0-9]*]] <col:19, col:26>
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_49:0x[a-z0-9]*]] <col:26> 'int' 5
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_50:0x[a-z0-9]*]] <line:25:1, col:120> Implicit implementation={vendor(pgi), extension(match_any)}, device={kind(cpu, gpu)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_51:0x[a-z0-9]*]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_8]] 'picked3' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: |-FunctionDecl [[ADDR_52:0x[a-z0-9]*]] <line:29:1, col:29> col:5 used base4 'int ({{.*}})'
// CHECK-NEXT: | |-CompoundStmt [[ADDR_53:0x[a-z0-9]*]] <col:17, col:29>
// CHECK-NEXT: | | `-ReturnStmt [[ADDR_54:0x[a-z0-9]*]] <col:19, col:26>
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_55:0x[a-z0-9]*]] <col:26> 'int' 6
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_56:0x[a-z0-9]*]] <line:28:1, col:130> Implicit user={condition(0)}, implementation={extension(match_none)}, device={kind(gpu, fpga)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_57:0x[a-z0-9]*]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_9]] 'picked4' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: |-FunctionDecl [[ADDR_58:0x[a-z0-9]*]] <line:32:1, col:29> col:5 used base5 'int ({{.*}})'
// CHECK-NEXT: | |-CompoundStmt [[ADDR_59:0x[a-z0-9]*]] <col:17, col:29>
// CHECK-NEXT: | | `-ReturnStmt [[ADDR_60:0x[a-z0-9]*]] <col:19, col:26>
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_61:0x[a-z0-9]*]] <col:26> 'int' 7
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_62:0x[a-z0-9]*]] <line:31:1, col:123> Implicit user={condition(1)}, implementation={extension(match_all)}, device={kind(cpu)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_63:0x[a-z0-9]*]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_10]] 'picked5' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: |-FunctionDecl [[ADDR_64:0x[a-z0-9]*]] <line:35:1, col:29> col:5 used base6 'int ({{.*}})'
// CHECK-NEXT: | |-CompoundStmt [[ADDR_65:0x[a-z0-9]*]] <col:17, col:29>
// CHECK-NEXT: | | `-ReturnStmt [[ADDR_66:0x[a-z0-9]*]] <col:19, col:26>
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_67:0x[a-z0-9]*]] <col:26> 'int' 0
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_68:0x[a-z0-9]*]] <line:34:1, col:112> Implicit implementation={extension(match_any)}, device={kind(gpu, fpga)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_69:0x[a-z0-9]*]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_22]] 'not_picked1' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: |-FunctionDecl [[ADDR_70:0x[a-z0-9]*]] <line:38:1, col:29> col:5 used base7 'int ({{.*}})'
// CHECK-NEXT: | |-CompoundStmt [[ADDR_71:0x[a-z0-9]*]] <col:17, col:29>
// CHECK-NEXT: | | `-ReturnStmt [[ADDR_72:0x[a-z0-9]*]] <col:19, col:26>
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_73:0x[a-z0-9]*]] <col:26> 'int' 0
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_74:0x[a-z0-9]*]] <line:37:1, col:112> Implicit implementation={extension(match_none)}, device={kind(gpu, cpu)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_75:0x[a-z0-9]*]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_26]] 'not_picked2' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: |-FunctionDecl [[ADDR_76:0x[a-z0-9]*]] <line:41:1, col:29> col:5 used base8 'int ({{.*}})'
// CHECK-NEXT: | |-CompoundStmt [[ADDR_77:0x[a-z0-9]*]] <col:17, col:29>
// CHECK-NEXT: | | `-ReturnStmt [[ADDR_78:0x[a-z0-9]*]] <col:19, col:26>
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_79:0x[a-z0-9]*]] <col:26> 'int' 0
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_80:0x[a-z0-9]*]] <line:40:1, col:126> Implicit implementation={vendor(llvm), extension(match_any)}, device={kind(fpga, gpu)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_81:0x[a-z0-9]*]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_30]] 'not_picked3' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: |-FunctionDecl [[ADDR_82:0x[a-z0-9]*]] <line:44:1, col:29> col:5 used base9 'int ({{.*}})'
// CHECK-NEXT: | |-CompoundStmt [[ADDR_83:0x[a-z0-9]*]] <col:17, col:29>
// CHECK-NEXT: | | `-ReturnStmt [[ADDR_84:0x[a-z0-9]*]] <col:19, col:26>
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_85:0x[a-z0-9]*]] <col:26> 'int' 0
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_86:0x[a-z0-9]*]] <line:43:1, col:134> Implicit user={condition(1)}, implementation={extension(match_none)}, device={kind(gpu, fpga)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_87:0x[a-z0-9]*]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_31]] 'not_picked4' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: |-FunctionDecl [[ADDR_88:0x[a-z0-9]*]] <line:47:1, col:30> col:5 used base10 'int ({{.*}})'
// CHECK-NEXT: | |-CompoundStmt [[ADDR_89:0x[a-z0-9]*]] <col:18, col:30>
// CHECK-NEXT: | | `-ReturnStmt [[ADDR_90:0x[a-z0-9]*]] <col:20, col:27>
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_91:0x[a-z0-9]*]] <col:27> 'int' 0
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_92:0x[a-z0-9]*]] <line:46:1, col:132> Implicit user={condition(1)}, implementation={extension(match_all)}, device={kind(cpu, gpu)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_93:0x[a-z0-9]*]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_32]] 'not_picked5' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: |-FunctionDecl [[ADDR_94:0x[a-z0-9]*]] <line:50:1, col:30> col:5 used base11 'int ({{.*}})'
// CHECK-NEXT: | |-CompoundStmt [[ADDR_95:0x[a-z0-9]*]] <col:18, col:30>
// CHECK-NEXT: | | `-ReturnStmt [[ADDR_96:0x[a-z0-9]*]] <col:20, col:27>
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_97:0x[a-z0-9]*]] <col:27> 'int' 0
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_98:0x[a-z0-9]*]] <line:49:1, col:86> Implicit implementation={extension(match_any)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_99:0x[a-z0-9]*]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_33]] 'not_picked6' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: |-FunctionDecl [[ADDR_100:0x[a-z0-9]*]] <line:53:1, col:30> col:5 used base12 'int ({{.*}})'
// CHECK-NEXT: | |-CompoundStmt [[ADDR_101:0x[a-z0-9]*]] <col:18, col:30>
// CHECK-NEXT: | | `-ReturnStmt [[ADDR_102:0x[a-z0-9]*]] <col:20, col:27>
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_103:0x[a-z0-9]*]] <col:27> 'int' 8
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_104:0x[a-z0-9]*]] <line:52:1, col:82> Implicit implementation={extension(match_all)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_105:0x[a-z0-9]*]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_14]] 'picked6' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: |-FunctionDecl [[ADDR_106:0x[a-z0-9]*]] <line:56:1, col:30> col:5 used base13 'int ({{.*}})'
// CHECK-NEXT: | |-CompoundStmt [[ADDR_107:0x[a-z0-9]*]] <col:18, col:30>
// CHECK-NEXT: | | `-ReturnStmt [[ADDR_108:0x[a-z0-9]*]] <col:20, col:27>
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_109:0x[a-z0-9]*]] <col:27> 'int' 9
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_110:0x[a-z0-9]*]] <line:55:1, col:83> Implicit implementation={extension(match_none)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_111:0x[a-z0-9]*]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_18]] 'picked7' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: |-FunctionDecl [[ADDR_112:0x[a-z0-9]*]] <line:59:1, col:21> col:5 implicit used overloaded1 'int ({{.*}})'
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_113:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={extension(match_any)}, device={kind(cpu, gpu)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_114:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' {{.*}}Function [[ADDR_115:0x[a-z0-9]*]] 'overloaded1[implementation={extension(match_any)}, device={kind(cpu, gpu)}]' 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_115]] <col:1, col:35> col:1 overloaded1[implementation={extension(match_any)}, device={kind(cpu, gpu)}] 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_116:0x[a-z0-9]*]] <col:23, col:35>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_117:0x[a-z0-9]*]] <col:25, col:32>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_118:0x[a-z0-9]*]] <col:32> 'int' 0
// CHECK-NEXT: |-FunctionDecl [[ADDR_119:0x[a-z0-9]*]] <line:62:1, col:35> col:5 used overloaded2 'int ({{.*}})'
// CHECK-NEXT: | |-CompoundStmt [[ADDR_120:0x[a-z0-9]*]] <col:23, col:35>
// CHECK-NEXT: | | `-ReturnStmt [[ADDR_121:0x[a-z0-9]*]] <col:25, col:32>
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_122:0x[a-z0-9]*]] <col:32> 'int' 1
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_123:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={extension(match_none)}, device={kind(fpga, gpu)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_124:0x[a-z0-9]*]] <line:64:1> 'int ({{.*}})' {{.*}}Function [[ADDR_125:0x[a-z0-9]*]] 'overloaded2[implementation={extension(match_none)}, device={kind(fpga, gpu)}]' 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_125]] <col:1, col:35> col:1 overloaded2[implementation={extension(match_none)}, device={kind(fpga, gpu)}] 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_126:0x[a-z0-9]*]] <col:23, col:35>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_127:0x[a-z0-9]*]] <col:25, col:32>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_128:0x[a-z0-9]*]] <col:32> 'int' 0
// CHECK-NEXT: |-FunctionDecl [[ADDR_129:0x[a-z0-9]*]] prev [[ADDR_8]] <line:72:1, col:31> col:5 picked3 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_130:0x[a-z0-9]*]] <col:19, col:31>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_131:0x[a-z0-9]*]] <col:21, col:28>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_132:0x[a-z0-9]*]] <col:28> 'int' 0
// CHECK-NEXT: |-FunctionDecl [[ADDR_133:0x[a-z0-9]*]] prev [[ADDR_9]] <line:73:1, col:31> col:5 picked4 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_134:0x[a-z0-9]*]] <col:19, col:31>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_135:0x[a-z0-9]*]] <col:21, col:28>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_136:0x[a-z0-9]*]] <col:28> 'int' 0
// CHECK-NEXT: |-FunctionDecl [[ADDR_137:0x[a-z0-9]*]] prev [[ADDR_30]] <line:74:1, col:36> col:5 not_picked3 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_138:0x[a-z0-9]*]] <col:23, col:36>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_139:0x[a-z0-9]*]] <col:25, col:32>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_140:0x[a-z0-9]*]] <col:32> 'int' 10
// CHECK-NEXT: |-FunctionDecl [[ADDR_141:0x[a-z0-9]*]] prev [[ADDR_31]] <line:75:1, col:36> col:5 not_picked4 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_142:0x[a-z0-9]*]] <col:23, col:36>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_143:0x[a-z0-9]*]] <col:25, col:32>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_144:0x[a-z0-9]*]] <col:32> 'int' 11
// CHECK-NEXT: |-FunctionDecl [[ADDR_145:0x[a-z0-9]*]] prev [[ADDR_32]] <line:76:1, col:36> col:5 not_picked5 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_146:0x[a-z0-9]*]] <col:23, col:36>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_147:0x[a-z0-9]*]] <col:25, col:32>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_148:0x[a-z0-9]*]] <col:32> 'int' 12
// CHECK-NEXT: |-FunctionDecl [[ADDR_149:0x[a-z0-9]*]] prev [[ADDR_33]] <line:77:1, col:36> col:5 not_picked6 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_150:0x[a-z0-9]*]] <col:23, col:36>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_151:0x[a-z0-9]*]] <col:25, col:32>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_152:0x[a-z0-9]*]] <col:32> 'int' 13
// CHECK-NEXT: `-FunctionDecl [[ADDR_153:0x[a-z0-9]*]] <line:79:1, line:84:1> line:79:5 test 'int ({{.*}})'
// CHECK-NEXT: `-CompoundStmt [[ADDR_154:0x[a-z0-9]*]] <col:16, line:84:1>
// CHECK-NEXT: `-ReturnStmt [[ADDR_155:0x[a-z0-9]*]] <line:81:3, line:83:38>
// CHECK-NEXT: `-BinaryOperator [[ADDR_156:0x[a-z0-9]*]] <line:81:10, line:83:38> 'int' '+'
// CHECK-NEXT: |-BinaryOperator [[ADDR_157:0x[a-z0-9]*]] <line:81:10, line:83:22> 'int' '+'
// CHECK-NEXT: | |-BinaryOperator [[ADDR_158:0x[a-z0-9]*]] <line:81:10, line:82:70> 'int' '+'
// CHECK-NEXT: | | |-BinaryOperator [[ADDR_159:0x[a-z0-9]*]] <line:81:10, line:82:59> 'int' '+'
// CHECK-NEXT: | | | |-BinaryOperator [[ADDR_160:0x[a-z0-9]*]] <line:81:10, line:82:48> 'int' '+'
// CHECK-NEXT: | | | | |-BinaryOperator [[ADDR_161:0x[a-z0-9]*]] <line:81:10, line:82:37> 'int' '+'
// CHECK-NEXT: | | | | | |-BinaryOperator [[ADDR_162:0x[a-z0-9]*]] <line:81:10, line:82:26> 'int' '+'
// CHECK-NEXT: | | | | | | |-BinaryOperator [[ADDR_163:0x[a-z0-9]*]] <line:81:10, line:82:16> 'int' '+'
// CHECK-NEXT: | | | | | | | |-BinaryOperator [[ADDR_164:0x[a-z0-9]*]] <line:81:10, col:76> 'int' '+'
// CHECK-NEXT: | | | | | | | | |-BinaryOperator [[ADDR_165:0x[a-z0-9]*]] <col:10, col:66> 'int' '+'
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator [[ADDR_166:0x[a-z0-9]*]] <col:10, col:56> 'int' '+'
// CHECK-NEXT: | | | | | | | | | | |-BinaryOperator [[ADDR_167:0x[a-z0-9]*]] <col:10, col:46> 'int' '+'
// CHECK-NEXT: | | | | | | | | | | | |-BinaryOperator [[ADDR_168:0x[a-z0-9]*]] <col:10, col:36> 'int' '+'
// CHECK-NEXT: | | | | | | | | | | | | |-BinaryOperator [[ADDR_169:0x[a-z0-9]*]] <col:10, col:26> 'int' '+'
// CHECK-NEXT: | | | | | | | | | | | | | |-PseudoObjectExpr [[ADDR_170:0x[a-z0-9]*]] <col:10, col:16> 'int'
// CHECK-NEXT: | | | | | | | | | | | | | | |-CallExpr [[ADDR_171:0x[a-z0-9]*]] <col:10, col:16> 'int'
// CHECK-NEXT: | | | | | | | | | | | | | | | `-ImplicitCastExpr [[ADDR_172:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | | | | | | | | | | `-DeclRefExpr [[ADDR_173:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' {{.*}}Function [[ADDR_34]] 'base1' 'int ({{.*}})'
// CHECK-NEXT: | | | | | | | | | | | | | | `-CallExpr [[ADDR_174:0x[a-z0-9]*]] <line:19:29, line:81:16> 'int'
// CHECK-NEXT: | | | | | | | | | | | | | | `-ImplicitCastExpr [[ADDR_175:0x[a-z0-9]*]] <line:19:29> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | | | | | | | | | `-DeclRefExpr [[ADDR_39]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'picked1' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: | | | | | | | | | | | | | `-PseudoObjectExpr [[ADDR_176:0x[a-z0-9]*]] <line:81:20, col:26> 'int'
// CHECK-NEXT: | | | | | | | | | | | | | |-CallExpr [[ADDR_177:0x[a-z0-9]*]] <col:20, col:26> 'int'
// CHECK-NEXT: | | | | | | | | | | | | | | `-ImplicitCastExpr [[ADDR_178:0x[a-z0-9]*]] <col:20> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | | | | | | | | | `-DeclRefExpr [[ADDR_179:0x[a-z0-9]*]] <col:20> 'int ({{.*}})' {{.*}}Function [[ADDR_40]] 'base2' 'int ({{.*}})'
// CHECK-NEXT: | | | | | | | | | | | | | `-CallExpr [[ADDR_180:0x[a-z0-9]*]] <line:22:29, line:81:26> 'int'
// CHECK-NEXT: | | | | | | | | | | | | | `-ImplicitCastExpr [[ADDR_181:0x[a-z0-9]*]] <line:22:29> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | | | | | | | | `-DeclRefExpr [[ADDR_45]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_4]] 'picked2' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: | | | | | | | | | | | | `-PseudoObjectExpr [[ADDR_182:0x[a-z0-9]*]] <line:81:30, col:36> 'int'
// CHECK-NEXT: | | | | | | | | | | | | |-CallExpr [[ADDR_183:0x[a-z0-9]*]] <col:30, col:36> 'int'
// CHECK-NEXT: | | | | | | | | | | | | | `-ImplicitCastExpr [[ADDR_184:0x[a-z0-9]*]] <col:30> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | | | | | | | | `-DeclRefExpr [[ADDR_185:0x[a-z0-9]*]] <col:30> 'int ({{.*}})' {{.*}}Function [[ADDR_46]] 'base3' 'int ({{.*}})'
// CHECK-NEXT: | | | | | | | | | | | | `-CallExpr [[ADDR_186:0x[a-z0-9]*]] <line:25:29, line:81:36> 'int'
// CHECK-NEXT: | | | | | | | | | | | | `-ImplicitCastExpr [[ADDR_187:0x[a-z0-9]*]] <line:25:29> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | | | | | | | `-DeclRefExpr [[ADDR_51]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_8]] 'picked3' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: | | | | | | | | | | | `-PseudoObjectExpr [[ADDR_188:0x[a-z0-9]*]] <line:81:40, col:46> 'int'
// CHECK-NEXT: | | | | | | | | | | | |-CallExpr [[ADDR_189:0x[a-z0-9]*]] <col:40, col:46> 'int'
// CHECK-NEXT: | | | | | | | | | | | | `-ImplicitCastExpr [[ADDR_190:0x[a-z0-9]*]] <col:40> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | | | | | | | `-DeclRefExpr [[ADDR_191:0x[a-z0-9]*]] <col:40> 'int ({{.*}})' {{.*}}Function [[ADDR_52]] 'base4' 'int ({{.*}})'
// CHECK-NEXT: | | | | | | | | | | | `-CallExpr [[ADDR_192:0x[a-z0-9]*]] <line:28:29, line:81:46> 'int'
// CHECK-NEXT: | | | | | | | | | | | `-ImplicitCastExpr [[ADDR_193:0x[a-z0-9]*]] <line:28:29> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr [[ADDR_57]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_9]] 'picked4' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: | | | | | | | | | | `-PseudoObjectExpr [[ADDR_194:0x[a-z0-9]*]] <line:81:50, col:56> 'int'
// CHECK-NEXT: | | | | | | | | | | |-CallExpr [[ADDR_195:0x[a-z0-9]*]] <col:50, col:56> 'int'
// CHECK-NEXT: | | | | | | | | | | | `-ImplicitCastExpr [[ADDR_196:0x[a-z0-9]*]] <col:50> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr [[ADDR_197:0x[a-z0-9]*]] <col:50> 'int ({{.*}})' {{.*}}Function [[ADDR_58]] 'base5' 'int ({{.*}})'
// CHECK-NEXT: | | | | | | | | | | `-CallExpr [[ADDR_198:0x[a-z0-9]*]] <line:31:29, line:81:56> 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr [[ADDR_199:0x[a-z0-9]*]] <line:31:29> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr [[ADDR_63]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_10]] 'picked5' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: | | | | | | | | | `-CallExpr [[ADDR_200:0x[a-z0-9]*]] <line:81:60, col:66> 'int'
// CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr [[ADDR_201:0x[a-z0-9]*]] <col:60> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr [[ADDR_202:0x[a-z0-9]*]] <col:60> 'int ({{.*}})' {{.*}}Function [[ADDR_64]] 'base6' 'int ({{.*}})'
// CHECK-NEXT: | | | | | | | | `-CallExpr [[ADDR_203:0x[a-z0-9]*]] <col:70, col:76> 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr [[ADDR_204:0x[a-z0-9]*]] <col:70> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr [[ADDR_205:0x[a-z0-9]*]] <col:70> 'int ({{.*}})' {{.*}}Function [[ADDR_70]] 'base7' 'int ({{.*}})'
// CHECK-NEXT: | | | | | | | `-PseudoObjectExpr [[ADDR_206:0x[a-z0-9]*]] <line:82:10, col:16> 'int'
// CHECK-NEXT: | | | | | | | |-CallExpr [[ADDR_207:0x[a-z0-9]*]] <col:10, col:16> 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr [[ADDR_208:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr [[ADDR_209:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' {{.*}}Function [[ADDR_76]] 'base8' 'int ({{.*}})'
// CHECK-NEXT: | | | | | | | `-CallExpr [[ADDR_210:0x[a-z0-9]*]] <line:40:29, line:82:16> 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr [[ADDR_211:0x[a-z0-9]*]] <line:40:29> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr [[ADDR_81]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_30]] 'not_picked3' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: | | | | | | `-CallExpr [[ADDR_212:0x[a-z0-9]*]] <line:82:20, col:26> 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr [[ADDR_213:0x[a-z0-9]*]] <col:20> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | | `-DeclRefExpr [[ADDR_214:0x[a-z0-9]*]] <col:20> 'int ({{.*}})' {{.*}}Function [[ADDR_82]] 'base9' 'int ({{.*}})'
// CHECK-NEXT: | | | | | `-CallExpr [[ADDR_215:0x[a-z0-9]*]] <col:30, col:37> 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr [[ADDR_216:0x[a-z0-9]*]] <col:30> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | | `-DeclRefExpr [[ADDR_217:0x[a-z0-9]*]] <col:30> 'int ({{.*}})' {{.*}}Function [[ADDR_88]] 'base10' 'int ({{.*}})'
// CHECK-NEXT: | | | | `-CallExpr [[ADDR_218:0x[a-z0-9]*]] <col:41, col:48> 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr [[ADDR_219:0x[a-z0-9]*]] <col:41> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | `-DeclRefExpr [[ADDR_220:0x[a-z0-9]*]] <col:41> 'int ({{.*}})' {{.*}}Function [[ADDR_94]] 'base11' 'int ({{.*}})'
// CHECK-NEXT: | | | `-PseudoObjectExpr [[ADDR_221:0x[a-z0-9]*]] <col:52, col:59> 'int'
// CHECK-NEXT: | | | |-CallExpr [[ADDR_222:0x[a-z0-9]*]] <col:52, col:59> 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr [[ADDR_223:0x[a-z0-9]*]] <col:52> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | `-DeclRefExpr [[ADDR_224:0x[a-z0-9]*]] <col:52> 'int ({{.*}})' {{.*}}Function [[ADDR_100]] 'base12' 'int ({{.*}})'
// CHECK-NEXT: | | | `-CallExpr [[ADDR_225:0x[a-z0-9]*]] <line:52:29, line:82:59> 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr [[ADDR_226:0x[a-z0-9]*]] <line:52:29> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | `-DeclRefExpr [[ADDR_105]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_14]] 'picked6' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: | | `-PseudoObjectExpr [[ADDR_227:0x[a-z0-9]*]] <line:82:63, col:70> 'int'
// CHECK-NEXT: | | |-CallExpr [[ADDR_228:0x[a-z0-9]*]] <col:63, col:70> 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr [[ADDR_229:0x[a-z0-9]*]] <col:63> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | `-DeclRefExpr [[ADDR_230:0x[a-z0-9]*]] <col:63> 'int ({{.*}})' {{.*}}Function [[ADDR_106]] 'base13' 'int ({{.*}})'
// CHECK-NEXT: | | `-CallExpr [[ADDR_231:0x[a-z0-9]*]] <line:55:29, line:82:70> 'int'
// CHECK-NEXT: | | `-ImplicitCastExpr [[ADDR_232:0x[a-z0-9]*]] <line:55:29> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | `-DeclRefExpr [[ADDR_111]] <col:29> 'int ({{.*}})' {{.*}}Function [[ADDR_18]] 'picked7' 'int ({{.*}})' non_odr_use_unevaluated
// CHECK-NEXT: | `-PseudoObjectExpr [[ADDR_233:0x[a-z0-9]*]] <line:83:10, col:22> 'int'
// CHECK-NEXT: | |-CallExpr [[ADDR_234:0x[a-z0-9]*]] <col:10, col:22> 'int'
// CHECK-NEXT: | | `-ImplicitCastExpr [[ADDR_235:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | `-DeclRefExpr [[ADDR_236:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' {{.*}}Function [[ADDR_112]] 'overloaded1' 'int ({{.*}})'
// CHECK-NEXT: | `-CallExpr [[ADDR_237:0x[a-z0-9]*]] <line:59:1, line:83:22> 'int'
// CHECK-NEXT: | `-ImplicitCastExpr [[ADDR_238:0x[a-z0-9]*]] <line:59:1> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_114]] <col:1> 'int ({{.*}})' {{.*}}Function [[ADDR_115]] 'overloaded1[implementation={extension(match_any)}, device={kind(cpu, gpu)}]' 'int ({{.*}})'
// CHECK-NEXT: `-PseudoObjectExpr [[ADDR_239:0x[a-z0-9]*]] <line:83:26, col:38> 'int'
// CHECK-NEXT: |-CallExpr [[ADDR_240:0x[a-z0-9]*]] <col:26, col:38> 'int'
// CHECK-NEXT: | `-ImplicitCastExpr [[ADDR_241:0x[a-z0-9]*]] <col:26> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_242:0x[a-z0-9]*]] <col:26> 'int ({{.*}})' {{.*}}Function [[ADDR_119]] 'overloaded2' 'int ({{.*}})'
// CHECK-NEXT: `-CallExpr [[ADDR_243:0x[a-z0-9]*]] <line:64:1, line:83:38> 'int'
// CHECK-NEXT: `-ImplicitCastExpr [[ADDR_244:0x[a-z0-9]*]] <line:64:1> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: `-DeclRefExpr [[ADDR_124]] <col:1> 'int ({{.*}})' {{.*}}Function [[ADDR_125]] 'overloaded2[implementation={extension(match_none)}, device={kind(fpga, gpu)}]' 'int ({{.*}})'
|
imginputfileconn.h | /**
* DeepDetect
* Copyright (c) 2014 Emmanuel Benazera
* Author: Emmanuel Benazera <beniz@droidnik.fr>
*
* This file is part of deepdetect.
*
* deepdetect is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* deepdetect is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with deepdetect. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef IMGINPUTFILECONN_H
#define IMGINPUTFILECONN_H
#include "inputconnectorstrategy.h"
#include <opencv2/opencv.hpp>
#ifdef USE_CUDA_CV
#include <opencv2/cudaimgproc.hpp>
#endif
#if CV_VERSION_MAJOR >= 3
#define CV_LOAD_IMAGE_COLOR cv::IMREAD_COLOR
#define CV_LOAD_IMAGE_GRAYSCALE cv::IMREAD_GRAYSCALE
#define CV_LOAD_IMAGE_UNCHANGED cv::IMREAD_UNCHANGED
#define CV_BGR2RGB cv::COLOR_BGR2RGB
#define CV_BGR2GRAY cv::COLOR_BGR2GRAY
#define CV_GRAY2RGB cv::COLOR_GRAY2RGB
#define CV_YCrCb2RGB cv::COLOR_YCrCb2RGB
#define CV_YCrCb2BGR cv::COLOR_YCrCb2BGR
#define CV_BGR2YCrCb cv::COLOR_BGR2YCrCb
#define CV_INTER_CUBIC cv::INTER_CUBIC
#endif
#include "ext/base64/base64.h"
#include "utils/apitools.h"
#include <random>
#include "dto/input_connector.hpp"
namespace dd
{
class DDImg
{
public:
    // Default constructor: no work here; all configuration (flags,
    // sizes, logger) lives in public members set directly by callers.
    DDImg()
    {
    }
    // Destructor: nothing to release explicitly; members (cv::Mat
    // vectors, strings) clean up via their own destructors.
    ~DDImg()
    {
    }
// base64 detection
bool is_within_base64_range(char c) const
{
if ((c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z')
|| (c >= '0' && c <= '9') || (c == '+' || c == '/' || c == '='))
return true;
else
return false;
}
bool possibly_base64(const std::string &s) const
{
bool ism = is_multiple_four(s);
if (!ism)
return false;
for (char c : s)
{
bool within_64 = is_within_base64_range(c);
if (!within_64)
return false;
}
return true;
}
bool is_multiple_four(const std::string &s) const
{
if (s.length() % 4 == 0)
return true;
else
return false;
}
void resize(const cv::Mat &src, cv::Mat &dst, const cv::Size &cvsize,
const double &fx, const double &fy) const
{
#ifdef USE_CUDA_CV
if (_cuda)
{
cv::cuda::GpuMat d_src;
d_src.upload(src);
cv::cuda::GpuMat d_dst;
cv::cuda::resize(d_src, d_dst, cvsize, fx, fy, select_cv_interp());
if (_histogram_equalization)
{
if (_bw)
{
cv::cuda::equalizeHist(d_dst, d_dst);
if (_rgb)
cv::cuda::cvtColor(d_dst, d_dst, CV_GRAY2RGB);
}
else
{
// We don't apply equalizeHist on each BGR channels to keep
// the color balance of the image. equalizeHist(V) of HSV can
// works too, the result is almost the same
cv::cuda::cvtColor(d_dst, d_dst, CV_BGR2YCrCb);
std::vector<cv::cuda::GpuMat> vec_channels;
cv::cuda::split(d_dst, vec_channels);
cv::cuda::equalizeHist(vec_channels[0], vec_channels[0]);
cv::cuda::merge(vec_channels, d_dst);
if (_rgb)
cv::cuda::cvtColor(d_dst, d_dst, CV_YCrCb2RGB);
else
cv::cuda::cvtColor(d_dst, d_dst, CV_YCrCb2BGR);
}
}
else if (_rgb)
{
if (_bw)
cv::cuda::cvtColor(d_dst, d_dst, CV_GRAY2RGB);
else:
cv::cuda::cvtColor(d_dst, d_dst, CV_BGR2RGB);
}
d_dst.download(dst);
}
else
#endif
{
cv::resize(src, dst, cvsize, fx, fy, select_cv_interp());
if (_histogram_equalization)
{
if (_bw)
{
cv::equalizeHist(dst, dst);
if (_rgb)
cv::cvtColor(dst, dst, CV_GRAY2RGB);
}
else
{
// We don't apply equalizeHist on each BGR channels to keep
// the color balance of the image. equalizeHist(V) of HSV can
// works too, the result is almost the same
cv::cvtColor(dst, dst, CV_BGR2YCrCb);
std::vector<cv::Mat> vec_channels;
cv::split(dst, vec_channels);
cv::equalizeHist(vec_channels[0], vec_channels[0]);
cv::merge(vec_channels, dst);
if (_rgb)
cv::cvtColor(dst, dst, CV_YCrCb2RGB);
else
cv::cvtColor(dst, dst, CV_YCrCb2BGR);
}
}
else if (_rgb)
{
if (_bw)
cv::cvtColor(dst, dst, CV_GRAY2RGB);
else
cv::cvtColor(dst, dst, CV_BGR2RGB);
}
}
}
void scale(const cv::Mat &src, cv::Mat &dst) const
{
float coef = std::min(
static_cast<float>(_scale_max) / std::max(src.rows, src.cols),
static_cast<float>(_scale_min) / std::min(src.rows, src.cols));
resize(src, dst, cv::Size(), coef, coef);
}
    /// Apply preprocessing to image and add it to the list of images.
    /// Resizes (or scales) according to _scaled/_width/_height, then
    /// optionally center-crops to _crop_width x _crop_height.
    /// img_name: name of the image as displayed in error messages
    /// Returns 0 on success, -1 if the input image is empty; throws
    /// InputConnectorBadParamException when resizing or cropping fails.
    int add_image(const cv::Mat &img, const std::string &img_name)
    {
      if (_keep_orig)
        _orig_imgs.push_back(img); // keep an untouched copy on demand
      if (img.empty())
        {
          _logger->error("empty image {}", img_name);
          return -1;
        }
      // record original dimensions as (rows, cols) = (height, width)
      _imgs_size.push_back(std::pair<int, int>(img.rows, img.cols));
      cv::Mat rimg;
      try
        {
          if (_scaled)
            scale(img, rimg);
          else if (_width == 0 || _height == 0)
            {
              if (_width == 0 && _height == 0)
                {
                  // Do nothing and keep native resolution. May cause issues if
                  // batched images are different resolutions
                  rimg = img;
                }
              else
                {
                  // Resize so that the larger dimension is set to whichever
                  // (width or height) is non-zero, maintaining aspect ratio
                  // XXX - This may cause issues if batch images are different
                  // resolutions
                  size_t currMaxDim = std::max(img.rows, img.cols);
                  double scale = static_cast<double>(std::max(_width, _height))
                                 / static_cast<double>(currMaxDim);
                  resize(img, rimg, cv::Size(), scale, scale);
                }
            }
          else
            {
              // Resize normally to the specified width and height
              resize(img, rimg, cv::Size(_width, _height), 0, 0);
            }
        }
      catch (...)
        {
          throw InputConnectorBadParamException("failed resizing image "
                                                + img_name);
        }
      // Optional center crop. NOTE(review): the border offsets are
      // computed from the _width/_height targets, not from rimg's
      // actual size — presumably assumes the fixed-size resize path
      // was taken; verify for the _scaled / native-resolution paths.
      if (_crop_width != 0 && _crop_height != 0)
        {
          int widthBorder = (_width - _crop_width) / 2;
          int heightBorder = (_height - _crop_height) / 2;
          try
            {
              rimg = rimg(cv::Rect(widthBorder, heightBorder, _crop_width,
                                   _crop_height));
            }
          catch (...)
            {
              throw InputConnectorBadParamException("failed cropping image "
                                                    + img_name);
            }
        }
      _imgs.push_back(std::move(rimg));
      return 0;
    }
// decode image
void decode(const std::string &str)
{
std::vector<unsigned char> vdat(str.begin(), str.end());
cv::Mat img = cv::Mat(cv::imdecode(
cv::Mat(vdat, false),
_unchanged_data
? CV_LOAD_IMAGE_UNCHANGED
: (_bw ? CV_LOAD_IMAGE_GRAYSCALE : CV_LOAD_IMAGE_COLOR)));
add_image(img, "base64 image");
}
// deserialize image, independent of format
void deserialize(std::stringstream &input)
{
size_t size = 0;
input.seekg(0, input.end);
size = input.tellg();
input.seekg(0, input.beg);
char *data = new char[size];
input.read(data, size);
std::string str(data, data + size);
delete[] data;
decode(str);
}
// data acquisition
int read_file(const std::string &fname, int test_id)
{
(void)test_id;
cv::Mat img
= cv::imread(fname, _unchanged_data ? CV_LOAD_IMAGE_UNCHANGED
: (_bw ? CV_LOAD_IMAGE_GRAYSCALE
: CV_LOAD_IMAGE_COLOR));
return add_image(img, fname);
}
    // DB input: only record the database file name here; the backend
    // that consumes _db_fname performs the actual reading later.
    int read_db(const std::string &fname)
    {
      _db_fname = fname;
      return 0;
    }
int read_mem(const std::string &content)
{
_in_mem = true;
cv::Mat timg;
_b64 = possibly_base64(content);
if (_b64)
{
std::string ccontent;
Base64::Decode(content, &ccontent);
std::stringstream sstr;
sstr << ccontent;
deserialize(sstr);
}
else
{
decode(content);
}
if (_imgs.at(0).empty())
return -1;
return 0;
}
// Read a whole directory of images. If the directory contains
// sub-directories, each sub-directory is treated as one class and its
// files are labeled with a class id following iteration order; a flat
// directory yields unlabeled files (-1). Fills _imgs, _img_files and
// _labels. Returns 0 (listing errors throw instead).
int read_dir(const std::string &dir, int test_id)
{
  (void)test_id; // unused: no per-test-set handling at this level
  // list directories in dir
  std::unordered_set<std::string> subdirs;
  if (fileops::list_directory(dir, false, true, false, subdirs))
    throw InputConnectorBadParamException(
        "failed reading text subdirectories in data directory " + dir);
  _logger->info("imginputfileconn: list subdirs size={}", subdirs.size());
  // list files and classes
  std::vector<std::pair<std::string, int>> lfiles; // labeled files
  // NOTE(review): hcorresp is declared but never filled here — the class
  // number / class name correspondence appears to be handled elsewhere
  std::unordered_map<int, std::string>
      hcorresp; // correspondence class number / class name
  if (!subdirs.empty())
    {
      // one sub-directory per class; cl is the running class id
      int cl = 0;
      auto uit = subdirs.begin();
      while (uit != subdirs.end())
        {
          std::unordered_set<std::string> subdir_files;
          if (fileops::list_directory((*uit), true, false, true,
                                      subdir_files))
            throw InputConnectorBadParamException(
                "failed reading image data sub-directory " + (*uit));
          auto fit = subdir_files.begin();
          while (fit != subdir_files.end()) // XXX: re-iterating the file
                                            // is not optimal
            {
              lfiles.push_back(std::pair<std::string, int>((*fit), cl));
              ++fit;
            }
          ++cl;
          ++uit;
        }
    }
  else
    {
      // flat directory: collect the files with no class label
      std::unordered_set<std::string> test_files;
      fileops::list_directory(dir, true, false, false, test_files);
      auto fit = test_files.begin();
      while (fit != test_files.end())
        {
          lfiles.push_back(
              std::pair<std::string, int>((*fit), -1)); // -1 for no class
          ++fit;
        }
    }
  // read images
  _imgs.reserve(lfiles.size());
  _img_files.reserve(lfiles.size());
  _labels.reserve(lfiles.size());
  for (std::pair<std::string, int> &p : lfiles)
    {
      cv::Mat img = cv::imread(
          p.first, _unchanged_data ? CV_LOAD_IMAGE_UNCHANGED
                                   : (_bw ? CV_LOAD_IMAGE_GRAYSCALE
                                          : CV_LOAD_IMAGE_COLOR));
      add_image(img, p.first);
      _img_files.push_back(p.first);
      if (p.second >= 0)
        _labels.push_back(p.second);
      // periodic progress report for large datasets
      if (_imgs.size() % 1000 == 0)
        _logger->info("read {} images", _imgs.size());
    }
  return 0;
}
// map the textual 'interp' parameter onto the matching OpenCV
// interpolation enum; unrecognized values fall back to cubic
int select_cv_interp() const
{
  if (_interp == "nearest")
    return cv::INTER_NEAREST;
  if (_interp == "linear")
    return cv::INTER_LINEAR;
  if (_interp == "area")
    return cv::INTER_AREA;
  if (_interp == "lanczos4")
    return cv::INTER_LANCZOS4;
  // default branch, covers "cubic" and any unknown string
  return cv::INTER_CUBIC;
}
std::vector<cv::Mat> _imgs;      // decoded, resized/cropped images
std::vector<cv::Mat> _orig_imgs; // untouched originals, kept if _keep_orig
std::vector<std::string> _img_files; // source file paths (dir reads)
std::vector<std::pair<int, int>> _imgs_size; // original (rows, cols)
bool _bw = false;           // convert to grayscale
bool _rgb = false;          // convert BGR -> RGB
bool _histogram_equalization = false; // apply histogram equalization
bool _in_mem = false;       // input arrived via read_mem()
bool _unchanged_data = false; // use IMREAD_UNCHANGED when loading
std::vector<int> _labels;   // per-image class labels (dir reads)
int _width = 224;           // resize target width
int _height = 224;          // resize target height
int _crop_width = 0;        // center-crop width (0 = no crop)
int _crop_height = 0;       // center-crop height (0 = no crop)
float _scale = 1.0;         // pixel value scaling factor
bool _scaled = false;       // variable-size scaled mode
int _scale_min = 600;       // min dimension in scaled mode
int _scale_max = 1000;      // max dimension in scaled mode
bool _keep_orig = false;    // retain originals for chained ops
bool _b64 = false;          // last in-memory input was base64
std::string _interp = "cubic"; // interpolation method name
#ifdef USE_CUDA_CV
bool _cuda = false;         // resize on GPU via cuda::resize
#endif
std::string _db_fname;      // db path recorded by read_db()
std::shared_ptr<spdlog::logger> _logger;
};
class ImgInputFileConn : public InputConnectorStrategy
{
public:
ImgInputFileConn() : InputConnectorStrategy()
{
}
ImgInputFileConn(const ImgInputFileConn &i)
: InputConnectorStrategy(i), _width(i._width), _height(i._height),
_crop_width(i._crop_width), _crop_height(i._crop_height), _bw(i._bw),
_rgb(i._rgb), _unchanged_data(i._unchanged_data),
_test_split(i._test_split), _mean(i._mean),
_has_mean_scalar(i._has_mean_scalar), _scale(i._scale),
_scaled(i._scaled), _scale_min(i._scale_min),
_scale_max(i._scale_max), _keep_orig(i._keep_orig),
_interp(i._interp)
#ifdef USE_CUDA_CV
,
_cuda(i._cuda)
#endif
{
}
~ImgInputFileConn()
{
}
void init(const APIData &ad)
{
fillup_parameters(ad);
}
void fillup_parameters(const APIData &ad)
{
auto params = ad.createSharedDTO<dd::DTO::InputConnector>();
fillup_parameters(params);
}
void fillup_parameters(oatpp::Object<DTO::InputConnector> params)
{
// optional parameters.
if (params->width)
_width = params->width;
if (params->height)
_height = params->height;
if (params->crop_width)
{
if (params->crop_width > _width)
{
_logger->error("Crop width must be less than or equal to width");
throw InputConnectorBadParamException(
"Crop width must be less than or equal to width");
}
_width = params->crop_width;
}
if (params->crop_height)
{
if (params->crop_height > _height)
{
_logger->error(
"Crop height must be less than or equal to height");
throw InputConnectorBadParamException(
"Crop height must be less than or equal to height");
}
_height = params->crop_height;
}
_bw |= params->bw;
_rgb |= params->rgb;
_histogram_equalization |= params->histogram_equalization;
_unchanged_data |= params->unchanged_data;
_shuffle |= params->shuffle;
if (params->seed)
_seed = params->seed;
if (params->test_split)
_test_split = params->test_split;
if (params->mean)
{
// NOTE(sileht): if we have two much of this we can create
// an oat++ type that directly handle std::vector<float> instead
// of using the oatpp::Vector<oatpp::Float32>
_mean = std::vector<float>();
for (auto &v : *params->mean)
_mean.push_back(v);
_has_mean_scalar = true;
}
if (params->std)
{
_std = std::vector<float>();
for (auto &v : *params->std)
_std.push_back(v);
}
// Variable size
_scaled |= params->scaled;
if (params->scale)
try
{
_scale = params->scale.retrieve<oatpp::Float64>();
}
catch (const std::runtime_error &error)
{
std::string msg
= "could not read double value for scale input parameter";
_logger->error(msg);
throw InputConnectorBadParamException(msg);
}
if (params->scale_min)
{
_scaled = true;
_scale_min = params->scale_min;
}
if (params->scale_max)
{
_scaled = true;
_scale_max = params->scale_max;
}
// whether to keep original image (for chained ops, e.g. cropping)
_keep_orig |= params->keep_orig;
// image interpolation method
if (params->interp)
_interp = params->interp->std_str();
// timeout
this->set_timeout(params);
#ifdef USE_CUDA_CV
// image resizing on GPU
_cuda |= params->cuda;
#endif
}
void copy_parameters_to(DDImg &dimg) const
{
dimg._bw = _bw;
dimg._rgb = _rgb;
dimg._histogram_equalization = _histogram_equalization;
dimg._unchanged_data = _unchanged_data;
dimg._width = _width;
dimg._height = _height;
dimg._crop_width = _crop_width;
dimg._crop_height = _crop_height;
dimg._scale = _scale;
dimg._scaled = _scaled;
dimg._scale_min = _scale_min;
dimg._scale_max = _scale_max;
dimg._keep_orig = _keep_orig;
dimg._interp = _interp;
#ifdef USE_CUDA_CV
dimg._cuda = _cuda;
#endif
dimg._logger = _logger;
}
int feature_size() const
{
if (_bw || _unchanged_data)
{
// XXX: only valid for single channels
if (_crop_width != 0 && _crop_height != 0)
return _crop_width * _crop_height;
else
return _width * _height;
}
else
{
// RGB
if (_crop_width != 0 && _crop_height != 0)
return _crop_width * _crop_height * 3;
else
return _width * _height * 3;
}
}
int batch_size() const
{
return _images.size();
}
int test_batch_size() const
{
return _test_images.size();
}
void get_data(oatpp::Object<DTO::ServicePredict> pred_in)
{
if (!pred_in->_data_raw_img.empty())
{
_ids = pred_in->_ids;
_meta_uris = pred_in->_meta_uris;
_index_uris = pred_in->_index_uris;
_images = pred_in->_data_raw_img;
std::vector<cv::Mat> rimgs;
std::vector<std::string> uris;
int i = 0;
for (auto img : _images)
{
cv::Mat rimg;
resize(img, rimg, cv::Size(_width, _height), 0, 0);
if (_bw && rimg.channels() > 1)
{
cv::Mat bwimg;
cv::cvtColor(rimg, bwimg, CV_BGR2GRAY);
rimg = bwimg;
}
_images_size.push_back(std::pair<int, int>(img.rows, img.cols));
if (_keep_orig)
_orig_images.push_back(std::move(img));
if (!_ids.empty())
uris.push_back(_ids.at(i));
else
{
_ids.push_back(std::to_string(i));
uris.push_back(_ids.back());
}
rimgs.push_back(std::move(rimg));
++i;
}
_images = rimgs;
if (!uris.empty())
_uris = uris;
}
else
InputConnectorStrategy::get_data(pred_in);
}
void get_data(const APIData &ad)
{
// check for raw cv::Mat
if (ad.has("data_raw_img"))
{
if (ad.has("ids"))
_ids = ad.get("ids").get<std::vector<std::string>>();
if (ad.has("meta_uris"))
_meta_uris = ad.get("meta_uris").get<std::vector<std::string>>();
if (ad.has("index_uris"))
_index_uris = ad.get("index_uris").get<std::vector<std::string>>();
_images = ad.get("data_raw_img").get<std::vector<cv::Mat>>();
std::vector<cv::Mat> rimgs;
std::vector<std::string> uris;
int i = 0;
for (auto img : _images)
{
cv::Mat rimg;
resize(img, rimg, cv::Size(_width, _height), 0, 0);
if (_bw && rimg.channels() > 1)
{
cv::Mat bwimg;
cv::cvtColor(rimg, bwimg, CV_BGR2GRAY);
rimg = bwimg;
}
_images_size.push_back(std::pair<int, int>(img.rows, img.cols));
if (_keep_orig)
_orig_images.push_back(std::move(img));
if (!_ids.empty())
uris.push_back(_ids.at(i));
else
{
_ids.push_back(std::to_string(i));
uris.push_back(_ids.back());
}
rimgs.push_back(std::move(rimg));
++i;
}
_images = rimgs;
if (!uris.empty())
_uris = uris;
}
else
InputConnectorStrategy::get_data(ad);
}
void transform(const APIData &ad)
{
if (ad.has(
"parameters")) // hotplug of parameters, overriding the defaults
{
APIData ad_param = ad.getobj("parameters");
if (ad_param.has("input"))
{
fillup_parameters(ad_param.getobj("input"));
}
}
get_data(ad);
transform(nullptr);
}
void transform(oatpp::Object<DTO::ServicePredict> input_dto)
{
if (input_dto != nullptr) // [temporary] == nullptr if called from
// transform(APIData)
{
fillup_parameters(input_dto->parameters->input);
get_data(input_dto);
}
if (!_images.empty()) // got ready raw images
{
return;
}
int catch_read = 0;
std::string catch_msg;
std::vector<std::string> uris;
std::vector<std::string> meta_uris;
std::vector<std::string> index_uris;
std::vector<std::string> failed_uris;
#pragma omp parallel for
for (size_t i = 0; i < _uris.size(); i++)
{
bool no_img = false;
std::string u = _uris.at(i);
DataEl<DDImg> dimg(this->_input_timeout);
copy_parameters_to(dimg._ctype);
try
{
if (dimg.read_element(u, this->_logger))
{
_logger->error("no data for image {}", u);
no_img = true;
}
if (!dimg._ctype._db_fname.empty())
_db_fname = dimg._ctype._db_fname;
}
catch (std::exception &e)
{
#pragma omp critical
{
++catch_read;
catch_msg = e.what();
failed_uris.push_back(u);
no_img = true;
}
}
if (no_img)
continue;
if (!_db_fname.empty())
continue;
#pragma omp critical
{
_images.insert(_images.end(),
std::make_move_iterator(dimg._ctype._imgs.begin()),
std::make_move_iterator(dimg._ctype._imgs.end()));
if (_keep_orig)
_orig_images.insert(
_orig_images.end(),
std::make_move_iterator(dimg._ctype._orig_imgs.begin()),
std::make_move_iterator(dimg._ctype._orig_imgs.end()));
_images_size.insert(
_images_size.end(),
std::make_move_iterator(dimg._ctype._imgs_size.begin()),
std::make_move_iterator(dimg._ctype._imgs_size.end()));
if (!dimg._ctype._labels.empty())
_test_labels.insert(
_test_labels.end(),
std::make_move_iterator(dimg._ctype._labels.begin()),
std::make_move_iterator(dimg._ctype._labels.end()));
if (!_ids.empty())
uris.push_back(_ids.at(i));
else if (!dimg._ctype._b64 && dimg._ctype._imgs.size() == 1)
uris.push_back(u);
else if (!dimg._ctype._img_files.empty())
uris.insert(
uris.end(),
std::make_move_iterator(dimg._ctype._img_files.begin()),
std::make_move_iterator(dimg._ctype._img_files.end()));
else
uris.push_back(std::to_string(i));
if (!_meta_uris.empty())
meta_uris.push_back(_meta_uris.at(i));
if (!_index_uris.empty())
index_uris.push_back(_index_uris.at(i));
}
}
if (catch_read)
{
for (auto s : failed_uris)
_logger->error("failed reading image {}", s);
throw InputConnectorBadParamException(catch_msg);
}
_uris = uris;
_ids = _uris; // since uris may be in different order than before
// transform
_meta_uris = meta_uris;
_index_uris = index_uris;
if (!_db_fname.empty())
return; // db filename is passed to backend
// shuffle before possible split
if (_shuffle)
{
std::mt19937 g;
if (_seed >= 0)
g = std::mt19937(_seed);
else
{
std::random_device rd;
g = std::mt19937(rd());
}
std::shuffle(_images.begin(), _images.end(),
g); // XXX beware: labels are not shuffled, i.e. let's
// not shuffle while testing
}
// split as required
if (_test_split > 0)
{
int split_size = std::floor(_images.size() * (1.0 - _test_split));
auto chit = _images.begin();
auto dchit = chit;
int cpos = 0;
while (chit != _images.end())
{
if (cpos == split_size)
{
if (dchit == _images.begin())
dchit = chit;
_test_images.push_back((*chit));
}
else
++cpos;
++chit;
}
_images.erase(dchit, _images.end());
_logger->info("data split test size={} / remaining data size={}",
_test_images.size(), _images.size());
}
if (_images.empty())
throw InputConnectorBadParamException("no image could be found");
}
static std::vector<double>
img_resize_vector(const std::vector<double> &vals, const int height_net,
const int width_net, const int height_dest,
const int width_dest, bool resize_nn)
{
cv::Mat segimg = cv::Mat(height_net, width_net, CV_64FC1);
std::memcpy(segimg.data, vals.data(), vals.size() * sizeof(double));
cv::Mat segimg_res;
if (resize_nn)
cv::resize(segimg, segimg_res, cv::Size(width_dest, height_dest), 0, 0,
cv::INTER_NEAREST);
else
cv::resize(segimg, segimg_res, cv::Size(width_dest, height_dest), 0, 0,
cv::INTER_LINEAR);
return std::vector<double>((double *)segimg_res.data,
(double *)segimg_res.data
+ segimg_res.rows * segimg_res.cols);
}
// data
std::vector<cv::Mat> _images;
std::vector<cv::Mat> _orig_images; /**< stored upon request. */
std::vector<cv::Mat> _test_images;
std::vector<int> _test_labels;
std::vector<std::pair<int, int>> _images_size;
// image parameters
int _width = 224;
int _height = 224;
int _crop_width = 0;
int _crop_height = 0;
bool _bw = false; /**< whether to convert to black & white. */
bool _rgb = false; /**< whether to convert to rgb. */
bool _histogram_equalization
= false; /**< whether to apply histogram equalizer. */
bool _unchanged_data = false; /**< IMREAD_UNCHANGED flag. */
double _test_split = 0.0; /**< auto-split of the dataset. */
int _seed = -1; /**< shuffling seed. */
std::vector<float>
_mean; /**< mean image pixels, to be subtracted from images. */
std::vector<float> _std; /**< std, to divide image values. */
bool _has_mean_scalar = false; /**< whether scalar is set. */
std::string _db_fname;
double _scale = 1.0;
bool _scaled = false;
int _scale_min = 600;
int _scale_max = 1000;
bool _keep_orig = false;
std::string _interp = "cubic";
#ifdef USE_CUDA_CV
bool _cuda = false;
#endif
};
}
#ifdef USE_CAFFE
#include "caffeinputconns.h"
#endif
#ifdef USE_TF
#include "backends/tf/tfinputconns.h"
#endif
#ifdef USE_DLIB
#include "backends/dlib/dlibinputconns.h"
#endif
#ifdef USE_NCNN
#include "backends/ncnn/ncnninputconns.h"
#endif
#ifdef USE_CAFFE2
#include "backends/caffe2/caffe2inputconns.h"
#endif
#ifdef USE_TENSORRT
#include "backends/tensorrt/tensorrtinputconns.h"
#endif
#ifdef USE_TORCH
#include "backends/torch/torchinputconns.h"
#endif
#endif
|
Euclid_apply.c | /******************************************************************************
* Copyright (c) 1998 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_Euclid.h"
/* #include "Euclid_dh.h" */
/* #include "Mat_dh.h" */
/* #include "Factor_dh.h" */
/* #include "Parser_dh.h" */
/* #include "TimeLog_dh.h" */
/* #include "SubdomainGraph_dh.h" */
static void scale_rhs_private(Euclid_dh ctx, HYPRE_Real *rhs);
static void permute_vec_n2o_private(Euclid_dh ctx, HYPRE_Real *xIN, HYPRE_Real *xOUT);
static void permute_vec_o2n_private(Euclid_dh ctx, HYPRE_Real *xIN, HYPRE_Real *xOUT);
#undef __FUNC__
#define __FUNC__ "Euclid_dhApply"
/* Apply the Euclid preconditioner: lhs <- M^{-1} rhs.
 * Handles the identity case ("none" algorithms), optional row/column
 * permutation (when a subdomain graph ctx->sg is present), optional row
 * scaling of the rhs, then dispatches to the sequential / block-Jacobi
 * or parallel (PILU) triangular solves. The input rhs is never modified.
 */
void Euclid_dhApply(Euclid_dh ctx, HYPRE_Real *rhs, HYPRE_Real *lhs)
{
  START_FUNC_DH
  HYPRE_Real *rhs_, *lhs_;
  HYPRE_Real t1, t2;
  t1 = hypre_MPI_Wtime();
  /* default settings; for everything except PILU */
  ctx->from = 0;
  ctx->to = ctx->m;
  /* case 1: no preconditioning -- plain copy of rhs into lhs */
  if (! strcmp(ctx->algo_ilu, "none") || ! strcmp(ctx->algo_par, "none")) {
    HYPRE_Int i, m = ctx->m;
    for (i=0; i<m; ++i) lhs[i] = rhs[i];
    goto END_OF_FUNCTION;
  }
  /*----------------------------------------------------------------
   * permute and scale rhs vector
   *----------------------------------------------------------------*/
  /* permute rhs vector; lhs is borrowed as scratch for the permuted rhs,
     and ctx->work2 becomes the solve output buffer */
  if (ctx->sg != NULL) {
    /* hypre_printf("@@@@@@@@@@@@@@@@@ permute_vec_n2o_private\n"); */
    permute_vec_n2o_private(ctx, rhs, lhs); CHECK_V_ERROR;
    rhs_ = lhs;
    lhs_ = ctx->work2;
  } else {
    rhs_ = rhs;
    lhs_ = lhs;
  }
  /* scale rhs vector (in place on rhs_, i.e. on the scratch copy) */
  if (ctx->isScaled) {
    /* hypre_printf("@@@@@@@@@@@@@@@@@ scale_rhs_private\n"); */
    scale_rhs_private(ctx, rhs_); CHECK_V_ERROR;
  }
  /* note: rhs_ is permuted, scaled; the input, "rhs" vector has
     not been disturbed.
  */
  /*----------------------------------------------------------------
   * big switch to choose the appropriate triangular solve
   *----------------------------------------------------------------*/
  /* sequential and mpi block jacobi cases */
  if (np_dh == 1 ||
      ! strcmp(ctx->algo_par, "bj") ) {
    Factor_dhSolveSeq(rhs_, lhs_, ctx); CHECK_V_ERROR;
  }
  /* pilu case */
  else {
    Factor_dhSolve(rhs_, lhs_, ctx); CHECK_V_ERROR;
  }
  /*----------------------------------------------------------------
   * unpermute lhs vector
   * (note: don't need to unscale, because we were clever)
   *----------------------------------------------------------------*/
  if (ctx->sg != NULL) {
    permute_vec_o2n_private(ctx, lhs_, lhs); CHECK_V_ERROR;
  }
END_OF_FUNCTION: ;
  t2 = hypre_MPI_Wtime();
  /* collective timing for triangular solves */
  ctx->timing[TRI_SOLVE_T] += (t2 - t1);
  /* collective timing for setup+krylov+triSolves
     (intent is to time linear solve, but this is
     at best problematical!)
  */
  ctx->timing[TOTAL_SOLVE_TEMP_T] = t2 - ctx->timing[SOLVE_START_T];
  /* total triangular solve count */
  ctx->its += 1;
  ctx->itsTotal += 1;
  END_FUNC_DH
}
#undef __FUNC__
#define __FUNC__ "scale_rhs_private"
/* Scale the (already permuted) rhs in place by the row scaling factors
 * (ctx->scale) that were applied to the matrix during factorization;
 * no-op if the matrix was not scaled.
 * NOTE(review): the orphaned "#pragma omp for" only shares work when
 * this function is reached from inside an active parallel region;
 * called serially it degenerates to a plain loop -- confirm intent. */
void scale_rhs_private(Euclid_dh ctx, HYPRE_Real *rhs)
{
  START_FUNC_DH
  HYPRE_Int i, m = ctx->m;
  REAL_DH *scale = ctx->scale;
  /* if matrix was scaled, must scale the rhs */
  if (scale != NULL) {
#ifdef USING_OPENMP_DH
#pragma omp for schedule(static)
#endif
    for (i=0; i<m; ++i) { rhs[i] *= scale[i]; }
  }
  END_FUNC_DH
}
#undef __FUNC__
#define __FUNC__ "permute_vec_o2n_private"
/* Gather xIN into xOUT using the subdomain graph's old-to-new column
 * permutation: xOUT[i] = xIN[o2n_col[i]]. */
void permute_vec_o2n_private(Euclid_dh ctx, HYPRE_Real *xIN, HYPRE_Real *xOUT)
{
  START_FUNC_DH
  HYPRE_Int idx = 0;
  HYPRE_Int len = ctx->m;
  HYPRE_Int *perm = ctx->sg->o2n_col;
  while (idx < len) {
    xOUT[idx] = xIN[perm[idx]];
    ++idx;
  }
  END_FUNC_DH
}
#undef __FUNC__
#define __FUNC__ "permute_vec_n2o_private"
/* Gather xIN into xOUT using the subdomain graph's new-to-old row
 * permutation: xOUT[i] = xIN[n2o_row[i]]. */
void permute_vec_n2o_private(Euclid_dh ctx, HYPRE_Real *xIN, HYPRE_Real *xOUT)
{
  START_FUNC_DH
  HYPRE_Int idx = 0;
  HYPRE_Int len = ctx->m;
  HYPRE_Int *perm = ctx->sg->n2o_row;
  while (idx < len) {
    xOUT[idx] = xIN[perm[idx]];
    ++idx;
  }
  END_FUNC_DH
}
|
ast-dump-openmp-parallel-for-simd.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test_one(int x) { // single canonical loop, no collapse clause
#pragma omp parallel for simd
for (int i = 0; i < x; i++)
; // empty body: only the loop structure matters for the AST dump
}
void test_two(int x, int y) { // nested loops, still no collapse clause
#pragma omp parallel for simd
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
; // inner loop is ordinary code, not part of the workshared nest
}
void test_three(int x, int y) { // explicit collapse(1): same nest as test_two
#pragma omp parallel for simd collapse(1)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
; // only the outer loop is associated with the directive
}
void test_four(int x, int y) { // collapse(2): both loops associated
#pragma omp parallel for simd collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
; // the two-level nest forms the collapsed iteration space
}
void test_five(int x, int y, int z) { // collapse(2) with a deeper nest
#pragma omp parallel for simd collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
for (int i = 0; i < z; i++)
; // innermost loop stays outside the collapsed iteration space
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-parallel-for-simd.c:3:1, line:7:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1>
// CHECK-NEXT: | `-OMPParallelForSimdDirective {{.*}} <line:4:1, col:30>
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-parallel-for-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1>
// CHECK-NEXT: | `-OMPParallelForSimdDirective {{.*}} <line:10:1, col:30>
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-parallel-for-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1>
// CHECK-NEXT: | `-OMPParallelForSimdDirective {{.*}} <line:17:1, col:42>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:31, col:41>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:40> 'int'
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:40> 'int' 1
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-parallel-for-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1>
// CHECK-NEXT: | `-OMPParallelForSimdDirective {{.*}} <line:24:1, col:42>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:31, col:41>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:40> 'int'
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:40> 'int' 2
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-parallel-for-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1>
// CHECK-NEXT: `-OMPParallelForSimdDirective {{.*}} <line:31:1, col:42>
// CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:31, col:41>
// CHECK-NEXT: | `-ConstantExpr {{.*}} <col:40> 'int'
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:40> 'int' 2
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-parallel-for-simd.c:31:1) *const restrict'
// CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
|
SpMat.h | /******************************************************************************
* ** Copyright (c) 2016, Intel Corporation **
* ** All rights reserved. **
* ** **
* ** Redistribution and use in source and binary forms, with or without **
* ** modification, are permitted provided that the following conditions **
* ** are met: **
* ** 1. Redistributions of source code must retain the above copyright **
* ** notice, this list of conditions and the following disclaimer. **
* ** 2. Redistributions in binary form must reproduce the above copyright **
* ** notice, this list of conditions and the following disclaimer in the **
* ** documentation and/or other materials provided with the distribution. **
* ** 3. Neither the name of the copyright holder nor the names of its **
* ** contributors may be used to endorse or promote products derived **
* ** from this software without specific prior written permission. **
* ** **
* ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
* ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
* ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
* ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
* ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
* ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
* ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
* ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
* ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
* ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
* ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* * ******************************************************************************/
/* Narayanan Sundaram (Intel Corp.), Michael Anderson (Intel Corp.)
* * ******************************************************************************/
#ifndef SRC_SPMAT_H_
#define SRC_SPMAT_H_
#include <string>
#include <algorithm>
#include "GMDP/utils/binary_search.h"
template <typename T>
bool compare_tile_id(const tedge_t<T>& a, const tedge_t<T>& b) {
  // Strict weak ordering on tile_id; used to sort edges by owning tile.
  return a.tile_id < b.tile_id;
}
template <typename SpTile>
class SpMat {
 public:
  int ntiles_x;  // number of tile columns
  int ntiles_y;  // number of tile rows
  int m;         // global number of rows
  int n;         // global number of columns
  // tiles[tile_i][tile_j]: the tile object; allocated only on the owning
  // rank, NULL elsewhere.
  std::vector<std::vector<SpTile*> > tiles;
  std::vector<int> start_idx;  // column partition boundaries, size ntiles_x + 1
  std::vector<int> start_idy;  // row partition boundaries, size ntiles_y + 1
  std::vector<int> nodeIds;    // owning rank of tile (tile_i + tile_j * ntiles_y)
  friend boost::serialization::access;

  // Boost.Serialization hook: (de)serializes all members.
  template<class Archive>
  void serialize(Archive& ar, const unsigned int version) {
    ar & ntiles_x;
    ar & ntiles_y;
    ar & m;
    ar & n;
    ar & tiles;
    ar & start_idx;
    ar & start_idy;
    ar & nodeIds;
  }

  // Maps a (src, dst) coordinate to its tile. Boundaries are treated as
  // half-open on the left: a coordinate c belongs to partition i when
  // start[i] < c <= start[i+1]. On success writes the tile row to *ival and
  // the tile column to *jval and returns the linear tile id
  // (ival + jval * ntiles_y); returns -1 (and prints a diagnostic) when the
  // coordinate falls outside every partition.
  inline int getPartition(const int src, const int dst, int* ival, int* jval) const {
    (*ival) = -1;
    (*jval) = -1;
    for (int i = 0; i < ntiles_y; i++) {
      if ((src > start_idy[i]) && (src <= start_idy[i + 1])) {
        (*ival) = i;
        break;
      }
    }
    for (int j = 0; j < ntiles_x; j++) {
      if ((dst > start_idx[j]) && (dst <= start_idx[j + 1])) {
        (*jval) = j;
        break;
      }
    }
    if ((*ival) == -1 || (*jval) == -1) {
      printf("%d %d == -1\n", src, dst);
      return -1;
    }
    return (*ival) + (*jval) * ntiles_y;
  }

  // Shuffles the edges of `blob` to the ranks that own their tiles (all-to-all
  // exchange keyed by nodeIds), then builds the locally-owned tiles from the
  // received edges. Must be called after Allocate2DPartitioned has set up the
  // partition. Collective over MPI_COMM_WORLD. Does not take ownership of
  // blob.edges.
  template <typename T>
  void ingestEdgelist(edgelist_t<T>& blob) {
    int global_nrank = get_global_nrank();
    int global_myrank = get_global_myrank();
    int nnz_l = blob.nnz;
    edge_t<T>* edge_list = blob.edges;
    printf("Rank %d: Before shuffle %d edges\n", global_myrank, blob.nnz);
    edge_t<T>* received_edges;
    unsigned long int new_nnz = 0;
    if (global_nrank == 1) {
      // Single rank: nothing to exchange, just copy the input edges.
      new_nnz = nnz_l;
      received_edges = new edge_t<T>[new_nnz];
      memcpy(received_edges, edge_list, new_nnz * sizeof(edge_t<T>));
    } else {
      // Tag every edge with the rank that owns its destination tile.
      tedge_t<T>* tedges = new tedge_t<T>[nnz_l];
#pragma omp parallel for
      for (unsigned long i = 0; i < (unsigned long)nnz_l; i++) {
        tedges[i].src = edge_list[i].src;
        tedges[i].dst = edge_list[i].dst;
        tedges[i].val = edge_list[i].val;
        int ival, jval;
        int tile_id = getPartition(edge_list[i].src, edge_list[i].dst, &ival, &jval);
        assert(tile_id != -1);
        tedges[i].tile_id = nodeIds[ival + jval * ntiles_y];
      }
      // Sort by destination rank so each rank's edges are contiguous.
      __gnu_parallel::sort(tedges, tedges + nnz_l, compare_tile_id<T>);
      int* assignment = new int[nnz_l];
#pragma omp parallel for
      for (unsigned long i = 0; i < (unsigned long)nnz_l; i++) {
        edge_list[i].src = tedges[i].src;
        edge_list[i].dst = tedges[i].dst;
        edge_list[i].val = tedges[i].val;
        assignment[i] = tedges[i].tile_id;
      }
      delete [] tedges;
      // Compute per-rank send offsets/counts from the sorted assignment.
      unsigned long int* positions = new unsigned long[global_nrank + 1];
      unsigned long int* counts = new unsigned long[global_nrank];
      unsigned long int* recv_positions = new unsigned long[global_nrank + 1];
      unsigned long int* recv_counts = new unsigned long[global_nrank];
      unsigned long int current_count = 0;
      for (int i = 0; i < global_nrank; i++) {
        int point = binary_search_right_border(assignment, i, 0, nnz_l, nnz_l);
        if (point == -1) {
          // No edges destined for rank i.
          counts[i] = 0;
          positions[i] = current_count;
        } else {
          counts[i] = (point + 1) - current_count;
          positions[i] = current_count;
          current_count = (point + 1);
        }
      }
      positions[global_nrank] = nnz_l;
      MPI_Barrier(MPI_COMM_WORLD);
      delete [] assignment;
      // Exchange counts so every rank knows how much it will receive.
      MPI_Request* mpi_req = new MPI_Request[2 * global_nrank];
      MPI_Status* mpi_status = new MPI_Status[2 * global_nrank];
      for (int i = 0; i < global_nrank; i++) {
        MPI_Isend(&counts[i], 1, MPI_UNSIGNED_LONG, i, global_myrank, MPI_COMM_WORLD,
                  &mpi_req[i]);
      }
      for (int i = 0; i < global_nrank; i++) {
        MPI_Irecv(&recv_counts[i], 1, MPI_UNSIGNED_LONG, i, i, MPI_COMM_WORLD,
                  &mpi_req[i + global_nrank]);
      }
      MPI_Waitall(2 * global_nrank, mpi_req, mpi_status);
      MPI_Barrier(MPI_COMM_WORLD);
      recv_positions[0] = 0;
      for (int i = 0; i < global_nrank; i++) {
        new_nnz += recv_counts[i];
        recv_positions[i + 1] = new_nnz;
      }
      printf("Rank %d: After shuffle %lu edges\n", global_myrank, new_nnz);
      // Exchange the edges themselves as raw bytes via a contiguous datatype.
      MPI_Datatype MPI_EDGE_T;
      MPI_Type_contiguous(sizeof(edge_t<T>), MPI_CHAR, &MPI_EDGE_T);
      MPI_Type_commit(&MPI_EDGE_T);
      for (int i = 0; i < global_nrank; i++) {
        MPI_Isend(edge_list + positions[i], counts[i],
                  MPI_EDGE_T, i, global_myrank, MPI_COMM_WORLD, &mpi_req[i]);
      }
      received_edges = new edge_t<T>[new_nnz];
      for (int i = 0; i < global_nrank; i++) {
        MPI_Irecv(received_edges + recv_positions[i], recv_counts[i],
                  MPI_EDGE_T, i, i, MPI_COMM_WORLD, &mpi_req[i + global_nrank]);
      }
      MPI_Waitall(2 * global_nrank, mpi_req, mpi_status);
      MPI_Barrier(MPI_COMM_WORLD);
      // Release the committed datatype (was leaked previously).
      MPI_Type_free(&MPI_EDGE_T);
      delete [] mpi_status;
      delete [] mpi_req;
      delete [] positions;
      delete [] counts;
      delete [] recv_positions;
      delete [] recv_counts;
    }
    printf("Rank %d: After shuffle %lu edges\n", global_myrank, new_nnz);
    // Re-tag the received (now purely local) edges with their local tile id
    // and sort so each tile's edges are contiguous.
    tedge_t<T>* tedges2 = new tedge_t<T>[new_nnz];
#pragma omp parallel for
    for (unsigned long i = 0; i < new_nnz; i++) {
      tedges2[i].src = received_edges[i].src;
      tedges2[i].dst = received_edges[i].dst;
      tedges2[i].val = received_edges[i].val;
      int ival, jval;
      tedges2[i].tile_id = getPartition(received_edges[i].src, received_edges[i].dst, &ival, &jval);
      assert(tedges2[i].tile_id != -1);
    }
    __gnu_parallel::sort(tedges2, tedges2 + new_nnz, compare_tile_id<T>);
    int* assignment2 = new int[new_nnz];
#pragma omp parallel for
    for (unsigned long i = 0; i < new_nnz; i++) {
      received_edges[i].src = tedges2[i].src;
      received_edges[i].dst = tedges2[i].dst;
      received_edges[i].val = tedges2[i].val;
      assignment2[i] = tedges2[i].tile_id;
    }
    delete [] tedges2;
    // Construct each locally-owned tile from its contiguous edge range.
    for (int tile_j = 0; tile_j < ntiles_x; tile_j++) {
      for (int tile_i = 0; tile_i < ntiles_y; tile_i++) {
        if (nodeIds[tile_i + tile_j * ntiles_y] == global_myrank) {
          int tile_m = start_idy[tile_i + 1] - start_idy[tile_i];
          int tile_n = start_idx[tile_j + 1] - start_idx[tile_j];
          int this_tile_id = tile_i + tile_j * ntiles_y;
          // Find left and right borders of this tile's edges.
          int start_nz = binary_search_left_border(assignment2, this_tile_id, 0, new_nnz, new_nnz);
          int end_nz = binary_search_right_border(assignment2, this_tile_id, 0, new_nnz, new_nnz);
          int nnz = 0;
          if ((start_nz != -1) && (end_nz != -1)) {
            nnz = (end_nz + 1) - start_nz;
          }
          if (nnz <= 0) {
            // Empty tile.
            tiles[tile_i][tile_j] = new SpTile(tile_m, tile_n);
          } else {
            tiles[tile_i][tile_j] =
                new SpTile(received_edges + start_nz, tile_m, tile_n, nnz, start_idy[tile_i],
                           start_idx[tile_j]);
          }
        }
      }
    }
    delete [] assignment2;
    delete [] received_edges;
    MPI_Barrier(MPI_COMM_WORLD);
  }

  // Sets up an ntiles_y x ntiles_x tile partition of an m x n matrix.
  // pfn(j, i, ntx, nty, nranks) decides which rank owns tile (i, j).
  // Partition boundaries are rounded up to multiples of 256 and clamped to
  // the matrix dimensions. Only allocates bookkeeping; tiles are created by
  // ingestEdgelist.
  void Allocate2DPartitioned(int m, int n, int _ntiles_x, int _ntiles_y,
                             int (*pfn)(int, int, int, int, int)) {
    int global_nrank = get_global_nrank();
    int global_myrank = get_global_myrank();
    ntiles_x = _ntiles_x;
    ntiles_y = _ntiles_y;
    assert(ntiles_x > 0);
    assert(ntiles_y > 0);
    this->m = m;
    this->n = n;
    int vx, vy;
    int roundup = 256;
    // Tile extents, rounded up to a multiple of `roundup`.
    vx = ((((n + ntiles_x - 1) / ntiles_x) + roundup - 1) / roundup) * roundup;
    vy = ((((m + ntiles_y - 1) / ntiles_y) + roundup - 1) / roundup) * roundup;
    for (int j = 0; j < ntiles_x; j++) {
      for (int i = 0; i < ntiles_y; i++) {
        nodeIds.push_back(pfn(j, i, ntiles_x, ntiles_y, global_nrank));
      }
    }
    for (int j = 0; j < ntiles_x; j++) {
      start_idx.push_back(std::min(vx * j, n));
    }
    for (int i = 0; i < ntiles_y; i++) {
      start_idy.push_back(std::min(vy * i, m));
    }
    start_idx.push_back(n);
    start_idy.push_back(m);
    // Allocate space for tiles (all NULL until ingestEdgelist fills them).
    for (int tile_i = 0; tile_i < ntiles_y; tile_i++) {
      std::vector<SpTile*> tmp;
      for (int tile_j = 0; tile_j < ntiles_x; tile_j++) {
        tmp.push_back((SpTile*)NULL);
      }
      tiles.push_back(tmp);
    }
  }

  SpMat() {}

  // Builds a partitioned matrix directly from an edge list.
  template <typename T>
  SpMat(edgelist_t<T> edgelist, int ntx,
        int nty, int (*pfn)(int, int, int, int, int)) {
    Allocate2DPartitioned(edgelist.m, edgelist.n, ntx, nty, pfn);
    ingestEdgelist(edgelist);
  }

  // Frees all locally-owned tiles (delete on NULL entries is a no-op).
  ~SpMat()
  {
    for (auto it1 = tiles.begin(); it1 != tiles.end(); it1++)
    {
      for (auto it2 = it1->begin(); it2 != it1->end(); it2++)
      {
        delete *it2;
      }
    }
  }

  // Collects the edges of all locally-owned tiles into *edgelist.
  // edgelist->edges is allocated with _mm_malloc only when nnz > 0; the
  // caller owns it and must release it with _mm_free.
  template <typename T>
  void get_edges(edgelist_t<T>* edgelist) const {
    int global_nrank = get_global_nrank();
    int global_myrank = get_global_myrank();
    // Count local nonzeros first so we can allocate in one shot.
    int nnzs = 0;
    for (int i = 0; i < ntiles_y; i++) {
      for (int j = 0; j < ntiles_x; j++) {
        if (nodeIds[i + j * ntiles_y] == global_myrank) {
          nnzs += tiles[i][j]->nnz;
        }
      }
    }
    edgelist->m = m;
    edgelist->n = n;
    edgelist->nnz = nnzs;
    if (nnzs > 0)
    {
      edgelist->edges = reinterpret_cast<edge_t<T>*>(
          _mm_malloc((uint64_t)nnzs * (uint64_t)sizeof(edge_t<T>), 64));
      nnzs = 0;
      for (int i = 0; i < ntiles_y; i++) {
        for (int j = 0; j < ntiles_x; j++) {
          if (nodeIds[i + j * ntiles_y] == global_myrank) {
            tiles[i][j]
                ->get_edges(edgelist->edges + nnzs, start_idy[i], start_idx[j]);
            nnzs += tiles[i][j]->nnz;
          }
        }
      }
    }
  }

  // Returns the global number of stored nonzeros (sum over all ranks).
  // Collective over MPI_COMM_WORLD.
  uint64_t getNNZ()
  {
    int global_myrank = get_global_myrank();
    uint64_t total_nnz = 0;
    for (int i = 0; i < ntiles_y; i++)
    {
      for (int j = 0; j < ntiles_x; j++)
      {
        if (nodeIds[i + j * ntiles_y] == global_myrank)
        {
          total_nnz += tiles[i][j]->nnz;
        }
      }
    }
    // Global reduction. The datatype must match the uint64_t buffer:
    // MPI_INT (previously used here) only covers 4 of the 8 bytes.
    MPI_Allreduce(MPI_IN_PLACE, &total_nnz, 1, MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD);
    return total_nnz;
  }
};
template <template <typename> class SpTile, typename T>
void get_row_ranks(const SpMat<SpTile<T> >* mat,
std::vector<std::set<int> >* row_ranks_out,
std::vector<std::set<int> >* col_ranks_out) {
for (int i = 0; i < mat->ntiles_y; i++) {
// Create set of row nodeIDs
std::set<int> row_ranks;
for (int j = 0; j < mat->ntiles_x; j++) {
row_ranks.insert(mat->nodeIds[i + j * mat->ntiles_y]);
}
row_ranks_out->push_back(row_ranks);
}
for (int j = 0; j < mat->ntiles_x; j++) {
// Create set of col nodeIDs
std::set<int> col_ranks;
for (int i = 0; i < mat->ntiles_y; i++) {
col_ranks.insert(mat->nodeIds[i + j * mat->ntiles_y]);
}
col_ranks_out->push_back(col_ranks);
}
}
template <template <typename> class SpTile, typename T>
void Transpose(const SpMat<SpTile<T> >* mat, SpMat<SpTile<T> >** matc, int ntx,
               int nty, int (*pfn)(int, int, int, int, int)) {
  // Extract the local edges, flip src/dst on each, swap the matrix
  // dimensions, and build a fresh partitioned matrix from the result.
  edgelist_t<T> edgelist;
  mat->get_edges(&edgelist);
#pragma omp parallel for
  for (int k = 0; k < edgelist.nnz; k++) {
    int swapped = edgelist.edges[k].src;
    edgelist.edges[k].src = edgelist.edges[k].dst;
    edgelist.edges[k].dst = swapped;
  }
  int dim = edgelist.m;
  edgelist.m = edgelist.n;
  edgelist.n = dim;
  (*matc) = new SpMat<SpTile<T> >(edgelist, ntx, nty, pfn);
  // get_edges only allocates the buffer when there is at least one edge.
  if (edgelist.nnz > 0)
  {
    _mm_free(edgelist.edges);
  }
}
#endif // SRC_SPMAT_H_
|
omp_realloc_size_0.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include <omp.h>
int main()
{
  // Two allocators sharing the same traits: a 2 MB pool that falls back to
  // default memory when exhausted.
  omp_alloctrait_t traits[2];
  traits[0].key = omp_atk_pool_size;
  traits[0].value = 2 * 1024 * 1024;
  traits[1].key = omp_atk_fallback;
  traits[1].value = omp_atv_default_mem_fb;
  omp_allocator_handle_t large_al = omp_init_allocator(omp_large_cap_mem_space, 2, traits);
  omp_allocator_handle_t default_al = omp_init_allocator(omp_default_mem_space, 2, traits);
  printf("allocator large created: %p\n", (void *)large_al);
  printf("allocator default created: %p\n", (void *)default_al);

  void *ptr[2];
  void *nptr[2];
#pragma omp parallel num_threads(2)
  {
    int tid = omp_get_thread_num();
    // Allocate 1 MB per thread, then realloc it to size 0, which must
    // free the block and yield NULL.
    ptr[tid] = omp_alloc(1024 * 1024, default_al);
#pragma omp barrier
    nptr[tid] = omp_realloc(ptr[tid], 0, large_al, default_al);
#pragma omp barrier
    printf("th %d, nptr %p\n", tid, nptr[tid]);
    omp_free(nptr[tid], large_al);
  }
  // Both ptr pointers should be non-NULL
  if (ptr[0] == NULL || ptr[1] == NULL) {
    printf("failed: pointers %p %p\n", ptr[0], ptr[1]);
    return 1;
  }
  // Both nptr pointers should be NULL
  if (nptr[0] != NULL || nptr[1] != NULL) {
    printf("failed: pointers %p %p\n", nptr[0], nptr[1]);
    return 1;
  }
  printf("passed\n");
  return 0;
}
|
ConverterOSG.h | /* -*-c++-*- IfcQuery www.ifcquery.com
*
MIT License
Copyright (c) 2017 Fabian Gerold
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#pragma once
#include <osg/CullFace>
#include <osg/Geode>
#include <osg/Hint>
#include <osg/LineWidth>
#include <osg/Material>
#include <osg/Point>
#include <osgUtil/Tessellator>
#include <ifcpp/model/BasicTypes.h>
#include <ifcpp/model/OpenMPIncludes.h>
#include <ifcpp/model/StatusCallback.h>
#include <ifcpp/IFC4/include/IfcCurtainWall.h>
#include <ifcpp/IFC4/include/IfcFeatureElementSubtraction.h>
#include <ifcpp/IFC4/include/IfcGloballyUniqueId.h>
#include <ifcpp/IFC4/include/IfcProject.h>
#include <ifcpp/IFC4/include/IfcPropertySetDefinitionSet.h>
#include <ifcpp/IFC4/include/IfcRelAggregates.h>
#include <ifcpp/IFC4/include/IfcSpace.h>
#include <ifcpp/IFC4/include/IfcWindow.h>
#include <ifcpp/geometry/GeometrySettings.h>
#include <ifcpp/geometry/SceneGraphUtils.h>
#include <ifcpp/geometry/AppearanceData.h>
#include "GeometryInputData.h"
#include "IncludeCarveHeaders.h"
#include "CSG_Adapter.h"
class ConverterOSG : public StatusCallback
{
protected:
shared_ptr<GeometrySettings> m_geom_settings;
std::map<std::string, osg::ref_ptr<osg::Switch> > m_map_entity_guid_to_switch;
std::map<int, osg::ref_ptr<osg::Switch> > m_map_representation_id_to_switch;
double m_recent_progress;
osg::ref_ptr<osg::CullFace> m_cull_back_off;
public:
// Shares the given geometry settings and pre-creates a back-face-cull
// attribute that can be reused across state sets.
ConverterOSG( shared_ptr<GeometrySettings>& geom_settings ) : m_geom_settings(geom_settings)
{
m_cull_back_off = new osg::CullFace( osg::CullFace::BACK );
}
virtual ~ConverterOSG() {}
// Map: IfcProduct ID -> scenegraph switch
std::map<std::string, osg::ref_ptr<osg::Switch> >& getMapEntityGUIDToSwitch() { return m_map_entity_guid_to_switch; }
// Map: Representation Identifier -> scenegraph switch
std::map<int, osg::ref_ptr<osg::Switch> >& getMapRepresentationToSwitch() { return m_map_representation_id_to_switch; }
// Drops both cached GUID->switch and representation->switch mappings.
void clearInputCache()
{
m_map_entity_guid_to_switch.clear();
m_map_representation_id_to_switch.clear();
}
static void drawBoundingBox( const carve::geom::aabb<3>& aabb, osg::Geometry* geom )
{
osg::ref_ptr<osg::Vec3Array> vertices = dynamic_cast<osg::Vec3Array*>( geom->getVertexArray() );
if( !vertices )
{
vertices = new osg::Vec3Array();
geom->setVertexArray( vertices );
}
const carve::geom::vector<3>& aabb_pos = aabb.pos;
const carve::geom::vector<3>& extent = aabb.extent;
const double dex = extent.x;
const double dey = extent.y;
const double dez = extent.z;
const int vert_id_offset = vertices->size();
vertices->push_back( osg::Vec3f( aabb_pos.x - dex, aabb_pos.y - dey, aabb_pos.z - dez ) );
vertices->push_back( osg::Vec3f( aabb_pos.x + dex, aabb_pos.y - dey, aabb_pos.z - dez ) );
vertices->push_back( osg::Vec3f( aabb_pos.x + dex, aabb_pos.y + dey, aabb_pos.z - dez ) );
vertices->push_back( osg::Vec3f( aabb_pos.x - dex, aabb_pos.y + dey, aabb_pos.z - dez ) );
vertices->push_back( osg::Vec3f( aabb_pos.x - dex, aabb_pos.y - dey, aabb_pos.z + dez ) );
vertices->push_back( osg::Vec3f( aabb_pos.x + dex, aabb_pos.y - dey, aabb_pos.z + dez ) );
vertices->push_back( osg::Vec3f( aabb_pos.x + dex, aabb_pos.y + dey, aabb_pos.z + dez ) );
vertices->push_back( osg::Vec3f( aabb_pos.x - dex, aabb_pos.y + dey, aabb_pos.z + dez ) );
osg::ref_ptr<osg::DrawElementsUInt> box_lines = new osg::DrawElementsUInt( GL_LINE_STRIP, 0 );
box_lines->push_back( vert_id_offset + 0 );
box_lines->push_back( vert_id_offset + 1 );
box_lines->push_back( vert_id_offset + 2 );
box_lines->push_back( vert_id_offset + 3 );
box_lines->push_back( vert_id_offset + 0 );
box_lines->push_back( vert_id_offset + 4 );
box_lines->push_back( vert_id_offset + 5 );
box_lines->push_back( vert_id_offset + 1 );
box_lines->push_back( vert_id_offset + 5 );
box_lines->push_back( vert_id_offset + 6 );
box_lines->push_back( vert_id_offset + 2 );
box_lines->push_back( vert_id_offset + 6 );
box_lines->push_back( vert_id_offset + 7 );
box_lines->push_back( vert_id_offset + 3 );
box_lines->push_back( vert_id_offset + 7 );
box_lines->push_back( vert_id_offset + 4 );
geom->addPrimitiveSet( box_lines );
osg::ref_ptr<osg::Material> mat = new osg::Material();
if( !mat ) { throw OutOfMemoryException(); }
osg::Vec4f ambientColor( 1.f, 0.2f, 0.1f, 1.f );
mat->setAmbient( osg::Material::FRONT, ambientColor );
mat->setDiffuse( osg::Material::FRONT, ambientColor );
mat->setSpecular( osg::Material::FRONT, ambientColor );
//mat->setShininess( osg::Material::FRONT, shininess );
//mat->setColorMode( osg::Material::SPECULAR );
osg::StateSet* stateset = geom->getOrCreateStateSet();
if( !stateset ) { throw OutOfMemoryException(); }
stateset->setAttribute( mat, osg::StateAttribute::ON );
stateset->setMode( GL_LIGHTING, osg::StateAttribute::OFF );
}
// Converts a single carve face (intended for faces with more than 4
// vertices) into an OSG POLYGON drawable attached to `geode`. Vertices are
// emitted in reversed order, all share one flat polygon normal, and faces
// with more than 4 vertices are run through the OSG tessellator since GL
// polygons must be convex.
static void drawFace( const carve::mesh::Face<3>* face, osg::Geode* geode, bool add_color_array = false )
{
#ifdef _DEBUG
std::cout << "not triangulated" << std::endl;
#endif
// Collect the face's vertices by walking its edge loop.
std::vector<vec3> face_vertices;
face_vertices.resize( face->nVertices() );
carve::mesh::Edge<3> *e = face->edge;
const size_t num_vertices = face->nVertices();
for( size_t i = 0; i < num_vertices; ++i )
{
face_vertices[i] = e->v1()->v;
e = e->next;
}
if( num_vertices < 4 )
{
std::cout << "drawFace is meant only for num vertices > 4" << std::endl;
}
vec3* vertex_vec;
osg::ref_ptr<osg::Vec3Array> vertices = new osg::Vec3Array( num_vertices );
if( !vertices ) { throw OutOfMemoryException(); }
osg::ref_ptr<osg::DrawElementsUInt> triangles = new osg::DrawElementsUInt( osg::PrimitiveSet::POLYGON, num_vertices );
if( !triangles ) { throw OutOfMemoryException(); }
// Copy the vertices in reversed order (flips the winding).
for( size_t i = 0; i < num_vertices; ++i )
{
vertex_vec = &face_vertices[num_vertices - i - 1];
( *vertices )[i].set( vertex_vec->x, vertex_vec->y, vertex_vec->z );
( *triangles )[i] = i;
}
// One flat normal shared by every vertex of the polygon.
osg::Vec3f poly_normal = SceneGraphUtils::computePolygonNormal( vertices );
osg::ref_ptr<osg::Vec3Array> normals = new osg::Vec3Array();
normals->resize( num_vertices, poly_normal );
osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry();
geometry->setVertexArray( vertices );
geometry->setNormalArray( normals );
normals->setBinding( osg::Array::BIND_PER_VERTEX );
geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::POLYGON, 0, vertices->size() ) );
if( add_color_array )
{
// Constant translucent grey per-vertex color.
osg::ref_ptr<osg::Vec4Array> colors = new osg::Vec4Array();
colors->resize( vertices->size(), osg::Vec4f( 0.6f, 0.6f, 0.6f, 0.1f ) );
colors->setBinding( osg::Array::BIND_PER_VERTEX );
geometry->setColorArray( colors );
}
if( num_vertices > 4 )
{
// TODO: check if polygon is convex with Gift wrapping algorithm
osg::ref_ptr<osgUtil::Tessellator> tesselator = new osgUtil::Tessellator();
tesselator->setTessellationType( osgUtil::Tessellator::TESS_TYPE_POLYGONS );
//tesselator->setWindingType( osgUtil::Tessellator::TESS_WINDING_ODD );
tesselator->retessellatePolygons( *geometry );
}
geode->addDrawable( geometry );
#ifdef DEBUG_DRAW_NORMALS
// Debug-only: draw a short line from each vertex along its normal.
osg::ref_ptr<osg::Vec3Array> vertices_normals = new osg::Vec3Array();
for( size_t i = 0; i < num_vertices; ++i )
{
vertex_vec = &face_vertices[num_vertices - i - 1];
vertices_normals->push_back( osg::Vec3f( vertex_vec->x, vertex_vec->y, vertex_vec->z ) );
vertices_normals->push_back( osg::Vec3f( vertex_vec->x, vertex_vec->y, vertex_vec->z ) + poly_normal );
}
osg::ref_ptr<osg::Vec4Array> colors_normals = new osg::Vec4Array();
colors_normals->resize( num_vertices * 2, osg::Vec4f( 0.4f, 0.7f, 0.4f, 1.f ) );
osg::ref_ptr<osg::Geometry> geometry_normals = new osg::Geometry();
geometry_normals->setVertexArray( vertices_normals );
geometry_normals->setColorArray( colors_normals );
geometry_normals->setColorBinding( osg::Geometry::BIND_PER_VERTEX );
geometry_normals->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF );
geometry_normals->setNormalBinding( osg::Geometry::BIND_OFF );
geometry_normals->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::LINES, 0, vertices_normals->size() ) );
geode->addDrawable( geometry_normals );
#endif
}
//#define DEBUG_DRAW_NORMALS
// Converts a carve MeshSet into OSG drawables on `geode`. Triangle and quad
// faces are batched into TRIANGLES / QUADS primitive sets; faces with more
// than 4 vertices are delegated to drawFace(). When crease_angle > 0,
// per-vertex normals are smoothed by averaging the (area-weighted) normals
// of adjacent faces whose orientation deviates from the current face by
// less than crease_angle; otherwise flat face normals are used. Triangles
// with area below min_triangle_area are skipped as degenerate. If
// add_color_array is set, a constant translucent grey per-vertex color
// array is attached.
static void drawMeshSet( const shared_ptr<carve::mesh::MeshSet<3> >& meshset, osg::Geode* geode, double crease_angle, double min_triangle_area, bool add_color_array = false )
{
if( !meshset )
{
return;
}
osg::ref_ptr<osg::Vec3Array> vertices_tri = new osg::Vec3Array();
if( !vertices_tri ) { throw OutOfMemoryException(); }
osg::ref_ptr<osg::Vec3Array> normals_tri = new osg::Vec3Array();
if( !normals_tri ) { throw OutOfMemoryException(); }
// Quad arrays are created lazily, only if a 4-sided face is encountered.
osg::ref_ptr<osg::Vec3Array> vertices_quad;
osg::ref_ptr<osg::Vec3Array> normals_quad;
// Safety bound when walking the edge fan around a vertex, so corrupt
// connectivity cannot cause an endless loop.
const size_t max_num_faces_per_vertex = 10000;
std::map<carve::mesh::Face<3>*, double> map_face_area;
std::map<carve::mesh::Face<3>*, double>::iterator it_face_area;
if( crease_angle > 0 )
{
// Precompute per-face projected areas, used as smoothing weights below.
for( size_t i_mesh = 0; i_mesh < meshset->meshes.size(); ++i_mesh )
{
const carve::mesh::Mesh<3>* mesh = meshset->meshes[i_mesh];
const size_t num_faces = mesh->faces.size();
for( size_t i_face = 0; i_face != num_faces; ++i_face )
{
carve::mesh::Face<3>* face = mesh->faces[i_face];
// compute area of projected face:
std::vector<vec2> projected;
face->getProjectedVertices( projected );
double face_area = carve::geom2d::signedArea( projected );
// NOTE(review): unqualified abs() on a double may pick the int
// overload and truncate — should likely be std::abs / fabs. Confirm
// which abs is in scope here.
map_face_area[face] = abs( face_area );
}
}
}
for( size_t i_mesh = 0; i_mesh < meshset->meshes.size(); ++i_mesh )
{
const carve::mesh::Mesh<3>* mesh = meshset->meshes[i_mesh];
const size_t num_faces = mesh->faces.size();
for( size_t i_face = 0; i_face != num_faces; ++i_face )
{
carve::mesh::Face<3>* face = mesh->faces[i_face];
const size_t n_vertices = face->nVertices();
if( n_vertices > 4 )
{
// Polygons with more than 4 vertices are handled separately.
drawFace( face, geode );
continue;
}
const vec3 face_normal = face->plane.N;
if( crease_angle > 0 )
{
// Smoothed-normal path: accumulate weighted normals of all faces
// sharing each vertex, as long as they lie within the crease angle.
carve::mesh::Edge<3>* e = face->edge;
for( size_t jj = 0; jj < n_vertices; ++jj )
{
carve::mesh::Vertex<3>* vertex = e->vert;
vec3 intermediate_normal;
// collect all faces at vertex
// | ^
// | |
// f1 e->rev | | e face
// v |
// <---e1------- <---------------
//-------------> --------------->
// | ^
// | |
// v |
carve::mesh::Edge<3>* e1 = e;// ->rev->next;
carve::mesh::Face<3>* f1 = e1->face;
#ifdef _DEBUG
if( f1 != face )
{
std::cout << "f1 != face" << std::endl;
}
#endif
for( size_t i3 = 0; i3 < max_num_faces_per_vertex; ++i3 )
{
if( !e1->rev )
{
break;
}
if( !e1->rev->next )
{
break;
}
vec3 f1_normal = f1->plane.N;
const double cos_angle = dot( f1_normal, face_normal );
if( cos_angle > 0 )
{
const double deviation = std::abs( cos_angle - 1.0 );
if( deviation < crease_angle )
{
// Weight the neighbor's normal by its projected face area.
double weight = 0.0;
it_face_area = map_face_area.find( f1 );
if( it_face_area != map_face_area.end() )
{
weight = it_face_area->second;
}
intermediate_normal += weight*f1_normal;
}
}
if( !e1->rev )
{
// it's an open mesh
break;
}
// Step to the next face around the vertex.
e1 = e1->rev->next;
if( !e1 )
{
break;
}
f1 = e1->face;
#ifdef _DEBUG
if( e1->vert != vertex )
{
std::cout << "e1->vert != vertex" << std::endl;
}
#endif
if( f1 == face )
{
// Completed the full fan around the vertex.
break;
}
}
const double intermediate_normal_length = intermediate_normal.length();
if( intermediate_normal_length < 0.0000000001 )
{
// No usable neighbors accumulated: fall back to the flat normal.
intermediate_normal = face_normal;
}
else
{
// normalize:
intermediate_normal *= 1.0 / intermediate_normal_length;
}
const vec3& vertex_v = vertex->v;
if( face->n_edges == 3 )
{
const carve::mesh::Edge<3>* edge0 = face->edge;
const carve::mesh::Edge<3>* edge1 = edge0->next;
const carve::mesh::Edge<3>* edge2 = edge1->next;
const carve::mesh::Vertex<3>* v0 = edge0->vert;
const carve::mesh::Vertex<3>* v1 = edge1->vert;
const carve::mesh::Vertex<3>* v2 = edge2->vert;
vec3 vert0 = v0->v;
vec3 vert1 = v1->v;
vec3 vert2 = v2->v;
vec3 v0v1 = vert1 - vert0;
vec3 v0v2 = vert2 - vert0;
// Triangle area via cross product magnitude.
double area = (carve::geom::cross(v0v1, v0v2).length())*0.5;
if (abs(area) > min_triangle_area) // skip degenerated triangle
{
vertices_tri->push_back(osg::Vec3(vertex_v.x, vertex_v.y, vertex_v.z));
normals_tri->push_back(osg::Vec3(intermediate_normal.x, intermediate_normal.y, intermediate_normal.z));
}
}
else if( face->n_edges == 4 )
{
if( !vertices_quad ) vertices_quad = new osg::Vec3Array();
vertices_quad->push_back( osg::Vec3( vertex_v.x, vertex_v.y, vertex_v.z ) );
if( !normals_quad ) normals_quad = new osg::Vec3Array();
normals_quad->push_back( osg::Vec3( intermediate_normal.x, intermediate_normal.y, intermediate_normal.z ) );
}
e = e->next;
}
}
else
{
// Flat-normal path: every vertex of the face gets the face normal.
carve::mesh::Edge<3>* e = face->edge;
for( size_t jj = 0; jj < n_vertices; ++jj )
{
carve::mesh::Vertex<3>* vertex = e->vert;
const vec3& vertex_v = vertex->v;
if( face->n_edges == 3 )
{
const carve::mesh::Edge<3>* edge0 = face->edge;
const carve::mesh::Edge<3>* edge1 = edge0->next;
const carve::mesh::Edge<3>* edge2 = edge1->next;
const carve::mesh::Vertex<3>* v0 = edge0->vert;
const carve::mesh::Vertex<3>* v1 = edge1->vert;
const carve::mesh::Vertex<3>* v2 = edge2->vert;
vec3 vert0 = v0->v;
vec3 vert1 = v1->v;
vec3 vert2 = v2->v;
vec3 v0v1 = vert1 - vert0;
vec3 v0v2 = vert2 - vert0;
// Triangle area via cross product magnitude.
double area = (carve::geom::cross(v0v1, v0v2).length())*0.5;
if (abs(area) > min_triangle_area) // skip degenerated triangle
{
vertices_tri->push_back(osg::Vec3(vertex_v.x, vertex_v.y, vertex_v.z));
normals_tri->push_back(osg::Vec3(face_normal.x, face_normal.y, face_normal.z));
}
}
else if( face->n_edges == 4 )
{
if( !vertices_quad ) vertices_quad = new osg::Vec3Array();
vertices_quad->push_back( osg::Vec3( vertex_v.x, vertex_v.y, vertex_v.z ) );
if( !normals_quad ) normals_quad = new osg::Vec3Array();
normals_quad->push_back( osg::Vec3( face_normal.x, face_normal.y, face_normal.z ) );
}
e = e->next;
}
}
}
}
// Emit the batched triangle geometry, if any.
if( vertices_tri->size() > 0 )
{
osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry();
if( !geometry ) { throw OutOfMemoryException(); }
geometry->setVertexArray( vertices_tri );
geometry->setNormalArray( normals_tri );
normals_tri->setBinding( osg::Array::BIND_PER_VERTEX );
if( add_color_array )
{
osg::ref_ptr<osg::Vec4Array> colors = new osg::Vec4Array();
if( !colors ) { throw OutOfMemoryException(); }
colors->resize( vertices_tri->size(), osg::Vec4f( 0.6f, 0.6f, 0.6f, 0.1f ) );
colors->setBinding( osg::Array::BIND_PER_VERTEX );
geometry->setColorArray( colors );
}
geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::TRIANGLES, 0, vertices_tri->size() ) );
if( !geometry ) { throw OutOfMemoryException(); }
geode->addDrawable( geometry );
#ifdef DEBUG_DRAW_NORMALS
// Debug-only: visualize each triangle vertex normal as a short line.
osg::ref_ptr<osg::Vec3Array> vertices_normals = new osg::Vec3Array();
for( size_t i = 0; i < vertices_tri->size(); ++i )
{
osg::Vec3f& vertex_vec = vertices_tri->at( i );// [i];
osg::Vec3f& normal_vec = normals_tri->at( i );
vertices_normals->push_back( osg::Vec3f( vertex_vec.x(), vertex_vec.y(), vertex_vec.z() ) );
vertices_normals->push_back( osg::Vec3f( vertex_vec.x(), vertex_vec.y(), vertex_vec.z() ) + normal_vec );
}
osg::ref_ptr<osg::Vec4Array> colors_normals = new osg::Vec4Array();
colors_normals->resize( vertices_normals->size(), osg::Vec4f( 0.4f, 0.7f, 0.4f, 1.f ) );
osg::ref_ptr<osg::Geometry> geometry_normals = new osg::Geometry();
geometry_normals->setVertexArray( vertices_normals );
geometry_normals->setColorArray( colors_normals );
geometry_normals->setColorBinding( osg::Geometry::BIND_PER_VERTEX );
geometry_normals->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF );
geometry_normals->setNormalBinding( osg::Geometry::BIND_OFF );
geometry_normals->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::LINES, 0, vertices_normals->size() ) );
geode->addDrawable( geometry_normals );
#endif
}
// Emit the batched quad geometry, if any.
if( vertices_quad )
{
if( vertices_quad->size() > 0 )
{
osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry();
if( !geometry ) { throw OutOfMemoryException(); }
geometry->setVertexArray( vertices_quad );
if( normals_quad )
{
normals_quad->setBinding( osg::Array::BIND_PER_VERTEX );
geometry->setNormalArray( normals_quad );
}
if( add_color_array )
{
osg::ref_ptr<osg::Vec4Array> colors = new osg::Vec4Array();
if( !colors ) { throw OutOfMemoryException(); }
colors->resize( vertices_quad->size(), osg::Vec4f( 0.6f, 0.6f, 0.6f, 0.1f ) );
colors->setBinding( osg::Array::BIND_PER_VERTEX );
geometry->setColorArray( colors );
}
geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::QUADS, 0, vertices_quad->size() ) );
if( !geometry ) { throw OutOfMemoryException(); }
geode->addDrawable( geometry );
}
}
}
// \brief Converts a carve polyline set into a single LINE_STRIP drawable attached to geode.
// \param polyline_data  source polylines; create() allocates a PolylineSet owned by this function
// \param geode          target geode; one geometry is appended
// \param add_color_array when true, a uniform grey per-vertex color array is attached
static void drawPolyline( const carve::input::PolylineSetData* polyline_data, osg::Geode* geode, bool add_color_array = false )
{
osg::ref_ptr<osg::Vec3Array> vertices = new osg::Vec3Array();
if( !vertices ) { throw OutOfMemoryException(); }
carve::line::PolylineSet* polyline_set = polyline_data->create( carve::input::opts() );
if( polyline_set->vertices.size() < 2 )
{
#ifdef _DEBUG
std::cout << __FUNC__ << ": polyline_set->vertices.size() < 2" << std::endl;
#endif
delete polyline_set; // BUGFIX: the created PolylineSet was leaked on this early return
return;
}
for( auto it = polyline_set->lines.begin(); it != polyline_set->lines.end(); ++it )
{
const carve::line::Polyline* pline = *it;
size_t vertex_count = pline->vertexCount();
for( size_t vertex_i = 0; vertex_i < vertex_count; ++vertex_i )
{
// defensive bound against inconsistent polyline data
if( vertex_i >= polyline_set->vertices.size() )
{
#ifdef _DEBUG
std::cout << __FUNC__ << ": vertex_i >= polyline_set->vertices.size()" << std::endl;
#endif
continue;
}
const carve::line::Vertex* v = pline->vertex( vertex_i );
vertices->push_back( osg::Vec3d( v->v[0], v->v[1], v->v[2] ) );
}
}
// all vertex data has been copied into the osg array, so the carve object can go
delete polyline_set; // BUGFIX: was leaked on the normal path as well
osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry();
if( !geometry ) { throw OutOfMemoryException(); }
geometry->setVertexArray( vertices );
geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::LINE_STRIP, 0, vertices->size() ) );
if( add_color_array )
{
osg::Vec4f color( 0.6f, 0.6f, 0.6f, 0.1f );
osg::ref_ptr<osg::Vec4Array> colors = new osg::Vec4Array( vertices->size(), &color );
if( !colors ) { throw OutOfMemoryException(); }
colors->setBinding( osg::Array::BIND_PER_VERTEX );
geometry->setColorArray( colors );
}
geode->addDrawable( geometry );
}
static void computeCreaseEdgesFromMeshset( const shared_ptr<carve::mesh::MeshSet<3> >& meshset, std::vector<carve::mesh::Edge<3>* >& vec_edges_out, const double crease_angle )
{
if( !meshset )
{
return;
}
for( size_t i_mesh = 0; i_mesh < meshset->meshes.size(); ++i_mesh )
{
const carve::mesh::Mesh<3>* mesh = meshset->meshes[i_mesh];
const std::vector<carve::mesh::Edge<3>* >& vec_closed_edges = mesh->closed_edges;
for( size_t i_edge = 0; i_edge < vec_closed_edges.size(); ++i_edge )
{
carve::mesh::Edge<3>* edge = vec_closed_edges[i_edge];
if( !edge )
{
continue;
}
carve::mesh::Edge<3>* edge_reverse = edge->rev;
if( !edge_reverse )
{
continue;
}
carve::mesh::Face<3>* face = edge->face;
carve::mesh::Face<3>* face_reverse = edge_reverse->face;
const carve::geom::vector<3>& f1_normal = face->plane.N;
const carve::geom::vector<3>& f2_normal = face_reverse->plane.N;
const double cos_angle = dot( f1_normal, f2_normal );
if( cos_angle > 0 )
{
const double deviation = std::abs( cos_angle - 1.0 );
if( deviation < crease_angle )
{
continue;
}
}
// TODO: if area of face and face_reverse is equal, skip the crease edge. It could be the inside or outside of a cylinder. Check also if > 2 faces in a row have same normal angle differences
vec_edges_out.push_back( edge );
}
}
}
static void renderMeshsetCreaseEdges( const shared_ptr<carve::mesh::MeshSet<3> >& meshset, osg::Geode* target_geode, const double crease_angle, const float line_width )
{
if( !meshset )
{
return;
}
if( !target_geode )
{
return;
}
std::vector<carve::mesh::Edge<3>* > vec_crease_edges;
computeCreaseEdgesFromMeshset( meshset, vec_crease_edges, crease_angle );
if( vec_crease_edges.size() > 0 )
{
osg::ref_ptr<osg::Vec3Array> vertices = new osg::Vec3Array();
for( size_t i_edge = 0; i_edge < vec_crease_edges.size(); ++i_edge )
{
const carve::mesh::Edge<3>* edge = vec_crease_edges[i_edge];
const carve::geom::vector<3>& vertex1 = edge->v1()->v;
const carve::geom::vector<3>& vertex2 = edge->v2()->v;
vertices->push_back( osg::Vec3d( vertex1.x, vertex1.y, vertex1.z ) );
vertices->push_back( osg::Vec3d( vertex2.x, vertex2.y, vertex2.z ) );
}
osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry();
geometry->setName("creaseEdges");
geometry->setVertexArray( vertices );
geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::LINES, 0, vertices->size() ) );
geometry->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF );
geometry->getOrCreateStateSet()->setMode( GL_BLEND, osg::StateAttribute::ON );
geometry->getOrCreateStateSet()->setAttributeAndModes( new osg::LineWidth( line_width ), osg::StateAttribute::ON );
osg::Material* mat = new osg::Material();
mat->setDiffuse(osg::Material::FRONT, osg::Vec4f(0.3f, 0.3f, 0.35f, 0.8f));
geometry->getOrCreateStateSet()->setAttributeAndModes(mat, osg::StateAttribute::ON);
geometry->getOrCreateStateSet()->setMode( GL_LINE_SMOOTH, osg::StateAttribute::ON );
geometry->getOrCreateStateSet()->setAttributeAndModes( new osg::Hint( GL_LINE_SMOOTH_HINT, GL_NICEST ), osg::StateAttribute::ON );
geometry->getOrCreateStateSet()->setRenderBinDetails( 10, "RenderBin");
target_geode->addDrawable( geometry );
}
}
// \brief Applies surface appearances to the group's state set.
// Converts each surface (or "any") appearance to an osg::StateSet; if the group
// already owns a state set, the new one is merged into it, otherwise it is set.
void applyAppearancesToGroup( const std::vector<shared_ptr<AppearanceData> >& vec_product_appearances, osg::Group* grp )
{
for( const shared_ptr<AppearanceData>& appearance : vec_product_appearances )
{
if( !appearance )
{
continue;
}
const bool applies_to_surface = ( appearance->m_apply_to_geometry_type == AppearanceData::GEOM_TYPE_SURFACE
|| appearance->m_apply_to_geometry_type == AppearanceData::GEOM_TYPE_ANY );
if( !applies_to_surface )
{
// curve appearances (GEOM_TYPE_CURVE) are intentionally not handled here
continue;
}
osg::ref_ptr<osg::StateSet> item_stateset;
convertToOSGStateSet( appearance, item_stateset );
if( !item_stateset )
{
continue;
}
osg::StateSet* existing_item_stateset = grp->getStateSet();
if( !existing_item_stateset )
{
grp->setStateSet( item_stateset );
}
else if( existing_item_stateset != item_stateset )
{
existing_item_stateset->merge( *item_stateset );
}
}
}
// \brief Copies a carve 4x4 matrix into an osg::Matrixd, element by element,
// preserving the original element ordering exactly.
osg::Matrixd convertMatrixToOSG( const carve::math::Matrix& mat_in )
{
const auto& m = mat_in.m;
osg::Matrixd result;
result.set( m[0][0], m[0][1], m[0][2], m[0][3],
m[1][0], m[1][1], m[1][2], m[1][3],
m[2][0], m[2][1], m[2][2], m[2][3],
m[3][0], m[3][1], m[3][2], m[3][3] );
return result;
}
//\brief method convertProductShapeToOSG: creates geometry objects from an IfcProduct object
// caution: when using OpenMP, this method runs in parallel threads, so every write access to member variables needs a write lock
// \brief Builds OSG scene-graph nodes for every representation of an IfcProduct.
// \param product_shape                  the product's geometry (meshsets, polylines, points, text)
// \param[out] map_representation_switches  representation entity id -> created osg::Switch
// NOTE(review): runs in parallel threads when OpenMP is enabled (see caller);
// writes only to locals and to the caller-provided output map.
void convertProductShapeToOSG( shared_ptr<ProductShapeData>& product_shape, std::map<int, osg::ref_ptr<osg::Switch> >& map_representation_switches )
{
// bail out if the underlying IFC object definition no longer exists
if( product_shape->m_ifc_object_definition.expired() )
{
return;
}
shared_ptr<IfcObjectDefinition> ifc_object_def(product_shape->m_ifc_object_definition);
shared_ptr<IfcProduct> ifc_product = dynamic_pointer_cast<IfcProduct>(ifc_object_def);
// only IfcProduct instances carry renderable representations
if( !ifc_product )
{
return;
}
// convert the wide-string GlobalId to UTF-8 for use in node names
std::string product_guid;
if (ifc_product->m_GlobalId)
{
std::wstring_convert<std::codecvt_utf8<wchar_t>, wchar_t> converterX;
product_guid = converterX.to_bytes(ifc_product->m_GlobalId->m_value);
}
std::stringstream strs_product_switch_name;
strs_product_switch_name << product_guid << ":" << ifc_product->className() << " group";
bool draw_bounding_box = false; // debug flag: also render AABBs of the meshsets when true
double crease_angle = m_geom_settings->getCoplanarFacesMaxDeltaAngle();
double min_triangle_area = m_geom_settings->getMinTriangleArea();
std::vector<osg::ref_ptr<osg::Switch> > vec_current_switches;
// create OSG objects
std::vector<shared_ptr<RepresentationData> >& vec_product_representations = product_shape->m_vec_representations;
for( size_t ii_representation = 0; ii_representation < vec_product_representations.size(); ++ii_representation )
{
const shared_ptr<RepresentationData>& product_representation_data = vec_product_representations[ii_representation];
if( product_representation_data->m_ifc_representation.expired() )
{
continue;
}
shared_ptr<IfcRepresentation> ifc_representation( product_representation_data->m_ifc_representation );
const int representation_id = ifc_representation->m_entity_id;
osg::ref_ptr<osg::Switch> representation_switch = new osg::Switch();
#ifdef _DEBUG
std::stringstream strs_representation_name;
strs_representation_name << strs_product_switch_name.str().c_str() << ", representation " << ii_representation;
representation_switch->setName( strs_representation_name.str().c_str() );
#endif
// each item gets its own MatrixTransform so item appearances/transforms stay local
const std::vector<shared_ptr<ItemShapeData> >& product_items = product_representation_data->m_vec_item_data;
for( size_t i_item = 0; i_item < product_items.size(); ++i_item )
{
const shared_ptr<ItemShapeData>& item_shape = product_items[i_item];
osg::ref_ptr<osg::MatrixTransform> item_group = new osg::MatrixTransform();
if( !item_group ) { throw OutOfMemoryException( __FUNC__ ); }
#ifdef _DEBUG
std::stringstream strs_item_name;
strs_item_name << strs_representation_name.str().c_str() << ", item " << i_item;
item_group->setName( strs_item_name.str().c_str() );
#endif
// create shape for open shells
for( size_t ii = 0; ii < item_shape->m_meshsets_open.size(); ++ii )
{
shared_ptr<carve::mesh::MeshSet<3> >& item_meshset = item_shape->m_meshsets_open[ii];
CSG_Adapter::retriangulateMeshSet( item_meshset );
osg::ref_ptr<osg::Geode> geode = new osg::Geode();
if( !geode ) { throw OutOfMemoryException( __FUNC__ ); }
drawMeshSet( item_meshset, geode, crease_angle, min_triangle_area );
if( m_geom_settings->getRenderCreaseEdges() )
{
renderMeshsetCreaseEdges( item_meshset, geode, m_geom_settings->getCreaseEdgesMaxDeltaAngle(), m_geom_settings->getCreaseEdgesLineWidth() );
}
// disable back face culling for open meshes
geode->getOrCreateStateSet()->setAttributeAndModes( m_cull_back_off.get(), osg::StateAttribute::OFF );
item_group->addChild( geode );
if( draw_bounding_box )
{
carve::geom::aabb<3> bbox = item_meshset->getAABB();
osg::ref_ptr<osg::Geometry> bbox_geom = new osg::Geometry();
drawBoundingBox( bbox, bbox_geom );
geode->addDrawable( bbox_geom );
}
#ifdef _DEBUG
std::stringstream strs_item_meshset_name;
strs_item_meshset_name << strs_item_name.str().c_str() << ", open meshset " << ii;
geode->setName( strs_item_meshset_name.str().c_str() );
#endif
}
// create shape for meshsets
for( size_t ii = 0; ii < item_shape->m_meshsets.size(); ++ii )
{
shared_ptr<carve::mesh::MeshSet<3> >& item_meshset = item_shape->m_meshsets[ii];
CSG_Adapter::retriangulateMeshSet( item_meshset );
osg::ref_ptr<osg::Geode> geode_meshset = new osg::Geode();
if( !geode_meshset ) { throw OutOfMemoryException( __FUNC__ ); }
drawMeshSet( item_meshset, geode_meshset, crease_angle, min_triangle_area);
item_group->addChild( geode_meshset );
if( m_geom_settings->getRenderCreaseEdges() )
{
renderMeshsetCreaseEdges( item_meshset, geode_meshset, m_geom_settings->getCreaseEdgesMaxDeltaAngle(), m_geom_settings->getCreaseEdgesLineWidth() );
}
if( draw_bounding_box )
{
carve::geom::aabb<3> bbox = item_meshset->getAABB();
osg::ref_ptr<osg::Geometry> bbox_geom = new osg::Geometry();
drawBoundingBox( bbox, bbox_geom );
geode_meshset->addDrawable( bbox_geom );
}
#ifdef _DEBUG
std::stringstream strs_item_meshset_name;
strs_item_meshset_name << strs_item_name.str().c_str() << ", meshset " << ii;
geode_meshset->setName( strs_item_meshset_name.str().c_str() );
#endif
}
// create shape for points
const std::vector<shared_ptr<carve::input::VertexData> >& vertex_points = item_shape->getVertexPoints();
for( size_t ii = 0; ii < vertex_points.size(); ++ii )
{
const shared_ptr<carve::input::VertexData>& pointset_data = vertex_points[ii];
if( pointset_data )
{
if( pointset_data->points.size() > 0 )
{
osg::ref_ptr<osg::Geode> geode = new osg::Geode();
if( !geode ) { throw OutOfMemoryException( __FUNC__ ); }
osg::ref_ptr<osg::Vec3Array> vertices = new osg::Vec3Array();
for( size_t i_pointset_point = 0; i_pointset_point < pointset_data->points.size(); ++i_pointset_point )
{
vec3& carve_point = pointset_data->points[i_pointset_point];
vertices->push_back( osg::Vec3d( carve_point.x, carve_point.y, carve_point.z ) );
}
osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry();
geometry->setVertexArray( vertices );
geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::POINTS, 0, vertices->size() ) );
// unlit 3px points; culling off so isolated points are never clipped away
geode->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF );
geode->getOrCreateStateSet()->setAttribute( new osg::Point( 3.0f ), osg::StateAttribute::ON );
geode->addDrawable( geometry );
geode->setCullingActive( false );
item_group->addChild( geode );
#ifdef _DEBUG
std::stringstream strs_item_meshset_name;
strs_item_meshset_name << strs_item_name.str().c_str() << ", vertex_point " << ii;
geode->setName( strs_item_meshset_name.str().c_str() );
#endif
}
}
}
// create shape for polylines
for( size_t ii = 0; ii < item_shape->m_polylines.size(); ++ii )
{
shared_ptr<carve::input::PolylineSetData>& polyline_data = item_shape->m_polylines[ii];
osg::ref_ptr<osg::Geode> geode = new osg::Geode();
if( !geode ) { throw OutOfMemoryException( __FUNC__ ); }
geode->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF );
drawPolyline( polyline_data.get(), geode );
item_group->addChild( geode );
#ifdef _DEBUG
std::stringstream strs_item_meshset_name;
strs_item_meshset_name << strs_item_name.str().c_str() << ", polylines " << ii;
geode->setName( strs_item_meshset_name.str().c_str() );
#endif
}
// text literals are rendered as screen-aligned osgText nodes
if( m_geom_settings->isShowTextLiterals() )
{
for( size_t ii = 0; ii < item_shape->m_vec_text_literals.size(); ++ii )
{
shared_ptr<TextItemData>& text_data = item_shape->m_vec_text_literals[ii];
if( !text_data )
{
continue;
}
carve::math::Matrix& text_pos = text_data->m_text_position;
// TODO: handle rotation
std::string text_str;
text_str.assign( text_data->m_text.begin(), text_data->m_text.end() );
osg::Vec3 pos2( text_pos._41, text_pos._42, text_pos._43 );
osg::ref_ptr<osgText::Text> txt = new osgText::Text();
if( !txt )
{
throw OutOfMemoryException( __FUNC__ );
}
txt->setFont( "fonts/arial.ttf" );
txt->setColor( osg::Vec4f( 0, 0, 0, 1 ) );
txt->setCharacterSize( 0.1f );
txt->setAutoRotateToScreen( true );
txt->setPosition( pos2 );
txt->setText( text_str.c_str() );
txt->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF );
osg::ref_ptr<osg::Geode> geode = new osg::Geode();
if( !geode ){ throw OutOfMemoryException( __FUNC__ ); }
geode->addDrawable( txt );
item_group->addChild( geode );
}
}
// apply statesets if there are any
if( item_shape->m_vec_item_appearances.size() > 0 )
{
applyAppearancesToGroup( item_shape->m_vec_item_appearances, item_group );
}
// If anything has been created, add it to the representation group
if( item_group->getNumChildren() > 0 )
{
#ifdef _DEBUG
if( item_group->getNumParents() > 0 )
{
std::cout << __FUNC__ << ": item_group->getNumParents() > 0" << std::endl;
}
#endif
representation_switch->addChild( item_group );
}
}
// apply statesets if there are any
if( product_representation_data->m_vec_representation_appearances.size() > 0 )
{
applyAppearancesToGroup( product_representation_data->m_vec_representation_appearances, representation_switch );
}
// If anything has been created, add it to the product group
if( representation_switch->getNumChildren() > 0 )
{
#ifdef _DEBUG
if( representation_switch->getNumParents() > 0 )
{
std::cout << __FUNC__ << ": product_representation_switch->getNumParents() > 0" << std::endl;
}
#endif
// enable transparency for certain objects
if( dynamic_pointer_cast<IfcSpace>(ifc_product) )
{
SceneGraphUtils::setMaterialAlpha(representation_switch, 0.1f, true);
}
else if( dynamic_pointer_cast<IfcCurtainWall>(ifc_product) || dynamic_pointer_cast<IfcWindow>(ifc_product) )
{
SceneGraphUtils::setMaterialAlpha( representation_switch, 0.2f, false );
}
// check if parent building element is window
if( ifc_product->m_Decomposes_inverse.size() > 0 )
{
for( size_t ii_decomposes = 0; ii_decomposes < ifc_product->m_Decomposes_inverse.size(); ++ii_decomposes )
{
const weak_ptr<IfcRelAggregates>& decomposes_weak = ifc_product->m_Decomposes_inverse[ii_decomposes];
if( decomposes_weak.expired() )
{
continue;
}
shared_ptr<IfcRelAggregates> decomposes_ptr(decomposes_weak);
shared_ptr<IfcObjectDefinition>& relating_object = decomposes_ptr->m_RelatingObject;
if( relating_object )
{
if( dynamic_pointer_cast<IfcCurtainWall>(relating_object) || dynamic_pointer_cast<IfcWindow>(relating_object) )
{
SceneGraphUtils::setMaterialAlpha(representation_switch, 0.6f, false);
}
}
}
}
map_representation_switches.insert( std::make_pair( representation_id, representation_switch ) );
vec_current_switches.push_back(representation_switch);
}
}
// TODO: if no color or material is given, set color 231/219/169 for walls, 140/140/140 for slabs
// product-level appearances go to every representation switch created above
if (product_shape->m_vec_product_appearances.size() > 0)
{
for (auto representation_switch : vec_current_switches)
{
applyAppearancesToGroup(product_shape->m_vec_product_appearances, representation_switch);
}
}
}
/*\brief method convertToOSG: Creates geometry for OpenSceneGraph from given ProductShapeData.
\param[out] parent_group Group to append the geometry.
**/
// \brief Converts all product shapes to OSG geometry, then resolves the spatial structure.
// Per-product conversion may run in parallel (OpenMP); shared state (guid map,
// representation map, ifc_project_data, message callback) is protected by mutexes.
void convertToOSG( const std::map<std::string, shared_ptr<ProductShapeData> >& map_shape_data, osg::ref_ptr<osg::Switch> parent_group )
{
progressTextCallback( L"Converting geometry to OpenGL format ..." );
progressValueCallback( 0, "scenegraph" );
m_map_entity_guid_to_switch.clear();
m_map_representation_id_to_switch.clear();
shared_ptr<ProductShapeData> ifc_project_data;
// flatten the map into a vector so OpenMP can index products by int
std::vector<shared_ptr<ProductShapeData> > vec_products;
for( auto it = map_shape_data.begin(); it != map_shape_data.end(); ++it )
{
shared_ptr<ProductShapeData> shape_data = it->second;
if( shape_data )
{
vec_products.push_back( shape_data );
}
}
// create geometry for each IfcProduct independently; spatial structure will be resolved later
std::map<std::string, osg::ref_ptr<osg::Switch> >* map_entity_guid = &m_map_entity_guid_to_switch;
std::map<int, osg::ref_ptr<osg::Switch> >* map_representations = &m_map_representation_id_to_switch;
const int num_products = (int)vec_products.size();
#ifdef ENABLE_OPENMP
Mutex writelock_map;
Mutex writelock_ifc_project;
Mutex writelock_message_callback;
#pragma omp parallel firstprivate(num_products) shared(map_entity_guid, map_representations)
{
// time for one product may vary significantly, so schedule not so many
#pragma omp for schedule(dynamic,40)
#endif
for( int i = 0; i < num_products; ++i )
{
shared_ptr<ProductShapeData>& shape_data = vec_products[i];
weak_ptr<IfcObjectDefinition>& ifc_object_def_weak = shape_data->m_ifc_object_definition;
if( ifc_object_def_weak.expired() )
{
continue;
}
shared_ptr<IfcObjectDefinition> ifc_object_def(shape_data->m_ifc_object_definition);
// remember the IfcProject shape; it is the root for resolveProjectStructure below
shared_ptr<IfcProject> ifc_project = dynamic_pointer_cast<IfcProject>(ifc_object_def);
if (ifc_project)
{
#ifdef ENABLE_OPENMP
ScopedLock scoped_lock(writelock_ifc_project);
#endif
ifc_project_data = shape_data;
}
shared_ptr<IfcProduct> ifc_product = dynamic_pointer_cast<IfcProduct>(ifc_object_def);
if (!ifc_product)
{
continue;
}
// errors from this product are collected here and reported once at the end
std::stringstream thread_err;
if( dynamic_pointer_cast<IfcFeatureElementSubtraction>(ifc_product) )
{
// geometry will be created in method subtractOpenings
continue;
}
if( !ifc_product->m_Representation )
{
continue;
}
const int product_id = ifc_product->m_entity_id;
std::string product_guid;
std::map<int, osg::ref_ptr<osg::Switch> > map_representation_switches;
try
{
convertProductShapeToOSG( shape_data, map_representation_switches );
}
catch( OutOfMemoryException& e )
{
// out-of-memory is fatal: propagate instead of collecting
throw e;
}
catch( BuildingException& e )
{
thread_err << e.what();
}
catch( carve::exception& e )
{
thread_err << e.str();
}
catch( std::exception& e )
{
thread_err << e.what();
}
catch( ... )
{
thread_err << "undefined error, product id " << product_id;
}
if (ifc_product->m_GlobalId)
{
std::wstring_convert<std::codecvt_utf8<wchar_t>, wchar_t> converterX;
product_guid = converterX.to_bytes(ifc_product->m_GlobalId->m_value);
}
if( map_representation_switches.size() > 0 )
{
// product switch -> transform -> representation switches
osg::ref_ptr<osg::Switch> product_switch = new osg::Switch();
osg::ref_ptr<osg::MatrixTransform> product_transform = new osg::MatrixTransform();
product_transform->setMatrix( convertMatrixToOSG( shape_data->getTransform() ) );
product_switch->addChild( product_transform );
std::stringstream strs_product_switch_name;
strs_product_switch_name << product_guid << ":" << ifc_product->className() << " group";
product_switch->setName( strs_product_switch_name.str().c_str() );
for( auto it_map = map_representation_switches.begin(); it_map != map_representation_switches.end(); ++it_map )
{
osg::ref_ptr<osg::Switch>& repres_switch = it_map->second;
product_transform->addChild( repres_switch );
}
// apply statesets if there are any
const std::vector<shared_ptr<AppearanceData> >& vec_product_appearances = shape_data->getAppearances();
if( vec_product_appearances.size() > 0 )
{
applyAppearancesToGroup( vec_product_appearances, product_switch );
}
#ifdef ENABLE_OPENMP
ScopedLock scoped_lock( writelock_map );
#endif
map_entity_guid->insert(std::make_pair(product_guid, product_switch));
map_representations->insert( map_representation_switches.begin(), map_representation_switches.end() );
}
if( thread_err.tellp() > 0 )
{
#ifdef ENABLE_OPENMP
ScopedLock scoped_lock( writelock_message_callback );
#endif
messageCallback( thread_err.str().c_str(), StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__ );
}
// progress callback
double progress = (double)i / (double)num_products;
if( progress - m_recent_progress > 0.02 )
{
#ifdef ENABLE_OPENMP
if( omp_get_thread_num() == 0 )
#endif
{
// leave 10% of progress to openscenegraph internals
progressValueCallback( progress*0.9, "scenegraph" );
m_recent_progress = progress;
}
}
}
#ifdef ENABLE_OPENMP
} // implicit barrier
#endif
try
{
// now resolve spatial structure
if( ifc_project_data )
{
resolveProjectStructure( ifc_project_data, parent_group );
}
}
catch( OutOfMemoryException& e )
{
throw e;
}
catch( BuildingException& e )
{
messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" );
}
catch( std::exception& e )
{
messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" );
}
catch( ... )
{
messageCallback( "undefined error", StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__ );
}
progressValueCallback( 0.9, "scenegraph" );
}
// \brief Attaches the already-built product switches for the given shapes to target_group.
// Used for entities that are not part of the spatial structure; allocates the
// target group when the caller passed a null ref_ptr.
void addNodes( const std::map<std::string, shared_ptr<BuildingObject> >& map_shape_data, osg::ref_ptr<osg::Switch>& target_group )
{
if( !target_group )
{
target_group = new osg::Switch();
}
for( const auto& shape_entry : map_shape_data )
{
// look up the switch created for this product guid during conversion
auto it_switch = m_map_entity_guid_to_switch.find( shape_entry.first );
if( it_switch == m_map_entity_guid_to_switch.end() )
{
continue;
}
osg::ref_ptr<osg::Switch>& sw = it_switch->second;
if( sw )
{
target_group->addChild( sw );
}
}
}
// \brief Recursively builds the spatial-structure scene graph for product_data under group.
// Children are placed into sub-switches; the product's own geometry switch (built
// earlier in convertToOSG) is looked up by guid and attached last.
void resolveProjectStructure( const shared_ptr<ProductShapeData>& product_data, osg::ref_ptr<osg::Switch> group )
{
if( !product_data )
{
return;
}
if( product_data->m_ifc_object_definition.expired() )
{
return;
}
shared_ptr<IfcObjectDefinition> ifc_object_def(product_data->m_ifc_object_definition);
if (!ifc_object_def)
{
return;
}
std::string guid;
if (ifc_object_def->m_GlobalId)
{
std::wstring_convert<std::codecvt_utf8<wchar_t>, wchar_t> converterX;
guid = converterX.to_bytes(ifc_object_def->m_GlobalId->m_value);
}
// guard against cycles in the aggregation hierarchy
if( SceneGraphUtils::inParentList(guid, group ) )
{
messageCallback( "Cycle in project structure detected", StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__, ifc_object_def.get() );
return;
}
const std::vector<shared_ptr<ProductShapeData> >& vec_children = product_data->getChildren();
for( size_t ii = 0; ii < vec_children.size(); ++ii )
{
const shared_ptr<ProductShapeData>& child_product_data = vec_children[ii];
if( !child_product_data )
{
continue;
}
osg::ref_ptr<osg::Switch> group_subparts = new osg::Switch();
if( !child_product_data->m_ifc_object_definition.expired() )
{
shared_ptr<IfcObjectDefinition> child_obj_def( child_product_data->m_ifc_object_definition );
// BUGFIX: the sub-group was named with the PARENT's guid and class name
// (child_obj_def was fetched but never used); name it after the child instead
std::string child_guid;
if (child_obj_def->m_GlobalId)
{
std::wstring_convert<std::codecvt_utf8<wchar_t>, wchar_t> converterX;
child_guid = converterX.to_bytes(child_obj_def->m_GlobalId->m_value);
}
std::stringstream group_subparts_name;
group_subparts_name << child_guid << ":" << child_obj_def->className();
group_subparts->setName( group_subparts_name.str().c_str() );
}
group->addChild( group_subparts );
resolveProjectStructure( child_product_data, group_subparts );
}
// attach this product's own geometry switch, if one was created
auto it_product_map = m_map_entity_guid_to_switch.find(guid);
if( it_product_map != m_map_entity_guid_to_switch.end() )
{
const osg::ref_ptr<osg::Switch>& product_switch = it_product_map->second;
if( product_switch )
{
group->addChild( product_switch );
}
}
else
{
// no geometry for this entity: register an (empty) named placeholder so the
// guid still resolves to a node in the scene graph
if( group->getNumChildren() == 0 )
{
osg::ref_ptr<osg::Switch> product_switch = new osg::Switch();
group->addChild( product_switch );
std::stringstream switch_name;
switch_name << guid << ":" << ifc_object_def->className();
product_switch->setName( switch_name.str().c_str() );
}
m_map_entity_guid_to_switch[guid] = group;
}
}
// \brief Creates an osg::StateSet (material, optional blending) from an appearance.
// \param appearence           source appearance; no-op when null
// \param[out] target_stateset newly allocated state set (overwrites any previous value)
// NOTE: the alpha channel of all material colors is taken from m_transparency,
// matching previous behavior, regardless of m_set_transparent.
void convertToOSGStateSet( const shared_ptr<AppearanceData>& appearence, osg::ref_ptr<osg::StateSet>& target_stateset )
{
if( !appearence )
{
return;
}
const float shininess = appearence->m_shininess;
const float transparency = appearence->m_transparency;
const bool set_transparent = appearence->m_set_transparent;
// removed: unused color_*_a locals (the .a() components were read but never used)
osg::Vec4f ambientColor( appearence->m_color_ambient.r(), appearence->m_color_ambient.g(), appearence->m_color_ambient.b(), transparency );
osg::Vec4f diffuseColor( appearence->m_color_diffuse.r(), appearence->m_color_diffuse.g(), appearence->m_color_diffuse.b(), transparency );
osg::Vec4f specularColor( appearence->m_color_specular.r(), appearence->m_color_specular.g(), appearence->m_color_specular.b(), transparency );
// TODO: material caching and re-use
osg::ref_ptr<osg::Material> mat = new osg::Material();
if( !mat ){ throw OutOfMemoryException(); }
mat->setAmbient( osg::Material::FRONT, ambientColor );
mat->setDiffuse( osg::Material::FRONT, diffuseColor );
mat->setSpecular( osg::Material::FRONT, specularColor );
mat->setShininess( osg::Material::FRONT, shininess );
mat->setColorMode( osg::Material::SPECULAR );
target_stateset = new osg::StateSet();
if( !target_stateset ){ throw OutOfMemoryException(); }
target_stateset->setAttribute( mat, osg::StateAttribute::ON );
if( set_transparent ) // use the local instead of re-reading the member
{
mat->setTransparency( osg::Material::FRONT, transparency );
target_stateset->setMode( GL_BLEND, osg::StateAttribute::ON );
target_stateset->setRenderingHint( osg::StateSet::TRANSPARENT_BIN );
}
if( appearence->m_specular_exponent != 0.f )
{
//osg::ref_ptr<osgFX::SpecularHighlights> spec_highlights = new osgFX::SpecularHighlights();
//spec_highlights->setSpecularExponent( spec->m_value );
// todo: add to scenegraph
}
}
};
|
GB_unaryop__lnot_int64_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_int64_uint16
// op(A') function: GB_tran__lnot_int64_uint16
// C type: int64_t
// A type: uint16_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
int64_t z = (int64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_INT64 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply z = !(x != 0) entrywise: Cx [p] = (int64_t) !(Ax [p] != 0) for all anz
// entries, parallelized over nthreads OpenMP threads (macros defined above).
GrB_Info GB_unop__lnot_int64_uint16
(
int64_t *restrict Cx,
const uint16_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time (GB_control.h);
// the caller falls back to the generic implementation
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// Cx [p] = !((int64_t) Ax [p] != 0), via GB_GETA / GB_CASTING / GB_OP
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast uint16_t -> int64_t, and apply the
// logical-not operator. The transpose loop itself is textually included from
// GB_unaryop_transpose.c, specialized by the GB_* macros defined above.
GrB_Info GB_tran__lnot_int64_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
// disabled at compile time; caller uses the generic transpose
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Physics.c | /*
* Physics.c
*
* Created on: Feb 24, 2016
* Author: abauville
*/
#include "stokes.h"
// Allocates every grid-sized array owned by Physics, plus one phase linked
// list per extended cell, and gives each field a sane initial value.
// Array sizes: nECTot = extended cells, nSTot = shear nodes,
// nVxTot/nVyTot = staggered velocity points.
// Paired with Physics_Memory_free(), which releases everything allocated here.
void Physics_Memory_allocate(Model* Model)
{
    Grid* Grid = &(Model->Grid);
    Physics* Physics = &(Model->Physics);
    Numerics* Numerics = &(Model->Numerics);
    BC* BCStokes = &(Model->BCStokes);

    // Initial time steps, i.e. 0.1*Char.time/Char.time
    Physics->dt = Numerics->dtIni;
    Physics->dtAdv = Numerics->dtIni;
    Physics->dtT = Numerics->dtIni;
    Physics->dtDarcy = Numerics->dtIni;
    Numerics->dtPrevTimeStep = Numerics->dtIni;

    // Reference strain rate; fall back to 1 if the BCs impose none
    Physics->epsRef = fabs(BCStokes->backStrainRate);
    if (Physics->epsRef == 0)
        Physics->epsRef = 1E0;

    Physics->maxVx = (Grid->xmax-Grid->xmin)/Physics->epsRef;
    Physics->maxVy = (Grid->ymax-Grid->ymin)/Physics->epsRef;

    int i;
    // One linked list of SinglePhase entries per extended cell; each list
    // starts with a sentinel head (phase = -1, weight = 0).
    Physics->phaseListHead = (SinglePhase**) malloc( Grid->nECTot * sizeof( SinglePhase* ) );
    for (i=0;i<Grid->nECTot;i++) {
        Physics->phaseListHead[i] = (SinglePhase*) malloc( 1 * sizeof( SinglePhase ) );
        Physics->phaseListHead[i]->phase = -1;
        Physics->phaseListHead[i]->weight = 0;
        Physics->phaseListHead[i]->next = NULL;
    }

    Physics->sumOfWeightsCells = (compute*) malloc( Grid->nECTot * sizeof(compute) );
    Physics->sumOfWeightsNodes = (compute*) malloc( Grid->nSTot * sizeof(compute) );

    Physics->Vx = (compute*) malloc( Grid->nVxTot * sizeof(compute) );
    Physics->Vy = (compute*) malloc( Grid->nVyTot * sizeof(compute) );
#if (INERTIA)
    Physics->Vx0 = (compute*) malloc( Grid->nVxTot * sizeof(compute) );
    Physics->Vy0 = (compute*) malloc( Grid->nVyTot * sizeof(compute) );
#endif
    Physics->P = (compute*) malloc( Grid->nECTot * sizeof(compute) );
    Physics->Z = (compute*) malloc( Grid->nECTot * sizeof(compute) );
    Physics->eta = (compute*) malloc( Grid->nECTot * sizeof(compute) );
    Physics->khi = (compute*) malloc( Grid->nECTot * sizeof(compute) );
    Physics->Lambda = (compute*) malloc( Grid->nECTot * sizeof(compute) );
    Physics->rho = (compute*) malloc( Grid->nECTot * sizeof(compute) );
#if (STORE_PLASTIC_STRAIN)
    Physics->strain = (compute*) malloc( Grid->nECTot * sizeof(compute) );
    Physics->Dstrain = (compute*) malloc( Grid->nECTot * sizeof(compute) );
    Physics->Dvorticity_cum = (compute*) malloc( Grid->nSTot * sizeof(compute) );
#endif
#if (EXTRA_PART_FIELD)
    Physics->extraField = (compute*) malloc( Grid->nECTot * sizeof(compute) );
#endif
#if (HEAT)
    Physics->k = (compute*) malloc( Grid->nECTot * sizeof(compute) );
    Physics->T = (compute*) malloc( Grid->nECTot * sizeof(compute) );
    Physics->T0 = (compute*) malloc( Grid->nECTot * sizeof(compute) );
    Physics->DT = (compute*) malloc( Grid->nECTot * sizeof(compute) );
#endif
#if (DARCY)
    Physics->Pc = (compute*) malloc( Grid->nECTot * sizeof(compute) );
    Physics->divV0 = (compute*) malloc( Grid->nECTot * sizeof(compute) );
    Physics->DeltaP0 = (compute*) malloc( Grid->nECTot * sizeof(compute) );
    Physics->DDeltaP = (compute*) malloc( Grid->nECTot * sizeof(compute) );
    Physics->Pf = (compute*) malloc( Grid->nECTot * sizeof(compute) );
    Physics->phi = (compute*) malloc( Grid->nECTot * sizeof(compute) ); // fluid phase fraction
    Physics->phi0 = (compute*) malloc( Grid->nECTot * sizeof(compute) ); // fluid phase fraction
    Physics->Dphi = (compute*) malloc( Grid->nECTot * sizeof(compute) ); // fluid phase fraction
    Physics->perm0_eta_f = (compute*) malloc( Grid->nECTot * sizeof(compute) ); // permeability
    Physics->perm_eta_f = (compute*) malloc( Grid->nECTot * sizeof(compute) ); // permeability
    Physics->eta_b = (compute*) malloc( Grid->nECTot * sizeof(compute) ); // bulk viscosity
    Physics->khi_b = (compute*) malloc( Grid->nECTot * sizeof(compute) ); // bulk plasticity
    Physics->Zb = (compute*) malloc( Grid->nECTot * sizeof(compute) ); // bulk effective viscosity
#endif
    Physics->G = (compute*) malloc( Grid->nECTot * sizeof(compute) );
    Physics->sigma_xx_0 = (compute*) malloc( Grid->nECTot * sizeof(compute) );
    Physics->sigma_xy_0 = (compute*) malloc( Grid->nSTot * sizeof(compute) );
    Physics->Dsigma_xx_0 = (compute*) malloc( Grid->nECTot * sizeof(compute) );
    Physics->Dsigma_xy_0 = (compute*) malloc( Grid->nSTot * sizeof(compute) );
    Physics->khiShear = (compute*) malloc( Grid->nSTot * sizeof(compute) );
    Physics->GShear = (compute*) malloc( Grid->nSTot * sizeof(compute) );
    Physics->etaShear = (compute*) malloc( Grid->nSTot * sizeof(compute) );
    Physics->ZShear = (compute*) malloc( Grid->nSTot * sizeof(compute) );
    Physics->LambdaShear = (compute*) malloc( Grid->nSTot * sizeof(compute) );
    Physics->EII_eff = (compute*) malloc(Grid->nECTot*sizeof(compute));
    Physics->EII_effShear = (compute*) malloc(Grid->nSTot*sizeof(compute));
    Physics->Tau_y = (compute*) malloc(Grid->nECTot*sizeof(compute));
    Physics->Tau_yShear = (compute*) malloc(Grid->nSTot*sizeof(compute));
    Physics->phase = (int*) malloc( Grid->nECTot * sizeof(int) );
    Physics->volumeChange = (compute*) malloc( Grid->nECTot * sizeof(compute) );

    // Initialize stuff
#pragma omp parallel for private(i) OMP_SCHEDULE
    for (i = 0; i < Grid->nVxTot; ++i) {
        Physics->Vx[i] = 0.0;
#if (INERTIA)
        Physics->Vx0[i] = 0.0;
#endif
    }
#pragma omp parallel for private(i) OMP_SCHEDULE
    for (i = 0; i < Grid->nVyTot; ++i) {
        Physics->Vy[i] = 0.0;
#if (INERTIA)
        Physics->Vy0[i] = 0.0;
#endif
    }
#pragma omp parallel for private(i) OMP_SCHEDULE
    for (i = 0; i < Grid->nECTot; ++i) {
        Physics->khi[i] = 1e30;
#if (STORE_PLASTIC_STRAIN)
        Physics->strain[i] = 0.0;
        Physics->Dstrain[i] = 0.0;
#endif
        Physics->volumeChange[i]=0.0;
#if (HEAT)
        Physics->T[i] = 1.0;
        Physics->DT[i] = 0.0;
#endif
        Physics->P[i] = 0.0;
#if (DARCY)
        Physics->divV0[i] = 0.0;
        Physics->Pf [i] = 0.0;
        Physics->Pc [i] = 0.0;
        Physics->DeltaP0 [i] = 0.0;
        Physics->DDeltaP [i] = 0.0;
        Physics->phi [i] = 0.0;
        Physics->phi0[i] = 0.0;
#endif
        Physics->sigma_xx_0[i] = 0.0;
        Physics->Dsigma_xx_0[i] = 0.0;
        Physics->Lambda[i] = 1.0;
    }
#pragma omp parallel for private(i) OMP_SCHEDULE
    for (i = 0; i < Grid->nSTot; ++i) {
        Physics->sigma_xy_0[i] = 0.0;
        Physics->Dsigma_xy_0[i] = 0.0;
        Physics->LambdaShear[i] = 1.0;
#if (STORE_PLASTIC_STRAIN)
        // BUG FIX: Dvorticity_cum is allocated with nSTot entries (it lives on
        // shear nodes, like Dsigma_xy_0) but was previously zeroed in the
        // nECTot loop above, writing nECTot - nSTot elements out of bounds.
        Physics->Dvorticity_cum[i] = 0.0;
#endif
    }
    Physics->dtMaxwellMin = 1E+100;
    Physics->dtMaxwellMax = 1E-100;
}
// Releases every array and linked list allocated by Physics_Memory_allocate().
// The printf calls trace progress, which helps localize crashes caused by an
// earlier heap corruption.
void Physics_Memory_free(Model* Model)
{
    Grid* Grid = &(Model->Grid);
    Physics* Physics = &(Model->Physics);

    // Free phaseList: walk each per-cell linked list, then the head array
    printf("free phase list\n");
    int iCell;
    SinglePhase* temp;
    for (iCell=0;iCell<Grid->nECTot;iCell++) {
        while (Physics->phaseListHead[iCell] != NULL)
        {
            temp = Physics->phaseListHead[iCell];
            Physics->phaseListHead[iCell] = Physics->phaseListHead[iCell]->next;
            free(temp);
        }
    }
    free( Physics->phaseListHead );

    printf("free V\n");
    free(Physics->Vx);
    free(Physics->Vy);
#if (INERTIA)
    free(Physics->Vx0);
    free(Physics->Vy0);
#endif
    printf("free P\n");
    free(Physics->P );
    printf("free Z\n");
    free(Physics->Z);
    free( Physics->ZShear );
    printf("free eta\n");
    free( Physics->eta );
    free(Physics->etaShear);
    printf("free khi\n");
    free( Physics->khi );
    free( Physics->khiShear );
    printf("free G\n");
    free(Physics->G );
    free( Physics->GShear );
    printf("free Lambda\n");
    free( Physics->Lambda );
    free(Physics->LambdaShear);
    free( Physics->rho );
    printf("free EII\n");
    free(Physics->EII_eff);
    free(Physics->EII_effShear);
    printf("free Tau_y\n");
    free(Physics->Tau_y);
    free(Physics->Tau_yShear);
    printf("strain\n");
#if (STORE_PLASTIC_STRAIN)
    free(Physics->strain);
    free(Physics->Dstrain);
    free(Physics->Dvorticity_cum);
#endif
#if (EXTRA_PART_FIELD)
    // BUG FIX: this used to call malloc() instead of free(), leaking the
    // original extraField buffer plus the newly allocated one.
    free(Physics->extraField);
#endif
#if (HEAT)
    free( Physics->k );
    free(Physics->T );
    free(Physics->T0);
    free(Physics->DT );
#endif
    printf("free phase\n");
    free(Physics->phase);
    // BUG FIX: volumeChange is allocated in Physics_Memory_allocate but was
    // never released here (memory leak).
    free(Physics->volumeChange);
    free(Physics->sigma_xx_0 );
    free(Physics->sigma_xy_0 );
    free(Physics->Dsigma_xx_0 );
    free(Physics->Dsigma_xy_0 );
#if (DARCY)
    free(Physics->Pc);
    free(Physics->divV0);
    free(Physics->DeltaP0);
    free(Physics->DDeltaP);
    free(Physics->Pf);
    free(Physics->phi);
    free(Physics->Dphi);
    free(Physics->phi0);
    free(Physics->perm0_eta_f);
    free(Physics->perm_eta_f);
    free(Physics->eta_b);
    free(Physics->Zb);
    free(Physics->khi_b);
#endif
    free(Physics->sumOfWeightsCells);
    free(Physics->sumOfWeightsNodes);
}
// Prepends a freshly allocated SinglePhase entry (given phase, zero weight)
// to the linked list whose head pointer is *pointerToHead.
void Physics_Phase_addSingle(SinglePhase** pointerToHead, int phase)
{
    SinglePhase* newEntry = (SinglePhase*) malloc(sizeof(SinglePhase));
    newEntry->phase = phase;
    newEntry->weight = 0.0;
    // Link in front of the current head; when the list is empty this leaves
    // next == NULL, exactly as before.
    newEntry->next = *pointerToHead;
    *pointerToHead = newEntry;
}
// Initializes the cell pressure P to the lithostatic value by cumulatively
// integrating rho*g along each gravity component, sweeping from the boundary
// the gravity vector points away from. The gx contribution is added on top of
// the gy one. Under DARCY, Pf is seeded with P and the compaction-pressure
// fields are zeroed.
void Physics_P_initToLithostatic(Model* Model)
{
    Grid* Grid = &(Model->Grid);
    Physics* Physics = &(Model->Physics);
    int iy, ix, iCell, iCellS, iCellN, iCellW, iCellE;
    compute rho_g_h = 0.0;

    // Contribution of gy: integrate column by column, downward or upward
    // depending on the sign of g[1]
    if (Physics->g[1]>0){
        for (ix = 0; ix < Grid->nxEC; ++ix) {
            for (iy = 0; iy < Grid->nyEC; ++iy) {
                iCell = ix + iy*Grid->nxEC;
                iCellS = ix + (iy-1)*Grid->nxEC;
                if (iy==0) {
                    // half-cell offset for the first (ghost) row
                    rho_g_h = Physics->rho[iCell] * Physics->g[1] * (-0.5*Grid->DYEC[iy] );
                } else {
                    // trapezoidal accumulation with the cell below
                    rho_g_h += 0.5*(Physics->rho[iCell]+Physics->rho[iCellS]) * Physics->g[1] * Grid->DYEC[iy-1] ;
                }
                Physics->P[iCell] = rho_g_h;
            }
        }
    } else {
        for (ix = 0; ix < Grid->nxEC; ++ix) {
            for (iy = Grid->nyEC-1; iy >= 0; --iy) {
                iCell = ix + iy*Grid->nxEC;
                iCellN = ix + (iy+1)*Grid->nxEC;
                if (iy==Grid->nyEC-1) {
                    rho_g_h = Physics->rho[iCell] * -Physics->g[1] * (-0.5*Grid->DYEC[iy-1] );
                } else {
                    rho_g_h += 0.5*(Physics->rho[iCell]+Physics->rho[iCellN]) * -Physics->g[1] * Grid->DYEC[iy] ;
                }
                Physics->P[iCell] = rho_g_h;
            }
        }
    }

    // BUG FIX: abs() is the *integer* absolute value; for |g[0]| < 1 it
    // truncated to 0 and the gx contribution was silently skipped. fabs() is
    // the correct floating-point function.
    if (fabs(Physics->g[0])>1E-8) {
        // Contribution of gx
        if (Physics->g[0]>0){
            for (iy = 0; iy < Grid->nyEC; ++iy) {
                for (ix = 0; ix < Grid->nxEC; ++ix) {
                    iCell = ix + iy*Grid->nxEC;
                    iCellW = ix-1 + (iy)*Grid->nxEC;
                    if (ix==0) {
                        rho_g_h = Physics->rho[iCell] * Physics->g[0] * (-0.5*Grid->DXEC[ix] );
                    } else {
                        rho_g_h += 0.5*(Physics->rho[iCell]+Physics->rho[iCellW]) * Physics->g[0] * Grid->DXEC[ix-1] ;
                    }
                    Physics->P[iCell] += rho_g_h;
                }
            }
        } else {
            for (iy = 0; iy < Grid->nyEC; ++iy) {
                for (ix = Grid->nxEC-1; ix >= 0; --ix) {
                    iCell = ix + iy*Grid->nxEC;
                    iCellE = ix+1 + (iy)*Grid->nxEC;
                    if (ix==Grid->nxEC-1) {
                        rho_g_h = Physics->rho[iCell] * -Physics->g[0] * (-0.5*Grid->DXEC[ix-1] );
                    } else {
                        rho_g_h += 0.5*(Physics->rho[iCell]+Physics->rho[iCellE]) * -Physics->g[0] * Grid->DXEC[ix] ;
                    }
                    Physics->P[iCell] += rho_g_h;
                }
            }
        }
    }

    for (iCell = 0; iCell < Grid->nECTot; ++iCell) {
#if (DARCY)
        Physics->Pf[iCell] = Physics->P[iCell];
        Physics->Pc[iCell] = 0.0;
        Physics->DeltaP0[iCell] = 0.0;
        Physics->DDeltaP[iCell] = 0.0;
#endif
    }
}
// Advects the velocity field on the fixed (Eulerian) grid over dtAdv using the
// non-conservative form V_new = V*(1 - dt*dV/dx) - V_perp*dt*dV/dy, averaging
// the current and previous (V0) gradients when INERTIA is enabled. Results are
// written back only on non-Dirichlet nodes; Dirichlet values are preserved.
// NOTE(review): the periodic-BC error messages below name
// "Physics_interpFromParticlestoCell" — presumably copy-pasted; confirm.
void Physics_Velocity_advectEulerian(Model* Model)
{
Grid* Grid = &(Model->Grid);
Physics* Physics = &(Model->Physics);
Numbering* NumStokes = &(Model->NumStokes);
int ix, iy;
compute dVxdx, dVxdy, dVydx, dVydy;
// "0" gradients come from the previous velocity field (INERTIA only)
compute dVxdx0, dVxdy0, dVydx0, dVydy0;
// scratch buffers so updates do not pollute the stencils of later nodes
compute* VxNew = (compute*) malloc(Grid->nVxTot * sizeof(compute));
compute* VyNew = (compute*) malloc(Grid->nVyTot * sizeof(compute));
compute Vx, Vy;
compute dt = Physics->dtAdv;
// Advect Vx on interior Vx nodes (centered differences)
for (iy = 1; iy < Grid->nyVx-1; ++iy) {
for (ix = 1; ix < Grid->nxVx-1; ++ix) {
dVxdx = (Physics->Vx[ix+1 + iy *Grid->nxVx] - Physics->Vx[ix-1 + iy *Grid->nxVx])/(2.0*Grid->dx);
dVxdy = (Physics->Vx[ix + (iy+1)*Grid->nxVx] - Physics->Vx[ix + (iy-1)*Grid->nxVx])/(2.0*Grid->dy);
#if (INERTIA)
dVxdx0 = (Physics->Vx0[ix+1 + iy *Grid->nxVx] - Physics->Vx0[ix-1 + iy *Grid->nxVx])/(2.0*Grid->dx);
dVxdy0 = (Physics->Vx0[ix + (iy+1)*Grid->nxVx] - Physics->Vx0[ix + (iy-1)*Grid->nxVx])/(2.0*Grid->dy);
#else
// without inertia the time average degenerates to the current gradient
dVxdx0 = dVxdx;
dVxdy0 = dVxdy;
#endif
// Vy interpolated to the Vx node from the four surrounding Vy points
Vy = 0.25* (Physics->Vy[ix + (iy )*Grid->nxVy] + Physics->Vy[ix+1 + (iy )*Grid->nxVy] + Physics->Vy[ix + (iy-1)*Grid->nxVy] + Physics->Vy[ix+1 + (iy-1)*Grid->nxVy]);
//VxNew[ix+iy*Grid->nxVx] = Physics->Vx[ix + iy *Grid->nxVx]*(1.0-dt*dVxdx) - dt*Vy*dVxdy;
VxNew[ix+iy*Grid->nxVx] = Physics->Vx[ix + iy *Grid->nxVx]*(1.0-dt*.5*(dVxdx+dVxdx0)) - dt*Vy*.5*(dVxdy+dVxdy0);
}
}
// Advect Vy on interior Vy nodes (same scheme, roles of x/y swapped)
for (iy = 1; iy < Grid->nyVy-1; ++iy) {
for (ix = 1; ix < Grid->nxVy-1; ++ix) {
dVydx = (Physics->Vy[ix+1 + iy *Grid->nxVy] - Physics->Vy[ix-1 + iy *Grid->nxVy])/(2.0*Grid->dx);
dVydy = (Physics->Vy[ix + (iy+1)*Grid->nxVy] - Physics->Vy[ix + (iy-1)*Grid->nxVy])/(2.0*Grid->dy);
#if (INERTIA)
dVydx0 = (Physics->Vy0[ix+1 + iy *Grid->nxVy] - Physics->Vy0[ix-1 + iy *Grid->nxVy])/(2.0*Grid->dx);
dVydy0 = (Physics->Vy0[ix + (iy+1)*Grid->nxVy] - Physics->Vy0[ix + (iy-1)*Grid->nxVy])/(2.0*Grid->dy);
#else
dVydx0 = dVydx;
dVydy0 = dVydy;
#endif
// Vx interpolated to the Vy node from the four surrounding Vx points
Vx = 0.25* (Physics->Vx[ix + (iy )*Grid->nxVx] + Physics->Vx[ix-1 + (iy )*Grid->nxVx] + Physics->Vx[ix + (iy+1)*Grid->nxVx] + Physics->Vx[ix-1 + (iy+1)*Grid->nxVx]);
//VyNew[ix+iy*Grid->nxVy] = Physics->Vy[ix + iy *Grid->nxVy]*(1.0-dt*dVydy) - Vx*dt*dVydx;
VyNew[ix+iy*Grid->nxVy] = Physics->Vy[ix + iy *Grid->nxVy]*(1.0-dt*.5*(dVydy+dVydy0)) - Vx*dt*.5*(dVydx+dVydx0);
}
}
// Commit the new velocities; Dirichlet nodes (InoDir<0) keep their BC value
int iVx, iVy, InoDir;
for (iy = 0; iy < Grid->nyVx; ++iy) {
for (ix = 0; ix < Grid->nxVx; ++ix) {
iVx = ix + iy*Grid->nxVx;
InoDir = NumStokes->map[iVx];
if (Grid->isPeriodic) {
printf("error: in Physics_interpFromParticlestoCell: the implementation of the interpolation of velocities from particles to cell is not finished for the case of periodic BC");
}
if (InoDir>=0) { // Not a Dirichlet node
Physics->Vx [iVx] = VxNew[iVx];
#if (INERTIA)
Physics->Vx0[iVx] = VxNew[iVx];
#endif
} else {
#if (INERTIA)
Physics->Vx0[iVx] = Physics->Vx[iVx];
#endif
}
}
}
for (iy = 0; iy < Grid->nyVy; ++iy) {
for (ix = 0; ix < Grid->nxVy; ++ix) {
iVy = ix + iy*Grid->nxVy;
// Vy unknowns are numbered after all Vx unknowns in the Stokes map
InoDir = NumStokes->map[iVy + Grid->nVxTot];
if (Grid->isPeriodic) {
printf("error: in Physics_interpFromParticlestoCell: the implementation of the interpolation of velocities from particles to cell is not finished for the case of periodic BC");
}
if (InoDir>=0) { // Not a Dirichlet node
Physics->Vy [iVy] = VyNew[iVy];
#if (INERTIA)
Physics->Vy0[iVy] = VyNew[iVy];
#endif
} else {
#if (INERTIA)
Physics->Vy0[iVy] = Physics->Vy[iVy];
#endif
}
}
}
free(VxNew);
free(VyNew);
}
// Copies the Stokes solution vector into Physics->Vx / Physics->Vy and
// reconstructs values on boundary nodes: Dirichlet (imposed value), Neumann
// (mirrored from the interior neighbour), and ghost rows/columns
// (DirichletGhost / NeumannGhost extrapolation). Finally updates
// Physics->maxVx / maxVy from cell-centered velocity magnitudes.
void Physics_Velocity_retrieveFromSolution(Model* Model)
{
    Grid* Grid = &(Model->Grid);
    Physics* Physics = &(Model->Physics);
    BC* BC = &(Model->BCStokes);
    Numbering* Numbering = &(Model->NumStokes);
    EqSystem* EqSystem = &(Model->EqStokes);

    // Declarations
    // =========================
    int ix, iy, i;
    int I;
    int InoDir, INeigh;

    // Init Vx, Vy, P to -1, for debugging purposes
    // =========================
    for (i = 0; i < Grid->nVxTot; ++i) {
        Physics->Vx[i] = -1;
    }
    for (i = 0; i < Grid->nVyTot; ++i) {
        Physics->Vy[i] = -1;
    }
    for (i = 0; i < Grid->nECTot; ++i) {
        Physics->P[i] = -1;
    }

    // Set Vx
    // =========================
    int IBC;
    compute scale;
#pragma omp parallel for private(iy, ix, I, InoDir, IBC, INeigh, scale) OMP_SCHEDULE // maxVx would conflict
    for (iy = 0; iy < Grid->nyVx; ++iy) {
        for (ix = 0; ix < Grid->nxVx; ++ix) {
            I = ix + iy*Grid->nxVx;
            InoDir = Numbering->map[I];
            scale = 1.0;//EqSystem->S[InoDir];
            if (InoDir>=0) { // Not a Dirichlet node
                scale = 1.0;//EqSystem->S[InoDir];
                Physics->Vx[I] = EqSystem->x[InoDir]*scale;
            }
            // Deal with boundary conditions
            else { // Dirichlet or Neumann
                IBC = abs(InoDir)-1; // BC nodes are numbered -1 to -n
                if (BC->type[IBC]==Dirichlet) { // Dirichlet on normal node
                    Physics->Vx[I] = BC->value[IBC];
                }
                else if (BC->type[IBC]==Neumann) { // Neumann on normal node
                    // Get neighbours index
                    if (ix==0) { // left boundary
                        INeigh = Numbering->map[ ix+1 + (iy)*Grid->nxVx ];
                        if (INeigh<0) {
                            // corner: step diagonally to reach a free node
                            if (iy==0) {
                                INeigh = Numbering->map[ ix+1 + (iy+1)*Grid->nxVx ];
                            } else if (iy==Grid->nyVx-1) {
                                INeigh = Numbering->map[ ix+1 + (iy-1)*Grid->nxVx ];
                            }
                        }
                        Physics->Vx[I] = EqSystem->x[INeigh]*scale;// - BC->value[IBC] *Grid->dx/(2*Physics->Z[ix+1 + (iy)*Grid->nxEC ]);
                    } else if (ix==Grid->nxVx-1) { // right boundary
                        INeigh = Numbering->map[ ix-1 + (iy)*Grid->nxVx ];
                        if (INeigh<0) {
                            if (iy==0) {
                                INeigh = Numbering->map[ ix-1 + (iy+1)*Grid->nxVx ];
                            } else if (iy==Grid->nyVx-1) {
                                INeigh = Numbering->map[ ix-1 + (iy-1)*Grid->nxVx ];
                            }
                        }
                        Physics->Vx[I] = EqSystem->x[INeigh]*scale;// + BC->value[IBC] *Grid->dx/(2*Physics->Z[ix + (iy)*Grid->nxEC ]);
                    } else {
                        INeigh = 0;
                        printf("error internal BC are not properly taken into account yet. (Neumann Vx)\n");
                        exit(0);
                    }
                }
                else { // on a ghost node
                    // Get neighbours index
                    if (iy==0) { // lower boundary
                        INeigh = Numbering->map[ ix + (iy+1)*Grid->nxVx ];
                    } else if (iy==Grid->nyVx-1) { // upper boundary
                        INeigh = Numbering->map[ ix + (iy-1)*Grid->nxVx ];
                    } else {
                        // interior ghost row (e.g. sticky air top): mirror
                        // from the row below BC->iyTopRow
                        INeigh = Numbering->map[ ix + (BC->iyTopRow-1)*Grid->nxVx ];
                    }
                    scale = 1.0;//EqSystem->S[INeigh];
                    if (BC->type[IBC]==DirichletGhost) { // Dirichlet
                        Physics->Vx[I] = 2.0*BC->value[IBC] - EqSystem->x[INeigh]*scale;
                    }
                    else if (BC->type[IBC]==NeumannGhost) { // Neumann
                        if (iy==0) // lower boundary
                            Physics->Vx[I] = EqSystem->x[INeigh]*scale - BC->value[IBC]/Physics->ZShear[ix + 0*Grid->nxS]*Grid->dy;
                        if (iy==Grid->nyVx-1) // top boundary
                            Physics->Vx[I] = EqSystem->x[INeigh]*scale + BC->value[IBC]/Physics->ZShear[ix + (Grid->nyS-1)*Grid->nxS]*Grid->dy;
                    }
                    else {
                        printf("error: unknown boundary type\n");
                        exit(0);
                    }
                }
            }
        }
    }

    // Set Vy
    // =========================
    int IMap;
    //#pragma omp parallel for private(iy, ix, I, IMap, InoDir, IBC, INeigh, scale) OMP_SCHEDULE // maxVx would conflict
    for (iy = 0; iy < Grid->nyVy; ++iy) {
        for (ix = 0; ix < Grid->nxVy; ++ix) {
            // Vy unknowns are numbered after all Vx unknowns in the map
            IMap = ix + iy*Grid->nxVy + Grid->nVxTot;
            I = ix + iy*Grid->nxVy;
            InoDir = Numbering->map[IMap];
            if (InoDir>=0) { // Not a Dirichlet node
                scale = 1.0;//EqSystem->S[InoDir];
                Physics->Vy[I] = EqSystem->x[InoDir]*scale;
            }
            // Deal with boundary conditions
            else { // Dirichlet or Neumann
                IBC = abs(InoDir)-1;
                if (BC->type[IBC]==Dirichlet) { // Dirichlet on normal node
                    Physics->Vy[I] = BC->value[IBC];
                }
                else if (BC->type[IBC]==Neumann) {
                    // Get neighbours index
                    if (iy==0) { // lower boundary
                        INeigh = Numbering->map[ ix + (iy+1)*Grid->nxVy + Grid->nVxTot ];
                        if (INeigh<0) {
                            if (ix==0) {
                                INeigh = Numbering->map[ ix+1 + (iy+1)*Grid->nxVy ];
                            } else if (ix==Grid->nxVy-1) {
                                INeigh = Numbering->map[ ix-1 + (iy+1)*Grid->nxVy ];
                            }
                        }
                        Physics->Vy[I] = EqSystem->x[INeigh]*scale;// - BC->value[IBC] *Grid->dx/(2*Physics->Z[ix + (iy+1)*Grid->nxEC ]);
                    } else if (iy==Grid->nyVy-1) { // top boundary
                        INeigh = Numbering->map[ ix + (iy-1)*Grid->nxVy + Grid->nVxTot ];
                        if (INeigh<0) {
                            if (ix==0) {
                                INeigh = Numbering->map[ ix+1 + (iy-1)*Grid->nxVy ];
                            } else if (ix==Grid->nxVy-1) {
                                INeigh = Numbering->map[ ix-1 + (iy-1)*Grid->nxVy ];
                            }
                        }
                        Physics->Vy[I] = EqSystem->x[INeigh]*scale;// + BC->value[IBC] *Grid->dx/(2*Physics->Z[ix + (iy )*Grid->nxEC ]);
                    } else {
                        INeigh = 0;
                        printf("error internal BC are not properly taken into account yet. (Neumann Vy)\n");
                        exit(0);
                    }
                }
                else { // on a ghost node
                    // Get neighbours index
                    if (ix==0) { // left boundary
                        INeigh = Numbering->map[ ix+1 + (iy)*Grid->nxVy + Grid->nVxTot ];
                    } else if (ix==Grid->nxVy-1) { // right boundary
                        INeigh = Numbering->map[ ix-1 + (iy)*Grid->nxVy + Grid->nVxTot ];
                    } else {
                        INeigh = 0;
                        printf("error internal BC are not properly taken into account yet. (Ghost Vy)\n");
                        exit(0);
                    }
                    scale = 1.0;//EqSystem->S[INeigh];
                    if (BC->type[IBC]==DirichletGhost) { // Dirichlet
                        Physics->Vy[I] = 2.0*BC->value[IBC] - EqSystem->x[INeigh]*scale;
                    }
                    else if (BC->type[IBC]==NeumannGhost) { // Neumann
                        if (ix==0) // left boundary
                            Physics->Vy[I] = EqSystem->x[INeigh]*scale - BC->value[IBC]/Physics->ZShear[0 + iy*Grid->nxS]*Grid->dx;
                        if (ix==Grid->nxVy-1) // right boundary
                            Physics->Vy[I] = EqSystem->x[INeigh]*scale + BC->value[IBC]/Physics->ZShear[Grid->nxS-1 + iy*Grid->nxS]*Grid->dx;
                    }
                    else {
                        printf("error: unknown boundary type\n");
                        exit(0);
                    }
                }
            }
            if (isnan(Physics->Vy[I])) {
                printf("nan found in Vy, InoDir = %i \n", InoDir);
                if (InoDir<0) {
                    IBC = abs(InoDir)-1;
                    printf("BC->type[IBC] = %i,ix = %i, iy = %i, BCValue =%.2e, ZshearR = %.2e\n",BC->type[IBC], ix, iy, BC->value[IBC],Physics->ZShear[Grid->nxS-1 + iy*Grid->nxS]);
                }
                //exit(0);
            }
        }
    }

    // Track the maximum cell-centered velocity magnitudes (used elsewhere for
    // the CFL time-step restriction)
    compute maxVx, maxVy;
    compute Vx, Vy;
    maxVx = 0.0;
    maxVy = 0.0;
    for (iy = 1; iy<Grid->nyEC-1; iy++) {
        for (ix = 1; ix<Grid->nxEC-1; ix++) {
            Vx = (Physics->Vx[ix-1+ iy *Grid->nxVx]+Physics->Vx[ix+ iy *Grid->nxVx])/2.0;
            // BUG FIX: the second term used to read Physics->Vx at a Vy index
            // (copy-paste error), corrupting maxVy. The cell-centered Vy is
            // the average of the Vy values below and above the cell center.
            Vy = (Physics->Vy[ix + (iy-1)*Grid->nxVy]+Physics->Vy[ix + iy *Grid->nxVy])/2.0;
            maxVx = fmax(maxVx, fabs(Vx));
            maxVy = fmax(maxVy, fabs(Vy));
        }
    }
    Physics->maxVx = maxVx;
    Physics->maxVy = maxVy;
}
#if (INERTIA)
// Stores the current velocity fields into the "previous step" arrays
// (Vx0, Vy0), used by the inertia terms. A pointer swap would avoid the copy,
// but other code holds direct references to these buffers.
void Physics_VelOld_POld_updateGlobal (Model* Model)
{
    Grid* Grid = &(Model->Grid);
    Physics* Physics = &(Model->Physics);
    int iV;
#pragma omp parallel for private(iV) OMP_SCHEDULE
    for (iV = 0; iV < Grid->nVxTot; ++iV) {
        Physics->Vx0[iV] = Physics->Vx[iV];
    }
#pragma omp parallel for private(iV) OMP_SCHEDULE
    for (iV = 0; iV < Grid->nVyTot; ++iV) {
        Physics->Vy0[iV] = Physics->Vy[iV];
    }
}
#endif
// Retrieves pressure from the Stokes solution. Without DARCY: reads P
// (equation slot 2), shifts it so the average of the row below BC->iyTopRow
// equals Pback, and overwrites the rows above iyTopRow with Pback (display
// only). With DARCY: reads fluid pressure Pf (slot 2) and compaction pressure
// Pc (slot 3) and sets total P = Pc + Pf.
void Physics_P_retrieveFromSolution(Model* Model)
{
Physics* Physics = &(Model->Physics);
Grid* Grid = &(Model->Grid);
BC* BCStokes = &(Model->BCStokes);
EqSystem* EqStokes = &(Model->EqStokes);
Numbering* NumStokes = &(Model->NumStokes);
int iCell;
#if (!DARCY)
// /!\ For visu it's better if all sides are Neumann
Physics_CellVal_retrieveFromSolution (Physics->P, 2, Grid, BCStokes, NumStokes, EqStokes);
// Shift pressure, taking the pressure of the upper left cell (inside) as reference (i.e. 0)
compute RefPressure = 0.0;// = Physics->P[Grid->nxEC/2 + (Grid->nyEC-2)*Grid->nxEC];// - 1.0;//Physics->P[1 + (Grid->nyEC-2)*Grid->nxEC];//Physics->P[Grid->nxEC/2 + (Grid->nyEC-2)*Grid->nxEC];
int ix;
// Reference = average pressure of the row just below the top row
for (ix=0;ix<Grid->nxEC;++ix) {
//RefPressure += Physics->P[ix+(Grid->nyEC-2)*Grid->nxEC];
RefPressure += Physics->P[ix+(BCStokes->iyTopRow-1)*Grid->nxEC];
}
RefPressure/=Grid->nxEC;
//compute RefPressure = 0.0;
//compute RefPressure = Physics->P[1 + (Grid->nyEC-2)*Grid->nxEC];// - 1.0;//Physics->P[1 + (Grid->nyEC-2)*Grid->nxEC];//Physics->P[Grid->nxEC/2 + (Grid->nyEC-2)*Grid->nxEC];
/*
compute meanP = 0.0;
compute minP = 1e100;
compute maxP = -1e100;
for (iCell = 0; iCell < Grid->nECTot; ++iCell) {
meanP += Physics->P [iCell];
maxP = fmax(maxP, Physics->P [iCell]);
minP = fmin(minP, Physics->P [iCell]);
}
meanP/= (compute)Grid->nECTot;
RefPressure = meanP;
//printf("meanP = %.2e, minP = %.2e, maxP = %.2e\n",meanP, minP, maxP);
*/
// Re-anchor the pressure field on the background pressure Pback
for (iCell = 0; iCell < Grid->nECTot; ++iCell) {
Physics->P [iCell] = Physics->P [iCell] - RefPressure + Physics->Pback;
}
int iy;
for (iy=BCStokes->iyTopRow;iy<Grid->nyEC;++iy) {
for (ix=0;ix<Grid->nxEC;++ix) {
iCell = ix + iy*Grid->nxEC;
Physics->P[iCell] = Physics->Pback; // Just for visualization, doesn't contribute to the solution
}
}
#else
int i;
Physics_CellVal_retrieveFromSolution (Physics->Pf, 2, Grid, BCStokes, NumStokes, EqStokes);
Physics_CellVal_retrieveFromSolution (Physics->Pc, 3, Grid, BCStokes, NumStokes, EqStokes);
// Shift pressure, taking the pressure of the upper left cell (inside) as reference (i.e. 0)
// Ref = average top row
//compute RefPressure = Physics->Pf[Grid->nxEC/2 + (Grid->nyEC-2)*Grid->nxEC];
/*
for (ix = 0; ix < Grid->nxEC; ++ix) {
iCell = ix + (Grid->nyEC-2)*Grid->nxEC;
RefPressure += Physics->Pf[iCell];
}
RefPressure /= Grid->nxEC;
*/
/*
compute RefPressure = 0.0;//Physics->Pf[1 + (Grid->nyEC-2)*Grid->nxEC];
for (iy = 0; iy < Grid->nyEC-1; ++iy) {
for (ix = 0; ix < Grid->nxEC; ++ix) {
iCell = ix + iy*Grid->nxEC;
Physics->Pf [iCell] = Physics->Pf [iCell] - RefPressure;
}
}
RefPressure = 0.0;//Physics->Pc[1 + (Grid->nyEC-2)*Grid->nxEC];
for (iCell = 0; iCell < Grid->nECTot; ++iCell) {
Physics->Pc [iCell] = Physics->Pc [iCell] - RefPressure;
}
*/
// Fill P, the total pressure
for (iCell = 0; iCell < Grid->nECTot; ++iCell) {
Physics->P[iCell] = Physics->Pc[iCell] + Physics->Pf[iCell];
}
#endif
}
#if (HEAT)
// Copies the thermal solution vector into the cell temperature array T
// (equation slot 0 of the thermal system).
void Physics_T_retrieveFromSolution(Model* Model)
{
    Physics* Physics = &(Model->Physics);
    Grid* Grid = &(Model->Grid);
    BC* BCThermal = &(Model->BCThermal);
    Numbering* NumThermal = &(Model->NumThermal);
    EqSystem* EqThermal = &(Model->EqThermal);
    Physics_CellVal_retrieveFromSolution (Physics->T, 0, Grid, BCThermal, NumThermal, EqThermal);
}
#endif
// Accumulates the stress increments into the "old" stress fields:
// sigma_xx_0 += Dsigma_xx_0 on interior cells (boundary cells are then filled
// by copying neighbours), and sigma_xy_0 += Dsigma_xy_0 on all shear nodes.
void Physics_Sigma0_updateGlobal_fromGrid(Model* Model)
{
    Grid* Grid = &(Model->Grid);
    Physics* Physics = &(Model->Physics);
    int ix, iy, iCell, iNode;

    // Normal stress on interior extended cells
#pragma omp parallel for private(iy, ix, iCell) OMP_SCHEDULE
    for (iy = 1; iy < Grid->nyEC-1; ++iy) {
        for (ix = 1; ix < Grid->nxEC-1; ++ix) {
            iCell = ix + iy*Grid->nxEC;
            Physics->sigma_xx_0[iCell] = Physics->sigma_xx_0[iCell] + Physics->Dsigma_xx_0[iCell];
        }
    }
    // Propagate interior values into the boundary ghost cells
    Physics_CellVal_SideValues_copyNeighbours_Global(Physics->sigma_xx_0, Grid);

    // Shear stress on every node (nodes have no ghost layer to fill)
#pragma omp parallel for private(iy, ix, iNode) OMP_SCHEDULE
    for (iy = 0; iy < Grid->nyS; ++iy) {
        for (ix = 0; ix < Grid->nxS; ++ix) {
            iNode = ix + iy*Grid->nxS;
            Physics->sigma_xy_0[iNode] = Physics->sigma_xy_0[iNode] + Physics->Dsigma_xy_0[iNode];
        }
    }
}
// Computes the visco-elastic(-plastic) stress increments Dsigma_xx_0 (cells)
// and Dsigma_xy_0 (nodes) from the current velocity field, old stresses and
// effective viscosities; see Taras' book p. 186. Under DARCY it also updates
// the compaction-pressure increment DDeltaP.
void Physics_Dsigma_updateGlobal(Model* Model)
{
    Grid* Grid = &(Model->Grid);
    Physics* Physics = &(Model->Physics);
    BC* BC = &(Model->BCStokes);
    Numerics* Numerics = &(Model->Numerics);
    int ix, iy, iCell, iNode;
    compute Z;
    compute Eps_xx, Eps_xy;
    compute dVxdy, dVydx, dVxdx, dVydy;
    compute G;
    compute dt = Physics->dt;
    printf("dt = %.2e, dtAdv= %.2e\n", Physics->dt, Physics->dtAdv);

    // Normal stress increment on interior cells
    //#pragma omp parallel for private(iy, ix, iCell, dVxdx, dVydy, Eps_xx) OMP_SCHEDULE
    for (iy = 1; iy < Grid->nyEC-1; ++iy) {
        for (ix = 1; ix < Grid->nxEC-1; ++ix) {
            iCell = ix + iy*Grid->nxEC;
            dVxdx = (Physics->Vx[(ix) + (iy)*Grid->nxVx] - Physics->Vx[(ix-1) + (iy)*Grid->nxVx])/Grid->dx;
            dVydy = (Physics->Vy[(ix) + (iy)*Grid->nxVy] - Physics->Vy[(ix) + (iy-1)*Grid->nxVy])/Grid->dy;
            Eps_xx = 0.5*(dVxdx-dVydy);
            // visco-elastic predictor stress, then plastic scaling by Lambda
            compute SxxVE = 2.0 * Physics->Z[iCell]*(Eps_xx + Physics->sigma_xx_0[iCell]/(2.0*Physics->G[iCell]*dt));
            Physics->Dsigma_xx_0[iCell] = SxxVE*Physics->Lambda[iCell] - Physics->sigma_xx_0[iCell];
#if (USE_UPPER_CONVECTED)
            /*
            // upper convected correction for the rotation of stresses
            // (disabled; kept for reference)
            compute sigma_xy_0 = Interp_NodeVal_Node2Cell_Local(Physics->sigma_xy_0,ix,iy,Grid->nxS);
            // Anton's trick
            dVxdy = 0.0;
            compute Sxy_x_Dvxdy = 0.0;
            int iN, Ix, Iy;
            int IxMod[4] = {0,1,1,0}; // lower left, lower right, upper right, upper left
            int IyMod[4] = {0,0,1,1};
            for (iN = 0; iN < 4; ++iN) {
                Ix = (ix-1)+IxMod[iN];
                Iy = (iy-1)+IyMod[iN];
                dVxdy += 0.25*( Physics->Vx[(Ix )+(Iy+1)*Grid->nxVx]
                              - Physics->Vx[(Ix )+(Iy )*Grid->nxVx] )/Grid->dy;
                Sxy_x_Dvxdy += 0.25*Physics->sigma_xy_0[Ix+Iy*Grid->nxS]*( Physics->Vx[(Ix )+(Iy+1)*Grid->nxVx]
                              - Physics->Vx[(Ix )+(Iy )*Grid->nxVx] )/Grid->dy;
            }
            Physics->Dsigma_xx_0[iCell] += 2.0 * Physics->Z[iCell]/(Physics->G[iCell])*(Physics->sigma_xx_0[iCell]*dVxdx + Sxy_x_Dvxdy );
            */
#endif
            // NOTE(review): the dtAdv/dt rescaling applied to Dsigma_xy_0
            // below is deliberately commented out here — confirm intended.
            //Physics->Dsigma_xx_0[iCell] *= Physics->dtAdv/Physics->dt;
            if (isnan(Physics->Dsigma_xx_0[iCell])) {
                printf("isnan Physics->Dsigma_xx_0[iCell]\n");
            }
            if (Numerics->timeStep>0) {
                //Physics->Dsigma_xx_0[iCell] = 0.5*Physics->dtAdv* (Physics->Dsigma_xx_0[iCell]/Physics->dtAdv + Ds0_old/Physics->dtAdv0); // Crank-Nicolson, buggy!!
            }
        }
    }
    Physics_CellVal_SideValues_copyNeighbours_Global(Physics->Dsigma_xx_0, Grid);

    // Shear stress increment on nodes
    //#pragma omp parallel for private(iy, ix, iNode,dVxdy, dVydx, Eps_xy, G, Z) OMP_SCHEDULE
    for (iy = 0; iy < Grid->nyS; ++iy) {
        for (ix = 0; ix < Grid->nxS; ++ix) {
            iNode = ix + iy*Grid->nxS;
            dVxdy = ( Physics->Vx[ix + (iy+1)*Grid->nxVx] - Physics->Vx[ix + (iy )*Grid->nxVx] )/Grid->dy;
            dVydx = ( Physics->Vy[ix+1+ iy*Grid->nxVy] - Physics->Vy[ix + iy*Grid->nxVy] )/Grid->dx;
            Eps_xy = 0.5*(dVxdy+dVydx);
            G = Physics->GShear[iNode];
            Z = Physics->ZShear[iNode];
            compute SxyVE = 2.0*Z * (Eps_xy + Physics->sigma_xy_0[iNode]/(2.0*G*dt));
            Physics->Dsigma_xy_0[iNode] = SxyVE*Physics->LambdaShear[iNode] - Physics->sigma_xy_0[iNode];
#if (USE_UPPER_CONVECTED)
            /*
            compute sigma_xx_0 = Interp_ECVal_Cell2Node_Local(Physics->sigma_xx_0,ix,iy,Grid->nxEC);
            Physics->Dsigma_xy_0[iNode] += 1.0*Z/G * (sigma_xx_0*(dVydx-dVxdy));
            */
#endif
            // Scale so the increment matches the advection time step
            Physics->Dsigma_xy_0[iNode] *= Physics->dtAdv/Physics->dt;
            if (isnan(Physics->Dsigma_xy_0[iNode])) {
                printf("isnan Physics->Dsigma_xy_0[iNode]\n");
            }
            if (Numerics->timeStep>0) {
                //Physics->Dsigma_xy_0[iNode] = 0.5*Physics->dtAdv* (Physics->Dsigma_xy_0[iNode]/Physics->dtAdv + Ds0_old/Physics->dtAdv0); // Crank-Nicolson
            }
            // Ensure free slip (zero shear stress on free-slip boundaries).
            // BUG FIX: the right/top tests used ix==Grid->nxS and
            // iy==Grid->nyS, which the loop bounds make unreachable, so
            // free slip was never enforced on the right/top boundaries.
            if (ix==0 && BC->IsFreeSlipLeft) {
                Physics->Dsigma_xy_0[iNode] = 0.0;
            }
            if (ix==Grid->nxS-1 && BC->IsFreeSlipRight) {
                Physics->Dsigma_xy_0[iNode] = 0.0;
            }
            if (iy == 0 && BC->IsFreeSlipBot) {
                Physics->Dsigma_xy_0[iNode] = 0.0;
            }
            if (iy==Grid->nyS-1 && BC->IsFreeSlipTop) {
                Physics->Dsigma_xy_0[iNode] = 0.0;
            }
        }
    }

#if (DARCY)
    // BUG FIX: phi was used below without being declared (compile error
    // whenever DARCY is enabled).
    compute Bulk, Zb, divV, DeltaP0, DeltaP, phi;
    for (iy = 1; iy < Grid->nyEC-1; ++iy) {
        for (ix = 1; ix < Grid->nxEC-1; ++ix) {
            iCell = ix + iy*Grid->nxEC;
            phi = Physics->phi[iCell];
            Bulk = Physics->G[iCell]/sqrt(phi);
            divV = ( Physics->Vx[ix+iy*Grid->nxVx] - Physics->Vx[ix-1+ iy *Grid->nxVx] )/Grid->dx;
            divV += ( Physics->Vy[ix+iy*Grid->nxVy] - Physics->Vy[ix +(iy-1)*Grid->nxVy] )/Grid->dy;
            DeltaP0 = Physics->DeltaP0[iCell];
            Zb = Physics->Zb[iCell];
            DeltaP = Zb * ( - divV + DeltaP0/(Bulk*dt) ); // Pc
            Physics->DDeltaP[iCell] = DeltaP - Physics->DeltaP0[iCell];
            Physics->DDeltaP[iCell] *= Physics->dtAdv/Physics->dt;
        }
    }
    Physics_CellVal_SideValues_copyNeighbours_Global(Physics->DDeltaP, Grid);
#endif
}
// Returns the visco-elastic normal deviatoric stress Sxx at cell (ix,iy):
// 2*Z*(Eps_xx + sigma_xx_0/(2*G*dt)), with Eps_xx built from the staggered
// velocity differences around the cell.
compute Physics_sigma_xxVE_getLocalCell(Model* Model, int ix, int iy) {
    // Where ix and iy are the indices of a Cell
    Grid* Grid = &(Model->Grid);
    Physics* Physics = &(Model->Physics);
    int iCell = ix + iy*Grid->nxEC;
    compute dt = Physics->dt;
    // velocity gradients across the cell faces
    compute dVxdx = (Physics->Vx[(ix) + (iy)*Grid->nxVx] - Physics->Vx[(ix-1) + (iy)*Grid->nxVx])/Grid->dx;
    compute dVydy = (Physics->Vy[(ix) + (iy)*Grid->nxVy] - Physics->Vy[(ix) + (iy-1)*Grid->nxVy])/Grid->dy;
    compute Eps_xx = 0.5*(dVxdx-dVydy); // deviatoric normal strain rate
    compute elasticTerm = Physics->sigma_xx_0[iCell]/(2.0*Physics->G[iCell]*dt);
    return 2.0 * Physics->Z[iCell]*(Eps_xx + elasticTerm);
}
// Returns 2*Z*(Eps_xy + sigma_xy_0/(2*G*dt)) - sigma_xy_0 at node (ix,iy).
// NOTE(review): unlike Physics_sigma_xxVE_getLocalCell, this subtracts the
// old stress, so despite its name it returns the stress *increment*
// (Dsigma_xy), not the VE stress itself — confirm against callers.
compute Physics_sigma_xyVE_getLocalNode(Model* Model, int ix, int iy) {
// Where ix and iy are the indices of a Node
Grid* Grid = &(Model->Grid);
Physics* Physics = &(Model->Physics);
int iNode = ix + iy*Grid->nxS;
compute dt = Physics->dt;
// shear strain rate from the velocities adjacent to the node
compute dVxdy = ( Physics->Vx[ix + (iy+1)*Grid->nxVx] - Physics->Vx[ix + (iy )*Grid->nxVx] )/Grid->dy;
compute dVydx = ( Physics->Vy[ix+1+ iy*Grid->nxVy] - Physics->Vy[ix + iy*Grid->nxVy] )/Grid->dx;
compute Eps_xy = 0.5*(dVxdy+dVydx);
//G = Interp_ECVal_Cell2Node_Local(Physics->G, ix, iy, Grid->nxEC);
compute G = Physics->GShear[iNode];
compute Z = Physics->ZShear[iNode];
return 2.0*Z * (Eps_xy + Physics->sigma_xy_0[iNode]/(2.0*G*dt)) - Physics->sigma_xy_0[iNode];
}
// Computes the second invariant of the strain-rate tensor at cell (ix,iy):
// EII = sqrt(Eps_xx^2 + <Eps_xy^2>), where Eps_xx comes from the cell-face
// velocity differences and <Eps_xy^2> is the average of Eps_xy^2 over the
// four surrounding shear nodes.
void Physics_StrainRateInvariant_getLocalCell(Model* Model, int ix, int iy, compute* EII)
{
    Grid* Grid = &(Model->Grid);
    Physics* Physics = &(Model->Physics);

    // Normal component at the cell center
    compute dVxdx = (Physics->Vx[(ix) + (iy)*Grid->nxVx]
                   - Physics->Vx[(ix-1) + (iy)*Grid->nxVx])/Grid->dx;
    compute dVydy = (Physics->Vy[(ix) + (iy)*Grid->nxVy]
                   - Physics->Vy[(ix) + (iy-1)*Grid->nxVy])/Grid->dy;
    compute epsXX = 0.5*(dVxdx-dVydy);

    // Shear component: sum Eps_xy^2 over the four nodes of the cell
    // (lower left, lower right, upper right, upper left)
    int offX[4] = {0,1,1,0};
    int offY[4] = {0,0,1,1};
    compute shearSqrSum = 0.0;
    int k;
    for (k = 0; k < 4; ++k) {
        int nodeX = (ix-1)+offX[k];
        int nodeY = (iy-1)+offY[k];
        compute dVxdy = ( Physics->Vx[(nodeX )+(nodeY+1)*Grid->nxVx]
                        - Physics->Vx[(nodeX )+(nodeY )*Grid->nxVx] )/Grid->dy;
        compute dVydx = ( Physics->Vy[(nodeX+1)+(nodeY )*Grid->nxVy]
                        - Physics->Vy[(nodeX )+(nodeY )*Grid->nxVy] )/Grid->dx;
        compute epsXY = 0.5*(dVxdy+dVydx);
        shearSqrSum += epsXY*epsXY;
    }
    // 0.25 averages the four nodal contributions
    *EII = sqrt( epsXX*epsXX + 0.25*shearSqrSum );
}
void Physics_StrainRateInvariant_getLocalNode(Model* Model, int ix, int iy, compute* EII)
{
    // Second invariant of the strain rate tensor evaluated at node (ix,iy).
    // The shear term (dVxdy+dVydx)/2 is natural on the node; the normal term
    // (dVxdx-dVydy)/2 lives on cells, so for inner nodes it is averaged (in a
    // squared sense) over the four neighbouring cells, while boundary nodes
    // fall back to one-sided/centered stencils (periodic-aware in x).
    Grid* Grid = &(Model->Grid);
    Physics* Physics = &(Model->Physics);
    // Be careful, Anton's trick not in!!
    compute dVxdy, dVydx, dVxdx, dVydy;
    // Shear velocity gradients, defined directly on the node
    dVxdy = (Physics->Vx[(ix ) + (iy+1)*Grid->nxVx]
           - Physics->Vx[(ix ) + (iy )*Grid->nxVx])/Grid->dy;
    dVydx = (Physics->Vy[(ix+1) + (iy )*Grid->nxVy]
           - Physics->Vy[(ix ) + (iy )*Grid->nxVy])/Grid->dx;
    compute dVxdxCell[4], dVydyCell[4]; // order: NE, NW, SW, SE
    // use Anton's trick for the inner nodes
    if (ix>0 && ix<Grid->nxS-1 && iy>0 && iy<Grid->nyS-1) {
        // Normal velocity gradients in each of the four cells around the node
        dVxdxCell[0] = (Physics->Vx[(ix+1)+(iy+1)*Grid->nxVx] - Physics->Vx[(ix )+(iy+1)*Grid->nxVx])/Grid->dx;
        dVxdxCell[1] = (Physics->Vx[(ix )+(iy+1)*Grid->nxVx] - Physics->Vx[(ix-1)+(iy+1)*Grid->nxVx])/Grid->dx;
        dVxdxCell[2] = (Physics->Vx[(ix )+(iy )*Grid->nxVx] - Physics->Vx[(ix-1)+(iy )*Grid->nxVx])/Grid->dx;
        dVxdxCell[3] = (Physics->Vx[(ix+1)+(iy )*Grid->nxVx] - Physics->Vx[(ix )+(iy )*Grid->nxVx])/Grid->dx;
        dVydyCell[0] = (Physics->Vy[(ix+1)+(iy+1)*Grid->nxVy] - Physics->Vy[(ix+1)+(iy )*Grid->nxVy])/Grid->dy;
        dVydyCell[1] = (Physics->Vy[(ix )+(iy+1)*Grid->nxVy] - Physics->Vy[(ix )+(iy )*Grid->nxVy])/Grid->dy;
        dVydyCell[2] = (Physics->Vy[(ix )+(iy )*Grid->nxVy] - Physics->Vy[(ix )+(iy-1)*Grid->nxVy])/Grid->dy;
        dVydyCell[3] = (Physics->Vy[(ix+1)+(iy )*Grid->nxVy] - Physics->Vy[(ix+1)+(iy-1)*Grid->nxVy])/Grid->dy;
        compute NormalComp_sqr = 0.0;
        int iCell;
        for (iCell = 0; iCell < 4; ++iCell) {
            dVxdx = dVxdxCell[iCell];
            dVydy = dVydyCell[iCell];
            // 0.25 = average of the four squared cell contributions
            NormalComp_sqr += 0.25*(0.5*(dVxdx-dVydy))*(0.5*(dVxdx-dVydy)) ;
        }
        *EII = sqrt( NormalComp_sqr + (0.5*(dVxdy+dVydx))*(0.5*(dVxdy+dVydx)) );
    } else {
        // Boundary node: one-sided/centered stencils for the normal terms
        if (Grid->isPeriodic) {
            if (ix == 0 || ix == Grid->nxS-1) {
                // Periodic wrap in x: combine first and last interior Vx columns
                dVxdx = ( Physics->Vx[(1)+(iy+1)*Grid->nxVx] - Physics->Vx[(Grid->nxVx-1 -1)+(iy+1)*Grid->nxVx] +
                          Physics->Vx[(1)+(iy )*Grid->nxVx] - Physics->Vx[(Grid->nxVx-1 -1)+(iy )*Grid->nxVx] )/4./Grid->dx;
            }
            else {
                dVxdx = 0.0;
                printf("error in Physics_StrainRateInvariant_getLocalNode. Shouldn't come to this condition\n");
            }
        }
        else {
            if (ix == 0) {
                // Left boundary: forward difference, averaged over two Vx rows
                dVxdx = ( Physics->Vx[(ix+1)+(iy+1)*Grid->nxVx] - Physics->Vx[(ix )+(iy+1)*Grid->nxVx] +
                          Physics->Vx[(ix+1)+(iy )*Grid->nxVx] - Physics->Vx[(ix )+(iy )*Grid->nxVx] )/2./Grid->dx;
            } else if (ix == Grid->nxS-1) {
                // Right boundary: backward difference
                dVxdx = ( Physics->Vx[(ix )+(iy+1)*Grid->nxVx] - Physics->Vx[(ix-1)+(iy+1)*Grid->nxVx] +
                          Physics->Vx[(ix )+(iy )*Grid->nxVx] - Physics->Vx[(ix-1)+(iy )*Grid->nxVx] )/2./Grid->dx;
            } else {
                // Interior column on a top/bottom boundary row: centered
                dVxdx = ( Physics->Vx[(ix+1)+(iy+1)*Grid->nxVx] - Physics->Vx[(ix-1)+(iy+1)*Grid->nxVx] +
                          Physics->Vx[(ix+1)+(iy )*Grid->nxVx] - Physics->Vx[(ix-1)+(iy )*Grid->nxVx] )/4./Grid->dx;
            }
        }
        if (iy == 0) {
            // Bottom boundary: forward difference in y
            dVydy = ( Physics->Vy[(ix+1)+(iy+1)*Grid->nxVy] - Physics->Vy[(ix+1)+(iy )*Grid->nxVy] +
                      Physics->Vy[(ix )+(iy+1)*Grid->nxVy] - Physics->Vy[(ix )+(iy )*Grid->nxVy] )/2./Grid->dy;
        } else if (iy == Grid->nyS-1) {
            // Top boundary: backward difference in y
            dVydy = ( Physics->Vy[(ix+1)+(iy )*Grid->nxVy] - Physics->Vy[(ix+1)+(iy-1)*Grid->nxVy] +
                      Physics->Vy[(ix )+(iy )*Grid->nxVy] - Physics->Vy[(ix )+(iy-1)*Grid->nxVy] )/2./Grid->dy;
        } else {
            dVydy = ( Physics->Vy[(ix+1)+(iy+1)*Grid->nxVy] - Physics->Vy[(ix+1)+(iy-1)*Grid->nxVy] +
                      Physics->Vy[(ix )+(iy+1)*Grid->nxVy] - Physics->Vy[(ix )+(iy-1)*Grid->nxVy] )/4./Grid->dy;
        }
        // the top and bottom row should never be needed
        *EII = sqrt( (0.5*(dVxdy+dVydx))*(0.5*(dVxdy+dVydx)) + (0.5*(dVxdx-dVydy))*(0.5*(dVxdx-dVydy)) );
    }
}
compute Physics_StressInvariant_getLocalCell(Model* Model, int ix, int iy)
{
    // Second invariant of the deviatoric stress at cell (ix,iy):
    //   sigmaII = (1-phi) * 2 * Z * EII_eff * Lambda
    Grid* Grid = &(Model->Grid);
    Physics* Physics = &(Model->Physics);
    int iCell = ix + iy*Grid->nxEC;
#if (DARCY)
    compute porosity = Physics->phi[iCell]; // two-phase: scale by solid fraction
#else
    compute porosity = 0.0;                 // single phase: no porosity correction
#endif
    compute solidFrac = 1.0 - porosity;
    return solidFrac * 2.0 * Physics->Z[iCell] * Physics->EII_eff[iCell] * Physics->Lambda[iCell];
}
compute Physics_StressInvariant_getLocalNode(Model* Model, int ix, int iy)
{
    // Second invariant of the deviatoric stress at node (ix,iy):
    //   sigmaII = (1-phi) * 2 * ZShear * EII_effShear * LambdaShear
    Grid* Grid = &(Model->Grid);
    Physics* Physics = &(Model->Physics);
    int iNode = ix + iy*Grid->nxS;
#if (DARCY)
    // Two-phase: porosity interpolated from cell centres to the node
    compute porosity = Interp_ECVal_Cell2Node_Local(Physics->phi, ix, iy, Grid->nxEC);
#else
    compute porosity = 0.0; // single phase: no porosity correction
#endif
    compute solidFrac = 1.0 - porosity;
    return solidFrac * 2.0 * Physics->ZShear[iNode] * Physics->EII_effShear[iNode] * Physics->LambdaShear[iNode];
}
void Physics_dt_update(Model* Model) {
    // Selects the next time step sizes Physics->dt (stress build-up limited)
    // and Physics->dtAdv (advection/CFL, rotation and plasticity limited).
    // Time step selection based on the analytical solution of the visco-elastic build up
    // Stress build up equation:
    // Sxx = 2*eta*Exx * ( 1 - exp(-G/eta*t) ) [1]
    // Derivative wrt time:
    // dSxx/dt = 2*G*Exx * exp(-G/eta*t) [2]
    // Solution for the time at a given stress Sxx0:
    // t = eta/G * ln(2*eta*Exx / (2*eta*Exx - Sxx0))
    // can be rewritten:
    // t = tM * ln(1/(1-Sxx0/SxxV_max)
    // with the maxwell time tM = eta/G
    // and the maximum viscous stress (when the viscous strain rate is equal to the total strain rate): SxxV_max = (2*eta*Exx)
    // Let's limit the time step size based on the increment of strain.
    // The increment of strain is given by:
    // DeltaSxx = dSxx/dt * dt // where dt is the time step size
    // Then from eq. 2:
    // dt = DeltaSxx / (2*G*Exx * exp(-G/eta*t)) [3]
    // DeltaSxx can be chosen, for example as a fraction of the maximum viscous stress or of the yield stress.
    // SxxLimit = min(SxxV_max,Syield)
    // DeltaSxx = SxxLimit/n, where n is a non dimensional number representing the fraction of stress
#if (DARCY)
    printf("Time step size selection method not yet adapted to Darcy\n");
    exit(0);
#endif
    // Here comes the implementation
    Physics* Physics = &(Model->Physics);
    Grid* Grid = &(Model->Grid);
    MatProps* MatProps = &(Model->MatProps);
    Numerics* Numerics = &(Model->Numerics);
    Char* Char = &(Model->Char);
    // dtMin==dtMax means the time step is fixed by the user: skip all logic.
    if (Numerics->dtMin!=Numerics->dtMax) {
        SinglePhase* thisPhaseInfo;
        // NOTE(review): thisPhaseInfo, weight, sumOfWeights, cohesion,
        // frictionAngle, P and phase are declared but unused in this function.
        compute weight, sumOfWeights;
        compute cohesion, frictionAngle;
        compute P;
        compute Sigma_v_max; // maximum viscous stress (if total strain rate = viscous strain rate)
        compute Sigma_yield;
        compute Sigma_limit;
        int phase;
        compute sq_sigma_xy0, sigma_xx0, sigmaII0;
        compute DeltaSigma;
        compute new_dt = 1e200;
        compute dtOld = Physics->dt;
        compute EII, sigmaII;
        compute smallest_dt = 1e100;
        int ix, iy, iCell;
        compute eta;
        //compute DeltaSigma_Max = 0.0;
        compute DeltaSigma_min = Numerics->deltaSigmaMin;
        //compute stressFac = 1.0;//fmax(0.0,Numerics->dt_stressFac-Numerics->deltaSigmaMin);
        //compute stressFac = Numerics->dt_stressFac;
        //Numerics->dt_DeltaSigma_min_stallFac = 1e100;
        /*
        if (Numerics->timeStep<=0) {
        Numerics->dt_DeltaSigma_min_stallFac = 1.0;
        } else {
        if (!fmod(Numerics->stallingCounter+1,5) && EqStokes->normResidual>10.0*Numerics->absoluteTolerance) {
        Numerics->dt_DeltaSigma_min_stallFac/=2.0;
        } else {
        if(Numerics->itNonLin==0) {
        Numerics->dt_DeltaSigma_min_stallFac *= 1.25; // slowly recovers
        Numerics->dt_DeltaSigma_min_stallFac = fmin(1.0,Numerics->dt_DeltaSigma_min_stallFac);
        }
        }
        }
        Numerics->dt_DeltaSigma_min_stallFac = fmax(Numerics->dt_DeltaSigma_min_stallFac, 1e-3);
        */
        // Characteristic time scale ratios gathered per cell:
        //   P_E = khi/G, V_E = eta/G (Maxwell), EP_E, VP_E, VP_EP mix in the
        //   plastic viscosity khi; min/max/average are tracked below.
        compute P_E, EP_E, V_E, VP_E, VP_EP;
        compute counter = 0;
        compute av_EP_E = 0.0;
        compute minP_E = 1e100;
        compute minEP_E = 1e100;
        compute minV_E = 1e100;
        compute minVP_E = 1e100;
        compute minVP_EP = 1e100;
        compute maxEP_E = 0.0;
        bool somethingIsPlastic = false;
        compute refTime_noPlast;
        //compute refTime_Plast;
        compute minRefTime_noPlast = 1e100;
        compute maxRefTime_noPlast = 0.0;
        // Sweep the inner cells to find the most restrictive stress-based dt
        for (iy=1;iy<Grid->nyEC-1; ++iy) {
            for (ix=1;ix<Grid->nxEC-1; ++ix) {
                iCell = ix +iy*Grid->nxEC;
                // khi > 1e29 means the cell is effectively not plastic
                if (MatProps->use_dtMaxwellLimit[Physics->phase[iCell]] && Physics->khi[iCell] > 1e29) {
                    eta = Physics->eta[iCell];
                    // Compute sigmaII0 (old stress invariant; shear part is the
                    // average of the squared values on the four corner nodes)
                    sq_sigma_xy0 = Physics->sigma_xy_0[ix-1+(iy-1)*Grid->nxS] * Physics->sigma_xy_0[ix-1+(iy-1)*Grid->nxS];
                    sq_sigma_xy0 += Physics->sigma_xy_0[ix +(iy-1)*Grid->nxS] * Physics->sigma_xy_0[ix +(iy-1)*Grid->nxS];
                    sq_sigma_xy0 += Physics->sigma_xy_0[ix-1+(iy )*Grid->nxS] * Physics->sigma_xy_0[ix-1+(iy )*Grid->nxS];
                    sq_sigma_xy0 += Physics->sigma_xy_0[ix +(iy )*Grid->nxS] * Physics->sigma_xy_0[ix +(iy )*Grid->nxS];
                    sigma_xx0 = Physics->sigma_xx_0[iCell];// + Physics->Dsigma_xx_0[iCell];
                    sigmaII0 = sqrt((sigma_xx0)*(sigma_xx0) + 0.25*(sq_sigma_xy0));
                    // Compute sigmaII
                    sigmaII = Physics_StressInvariant_getLocalCell(Model, ix, iy);
                    // Get cohesion and frictionAngle
                    if (Numerics->timeStep<=0 && Numerics->itNonLin<1) {
                        EII = 1.0; // The reference strain in this case is (1/Char.time) / Char.time = 1.0
                        Sigma_limit = 2.0*eta*EII/1000.0;
                        //printf("Svmax = %.2e, Syield = %.2e, Slimit = %.2e, cohesion = %.2e, frictionAngle = %.2e, P = %.2e\n", Sigma_v_max, Sigma_yield, Sigma_limit, cohesion, frictionAngle, P);
                    } else {
                        // Compute EII
                        Physics_StrainRateInvariant_getLocalCell(Model, ix, iy, &EII);
                        // Get stress limit
                        if (0) {
                            if (Physics->khi[iCell]<1e29) {
                                Sigma_v_max = 2.0*eta*EII;
                                Sigma_yield = Physics->Tau_y[iCell];
                                Sigma_limit = fmin(Sigma_v_max,Sigma_yield);
                            } else {
                                Sigma_limit = 2.0*eta*EII*1.0;
                            }
                        } else {
                            Sigma_v_max = 2.0*eta*EII;
                            Sigma_yield = Physics->Tau_y[iCell];
                            Sigma_limit = Sigma_yield;
                        }
                    }
                    compute dSigma = fabs(sigmaII - sigmaII0);
                    if (sigmaII>Sigma_limit) {
                        //printf("SII>Slim!! sigmaII = %.2e, sigma_limit = %.2e, P = %.2e, Sigma_v_max = %.2e,Sigma_yield = %.2e\n", sigmaII, Sigma_limit, P, Sigma_v_max,Sigma_yield);
                        sigmaII = Sigma_limit; // because the time step is updated before the viscosity, so stress can be a bit higher than the yield at that moment.
                    }
                    // Get DeltaSigma
                    //DeltaSigma = Sigma_limit*stressFac;
                    DeltaSigma = DeltaSigma_min;//stressFac * (Sigma_limit-sigmaII)/Sigma_limit + DeltaSigma_min;
                    //DeltaSigma = 0.05 * (Sigma_limit-sigmaII)/Sigma_limit + DeltaSigma_min;
                    //DeltaSigma *= Numerics->dt_DeltaSigma_min_stallFac;
                    // Scale the previous dt so the stress increment is DeltaSigma
                    new_dt = dtOld * (DeltaSigma/dSigma);
                    if (new_dt<0) {
                        printf("DeltaSigma = %.2e, Sigma_limit = %.2e, sigmaII = %.2e, dSigma = %.2e\n", DeltaSigma, Sigma_limit, sigmaII, dSigma);
                        exit(0);
                    }
                    // compute the corresponding time in the analytical solution
                    //refTime_noPlast = eta/Physics->G[iCell] * log(2*eta*EII / (2*eta*EII - sigmaII0 ));
                    refTime_noPlast = eta/Physics->G[iCell] * log(2.0*eta*EII / (2.0*eta*EII - Sigma_limit ));
                    minRefTime_noPlast = fmin(minRefTime_noPlast,refTime_noPlast);
                    maxRefTime_noPlast = fmax(maxRefTime_noPlast,refTime_noPlast);
                    // compute dt using eq. [3]
                    //dt = DeltaSigma / (2*G*EII * exp(-G/eta*t));
                    //if (new_dt<smallest_dt) {
                    //DeltaSigma_Max = dSigma;
                    //iyLim = iy;
                    //printf("DeltaSigma = %.2e, dSigma = %.2e, new_dt = %.2e, smallest_dt = %.2e, Physics->dt = %.2e\n",DeltapSigma, dSigma, new_dt, smallest_dt, Physics->dt);
                    //}
                    smallest_dt = fmin(smallest_dt, new_dt);
                    V_E = (Physics->eta[iCell]) / (Physics->G[iCell]);
                    minV_E = fmin(minV_E ,V_E);
                } else if (MatProps->use_dtMaxwellLimit[Physics->phase[iCell]] && Physics->khi[iCell] <= 1e29) {
                    // Plastic cell: collect the visco-elasto-plastic time scales
                    V_E = (Physics->eta[iCell]) / (Physics->G[iCell]);
                    minV_E = fmin(minV_E ,V_E);
                    somethingIsPlastic = true;
                    EP_E = (1.0/(1.0/(Physics->G[iCell]*Physics->dt) + 1.0/Physics->khi[iCell])) / (Physics->G[iCell]);
                    minEP_E = fmin(minEP_E ,EP_E);
                    maxEP_E = fmax(maxEP_E ,EP_E);
                    av_EP_E += EP_E;
                    counter += 1.0;
                    P_E = (Physics->khi[iCell]) / (Physics->G[iCell]);
                    minP_E = fmin(minP_E ,P_E);
                    VP_EP = (1.0/(1.0/(Physics->eta[iCell]) + 1.0/Physics->khi[iCell])) / (1.0/(1.0/Physics->G[iCell] + Physics->dt/Physics->khi[iCell]));
                    minVP_EP = fmin(minVP_EP ,VP_EP);
                    VP_E = (1.0/(1.0/(Physics->eta[iCell]) + 1.0/Physics->khi[iCell])) / (Physics->G[iCell]);
                    minVP_E = fmin(minVP_E ,VP_E);
                    //printf("VP_E = %.2e, EP_E = %.2e\n",VP_E, EP_E);
                }
            }
        }
        av_EP_E /= counter;
        // Relax dt towards the stress-limited value (average with previous dt).
        // NOTE(review): the 1e100 fallback below resets smallest_dt AFTER it
        // has already been folded into Physics->dt here; the dtMax clamp
        // further down bounds the result — TODO confirm the intended order.
        Physics->dt = (smallest_dt+Physics->dt)/2.0;
        compute dtStress = smallest_dt;
        if (smallest_dt==1e100) { // unlikely case where everything is breaking
            smallest_dt = dtOld;
            printf("The unlikely happened\n");
        }
        /*
        if (Numerics->timeStep <= 0) {
        Numerics->dtCorr = dtOld;
        Numerics->dtPrevCorr = Numerics->dtCorr;
        Numerics->dtAlphaCorr = Numerics->dtAlphaCorrIni;
        Physics->dt = dtOld;
        } else {
        Numerics->dtCorr = Numerics->dtAlphaCorr * (smallest_dt-dtOld);
        //if (fabs(Numerics->dtCorr)/dtOld<0.05) { // avoids small changes
        // Numerics->dtCorr = 0.0;
        //}
        //printf("Numerics->dtCorr = %.2e, Numerics->dtPrevCorr = %.2e, Ratio = %.2e\n", Numerics->dtCorr, Numerics->dtPrevCorr, Numerics->dtCorr/Numerics->dtPrevCorr);
        if (Numerics->dtCorr/Numerics->dtPrevCorr<-0.9) {
        Numerics->dtAlphaCorr /= 2.0;
        } else {
        Numerics->dtAlphaCorr *= 1.25;
        }
        Numerics->dtAlphaCorr = fmin(Numerics->dtAlphaCorrIni, Numerics->dtAlphaCorr);
        Physics->dt = dtOld + Numerics->dtCorr;
        Numerics->dtPrevCorr = Numerics->dtCorr;
        }
        printf("dtNow = %.2e, Numerics->dtCorr = %.2e, smallest_dt = %2e., dtOld = %.2e\n", Physics->dt, Numerics->dtCorr, smallest_dt, dtOld);
        //Physics->dt = dtOld;
        */
        // Flag whether dt decreased or increased (used by the line search)
        Numerics->lsGoingDown = false;
        Numerics->lsGoingUp = false;
        compute tol = 0.001;
        printf("(Physics->dt-dtOld)/dtOld = %.2e, dt = %.2e, dtOld = %.2e\n", (Physics->dt-dtOld)/Physics->dt, Physics->dt, dtOld);
        if ((Physics->dt-dtOld)/dtOld<-tol) { // going down
            Numerics->lsGoingDown = true;
            printf("going down0\n");
        } else { // going up
            Numerics->lsGoingUp = true;
        }
        // Clamp the stress-based dt to the user range
        Physics->dt = fmin(Numerics->dtMax, Physics->dt);
        Physics->dt = fmax(Numerics->dtMin, Physics->dt);
        // dtAdv: CFL condition on both velocity components
        Physics->dtAdv = Numerics->CFL_fac_Stokes*Grid->dx/(Physics->maxVx); // note: the min(dx,dy) is the char length, so = 1
        Physics->dtAdv = fmin(Physics->dtAdv, Numerics->CFL_fac_Stokes*Grid->dy/(Physics->maxVy));
        compute dtAdvAlone = Physics->dtAdv;
        //Physics->dtAdv = fmin(Physics->dtAdv, Physics->dt);
        //Physics->dtAdv = fmax(Physics->dtAdv, 0.001*dtAdvAlone);
        // Rotation limiter: cap dt so no node rotates more than alpha_lim
        compute alpha_lim = 5.0*PI/180.0;
        compute dtRot;
        compute dtRotMin = 1e100;
        compute omega;
        if (Numerics->timeStep>0) {
            // Compute the Alpha array
            // add a condi ztion with signX signY to avoid recomputing alpha if not necessary
#pragma omp parallel for private(iy, ix) OMP_SCHEDULE
            for (iy=0; iy<Grid->nyS; iy++) {
                for (ix=0; ix<Grid->nxS; ix++) {
                    // Vorticity omega = 0.5*(dVy/dx - dVx/dy) at the node
                    omega = .5*((Physics->Vy[ix+1 + (iy )*Grid->nxVy] - Physics->Vy[ix +(iy )*Grid->nxVy])/Grid->DXEC[ix]
                              - (Physics->Vx[ix + (iy+1)*Grid->nxVx] - Physics->Vx[ix +(iy )*Grid->nxVx])/Grid->DYEC[iy]);
                    dtRot = alpha_lim/fabs(omega);
                    dtRotMin = fmin(dtRotMin,dtRot);
                }
            }
            Physics->dtAdv = fmin(dtRotMin,Physics->dtAdv);
        }
        //Physics->dtAdv = fmin(dtStress,Physics->dtAdv);
        // Plasticity limiter: blend of the elasto-plastic and visco-plastic
        // relaxation time scales, weighted by dt_plasticFac
        compute dtPFac = Numerics->dt_plasticFac;
        compute dtPlastic = 0.0;
        if (dtPFac<1.0) {
            dtPlastic = (1.0-dtPFac)*minEP_E+dtPFac*minVP_EP;
        } else {
            dtPlastic = dtPFac*minVP_EP;
        }
        if (somethingIsPlastic) {
            //compute dtPlastic = 0.99*minVP_EP;
            Numerics->subgridStressDiffTimeScale = minVP_EP;
            Physics->dtAdv = fmin(Physics->dtAdv,dtPlastic);
        } else {
            Numerics->subgridStressDiffTimeScale = minV_E; // i.e. Maxwell time
        }
        if (Numerics->timeStep>0) {
            compute ana_Fac = Numerics->dt_stressFac;
            Physics->dtAdv = fmin(Physics->dtAdv,ana_Fac*minRefTime_noPlast);
        }
        if (Numerics->timeStep>0) {
            // Never more than double the previous time step
            Physics->dtAdv = fmin(2.0*dtOld, Physics->dtAdv);
            //Physics->dtAdv = fmax(0.9*dtOld, Physics->dtAdv);
        }
        Physics->dtAdv = fmin(Numerics->dtMax, Physics->dtAdv);
        Physics->dtAdv = fmax(Numerics->dtMin, Physics->dtAdv);
        // With ADV_INTERP both steps follow the advection limit; otherwise
        // advection follows the stress-limited dt.
#if (ADV_INTERP)
        Physics->dt = Physics->dtAdv;
#else
        Physics->dtAdv = Physics->dt;
#endif
        compute yr = (3600.0*24.0*365.0);
        printf("scaled_dt = %.2e yr, dtMin = %.2e, dtMax = %.2e, Numerics->dtAlphaCorr = %.2e, dtStress = %.2e, dtAdvAlone = %.2e, dtRotMin = %.2e, dtPlastic = %.2e, Physics->dt = %.2e\n", Physics->dt*Char->time/yr, Numerics->dtMin, Numerics->dtMax, Numerics->dtAlphaCorr, dtStress, dtAdvAlone, dtRotMin, dtPlastic, Physics->dt);
        printf("minEP/E = %.2e yr, maxEP/E = %.2e yr, avEP_E = %.2e, P/E = %.2e yr, V/E = %.2e yr, VP/E = %.2e yr, VP/EP = %.2e yr, minRefTime_noPlast = %.2e yr, Fac*minRefTime_noPlast = %.2e yr, maxRefTime_noPlast = %.2e yr\n", minEP_E*Char->time/yr, maxEP_E*Char->time/yr, av_EP_E*Char->time/yr, minP_E*Char->time/yr, minV_E*Char->time/yr, minVP_E*Char->time/yr, minVP_EP*Char->time/yr, minRefTime_noPlast*Char->time/yr, Numerics->dt_stressFac*minRefTime_noPlast*Char->time/yr, maxRefTime_noPlast*Char->time/yr);
    } else {
        // Fixed time step requested by the user
        Physics->dt = Numerics->dtMin;
        Physics->dtAdv = Numerics->dtMin;
    }
}
#if (DARCY)
void Physics_Perm_updateGlobal(Model* Model)
{
    // Updates the effective permeability (perm/eta_f) on every embedded cell:
    //   perm_eta_f = perm0 * phi^3 * (1-phi)^2   (Kozeny-Carman type law),
    // where perm0 is the phase-weighted average of MatProps->perm0_eta_f.
    // Cleanup: removed dead locals (iy, ix, phiRef, PermEffRef) and the
    // unused Numerics pointer — none of them affected the result.
    Grid* Grid = &(Model->Grid);
    Physics* Physics = &(Model->Physics);
    MatProps* MatProps = &(Model->MatProps);
    // NOTE(review): minPerm is reset here but never recomputed in this
    // function; kept for compatibility with code that may read it elsewhere.
    Physics->minPerm = 1E100;
    int iCell;
    compute phi;
    compute perm0;
    SinglePhase* thisPhaseInfo;
    for (iCell = 0; iCell < Grid->nECTot; ++iCell) {
        phi = Physics->phi[iCell];
        // Phase-weighted average of the reference permeability for this cell
        perm0 = 0.0;
        thisPhaseInfo = Physics->phaseListHead[iCell];
        while (thisPhaseInfo != NULL) {
            perm0 += MatProps->perm0_eta_f[thisPhaseInfo->phase] * thisPhaseInfo->weight;
            thisPhaseInfo = thisPhaseInfo->next;
        }
        perm0 /= Physics->sumOfWeightsCells[iCell];
        Physics->perm_eta_f[iCell] = perm0 * phi*phi*phi * ( (1.0-phi)*(1.0-phi));
    }
    // Fill boundary (side) cells by copying their inner neighbours
    Physics_CellVal_SideValues_copyNeighbours_Global(Physics->perm_eta_f, Grid);
}
void Physics_Phi_updateGlobal(Model* Model)
{
    // Advances porosity phi in time from the compaction relation
    //   dphi/dt = (1-phi)*div(V)
    // with a trapezoidal update averaging the old term (phi0, divV0) and the
    // current one, then clamps phi to [phiMin, phiMax] and stores Dphi.
    // Cleanup: removed the dead accumulators sum, maxDiv and maxPhi — they
    // were computed every iteration but never read.
    Grid* Grid = &(Model->Grid);
    Physics* Physics = &(Model->Physics);
    Numerics* Numerics = &(Model->Numerics);
    int iy, ix;
    int iCell;
    compute dt = Physics->dtAdv;
    int nxVx = Grid->nxVx;
    int nxVy = Grid->nxVy;
    compute dx, dy;
    compute divV;
    for (iy = 1; iy < Grid->nyEC-1; ++iy) {
        for (ix = 1; ix < Grid->nxEC-1; ++ix) {
            iCell = ix + iy*Grid->nxEC;
            dx = Grid->DXS[ix-1];
            dy = Grid->DYS[iy-1];
            // Velocity divergence at the cell centre
            divV = ( Physics->Vx[ix+iy*nxVx] - Physics->Vx[ix-1+ iy *nxVx] )/dx;
            divV += ( Physics->Vy[ix+iy*nxVy] - Physics->Vy[ix +(iy-1)*nxVy] )/dy;
            // Trapezoidal update; the RHS reads the not-yet-updated phi
            Physics->phi[iCell] = Physics->phi0[iCell] + dt*0.5*( (1.0-Physics->phi0[iCell])*Physics->divV0[iCell] + (1.0-Physics->phi[iCell])*divV );
            // Clamp to the physically admissible porosity range
            if (Physics->phi[iCell] > Numerics->phiMax) {
                Physics->phi[iCell] = Numerics->phiMax;
            } else if (Physics->phi[iCell] < Numerics->phiMin) {
                Physics->phi[iCell] = Numerics->phiMin;
            }
            Physics->Dphi[iCell] = Physics->phi[iCell] - Physics->phi0[iCell];
        }
    }
    // Boundary cells get their values copied from inner neighbours
    Physics_CellVal_SideValues_copyNeighbours_Global(Physics->phi, Grid);
    Physics_CellVal_SideValues_copyNeighbours_Global(Physics->Dphi, Grid);
}
#endif
void Physics_Rho_updateGlobal(Model* Model)
{
    // Recomputes the cell density as the phase-weighted average of the
    // material reference densities rho0; with DARCY enabled, mixes in the
    // fluid density according to porosity.
    Grid* Grid = &(Model->Grid);
    Physics* Physics = &(Model->Physics);
    MatProps* MatProps = &(Model->MatProps);
    int iCell;
    SinglePhase* phaseEntry;
#pragma omp parallel for private(iCell, phaseEntry) OMP_SCHEDULE
    for (iCell = 0; iCell < Grid->nECTot; ++iCell) {
        // Weighted sum over all phases contributing to this cell
        compute rhoSum = 0.0;
        for (phaseEntry = Physics->phaseListHead[iCell]; phaseEntry != NULL; phaseEntry = phaseEntry->next) {
            rhoSum += MatProps->rho0[phaseEntry->phase] * phaseEntry->weight;
        }
        Physics->rho[iCell] = rhoSum / Physics->sumOfWeightsCells[iCell];
#if (DARCY)
        // Mixture density: solid weighted by (1-phi), fluid by phi
        Physics->rho[iCell] = (1.0 - Physics->phi[iCell])*Physics->rho[iCell] + Physics->phi[iCell]*Physics->rho_f;
#endif
    }
    // Boundary cells copy their inner neighbours
    Physics_CellVal_SideValues_copyNeighbours_Global(Physics->rho, Grid);
}
/*
compute Physics_getFromMatProps_ForOneCell(Physics* Physics, compute* ListFromMatProps, MatProps* MatProps, int iCell) {
SinglePhase* thisPhaseInfo;
compute value = 0.0;
thisPhaseInfo = Physics->phaseListHead[iCell];
while (thisPhaseInfo != NULL) {
value += ListFromMatProps[thisPhaseInfo->phase] * thisPhaseInfo->weight;
thisPhaseInfo = thisPhaseInfo->next;
}
return value /= Physics->sumOfWeightsCells[iCell];
}
*/
void Physics_Phase_updateGlobal(Model* Model)
{
    // Assigns a single dominant phase to each inner cell from the marker
    // particles linked to the four surrounding nodes. Priority: any air
    // particle makes the cell air; otherwise any water particle makes it
    // water; otherwise the phase with the largest particle count wins.
    Grid* Grid = &(Model->Grid);
    Physics* Physics = &(Model->Physics);
    Particles* Particles = &(Model->Particles);
    MatProps* MatProps = &(Model->MatProps);
    int ix, iy, iCell, iNode, iPhase;
    SingleParticle* particle;
    // Offsets of the four nodes surrounding a cell, in node index space
    int nodeOffX[] = {-1, 0, -1, 0};
    int nodeOffY[] = {-1, -1, 0, 0};
    compute contribPhase[NB_PHASE_MAX];
    compute bestCount;
    int phaseAir = Physics->phaseAir;
    // If no dedicated water phase is defined, fall back to the air phase
    int phaseWater = (Physics->phaseWater == -1) ? Physics->phaseAir : Physics->phaseWater;
    compute airCount, waterCount;
    for (iy = 1; iy < Grid->nyEC-1; ++iy) {
        for (ix = 1; ix < Grid->nxEC-1; ++ix) {
            iCell = ix + iy*Grid->nxEC;
            // Reset the per-phase particle counts
            for (iPhase = 0; iPhase < MatProps->nPhase; ++iPhase) {
                contribPhase[iPhase] = 0;
            }
            // Count particles of each phase in the four neighbouring node lists
            for (iNode = 0; iNode < 4; ++iNode) {
                particle = Particles->linkHead[ix+nodeOffX[iNode] + (iy+nodeOffY[iNode])*Grid->nxS];
                for (; particle != NULL; particle = particle->next) {
                    ++contribPhase[particle->phase];
                }
            }
            airCount   = (phaseAir   > -1) ? contribPhase[phaseAir]   : 0.0;
            waterCount = (phaseWater > -1) ? contribPhase[phaseWater] : 0.0;
            if (airCount > 0) {
                Physics->phase[iCell] = phaseAir;
            } else if (waterCount > 0) {
                Physics->phase[iCell] = phaseWater;
            } else {
                // Pick the most represented phase; if the cell holds no
                // particle at all, its phase is left unchanged.
                bestCount = 0;
                for (iPhase = 0; iPhase < MatProps->nPhase; ++iPhase) {
                    if (contribPhase[iPhase] > bestCount) {
                        Physics->phase[iCell] = iPhase;
                        bestCount = contribPhase[iPhase];
                    }
                }
            }
        }
    }
    // Propagate to the boundary cells
    Physics_CellVal_SideValues_copyNeighbours_Global_i(Physics->phase,Grid);
}
void Physics_PhaseList_reinit(Model* Model)
{
    // Frees all but the first entry of each cell's singly-linked phase list
    // and resets the surviving head entry to an empty state.
    Grid* Grid = &(Model->Grid);
    Physics* Physics = &(Model->Physics);
    int iCell;
    SinglePhase* head;
#pragma omp parallel for private(iCell, head) OMP_SCHEDULE
    for (iCell = 0; iCell < Grid->nECTot; ++iCell) {
        // Pop and free entries until only the head node remains
        head = Physics->phaseListHead[iCell];
        while (head->next != NULL) {
            Physics->phaseListHead[iCell] = head->next;
            free(head);
            head = Physics->phaseListHead[iCell];
        }
        // Mark the remaining head as empty
        head->phase = -1;
        head->weight = 0.0;
        head->next = NULL;
    }
}
void Physics_check(Model* Model)
{
    // Debug dump: prints the main physics fields cell by cell, optionally
    // re-dimensionalized with the characteristic units from Char.
    Grid* Grid = &(Model->Grid);
    Physics* Physics = &(Model->Physics);
    Char* Char = &(Model->Char);
    printf("=== Physics_check ===\n");
    int iCell, ix, iy;
    compute* Data;
    int iData;
    // Number of fields to print depends on the compiled-in features
    int nData = 9;
#if (HEAT)
    nData +=1;
#endif
#if (DARCY)
    nData +=6;
#endif
    // Characteristic (scaling) units
    compute s = Char->time; // second
    compute m = Char->length; // meter
    compute kg = Char->mass; // kilogram
#if (HEAT)
    compute K = Char->temperature; // Kelvin
    // Other units
    compute J = kg*m*m/(s*s); // Joule
    compute W = kg*m*m/(s*s*s); // Watt
#endif
    compute Pa = kg/m/s/s; // Pascal
    compute Pas = kg/m/s; // Poise, Pa.s
    //compute mol = 1.0;
    bool Dim = true;  // true: print dimensional values; false: model units
    compute unit = 1.0;
    // NOTE(review): if a feature-gated case (9-15) is selected while its
    // feature is compiled out, Data/unit keep the previous case's values
    // and the field is printed again without a new banner — TODO confirm
    // that nData always excludes those cases in every build configuration.
    for (iData = 0; iData < nData; ++iData) {
        // Select the field to print and its physical unit
        switch (iData) {
        case 0:
            printf("===== G =====\n");
            Data = Physics->G;
            if (Dim) unit = Pa;
            break;
        case 1:
            printf("===== eta =====\n");
            Data = Physics->eta;
            if (Dim) unit = Pas;
            break;
        case 2:
            printf("===== khi =====\n");
            Data = Physics->khi;
            if (Dim) unit = Pas;
            break;
        case 3:
            printf("===== Z =====\n");
            Data = Physics->Z;
            if (Dim) unit = Pas;
            break;
        case 4:
            printf("===== rho =====\n");
            Data = Physics->rho;
            if (Dim) unit = kg/m/m/m ;
            break;
        case 5:
            printf("===== sigma_xx_0 =====\n");
            Data = Physics->sigma_xx_0;
            if (Dim) unit = Pa;
            break;
        case 6:
            printf("===== Dsigma_xx_0 =====\n");
            Data = Physics->Dsigma_xx_0;
            if (Dim) unit = Pa;
            break;
        case 7:
            printf("===== sumOfWeightsCells =====\n");
            Data = Physics->sumOfWeightsCells;
            if (Dim) unit = 1.0;
            break;
        case 8:
            printf("===== P =====\n");
            Data = Physics->P;
            if (Dim) unit = Pa;
            break;
        case 9:
#if (HEAT)
            printf("===== T =====\n");
            Data = Physics->T;
            if (Dim) unit = K;
#endif
            break;
        case 10:
#if (DARCY)
            printf("===== phi =====\n");
            Data = Physics->phi;
            if (Dim) unit = 1.0;
#endif
            break;
        case 11:
#if (DARCY)
            printf("===== Pc =====\n");
            Data = Physics->Pc;
            if (Dim) unit = Pa;
#endif
            break;
        case 12:
#if (DARCY)
            printf("===== Pf =====\n");
            Data = Physics->Pf;
            if (Dim) unit = Pa;
#endif
            break;
        case 13:
#if (DARCY)
            printf("===== khi_b =====\n");
            Data = Physics->khi_b;
            if (Dim) unit = Pas;
#endif
            break;
        case 14:
#if (DARCY)
            printf("===== eta_b =====\n");
            Data = Physics->eta_b;
            if (Dim) unit = Pas;
#endif
            break;
        case 15:
#if (DARCY)
            printf("===== perm =====\n");
            Data = Physics->perm_eta_f;
            if (Dim) unit = Physics->eta_f * m*m ;
#endif
            break;
        }
        printf("Char unit = %.2e\n",unit);
        // Dump the field row by row over the embedded-cell grid
        for (iy = 0; iy < Grid->nyEC; ++iy) {
            for (ix = 0; ix < Grid->nxEC; ++ix) {
                iCell = ix+iy*Grid->nxEC;
                printf("%.2e ", Data[iCell]*unit);
            }
            printf("\n");
        }
    }
}
void Physics_NodeVal_advectEulerian(compute *A, Model* Model)
{
    // Advects the node-based field A with an explicit Eulerian step:
    //   A_new = A - dt*(Vx*dA/dx + Vy*dA/dy)
    // using centered averages of the one-sided differences, with the
    // velocities interpolated to the node. Boundary nodes are not updated.
    Grid* Grid = &(Model->Grid);
    Physics* Physics = &(Model->Physics);
    compute* Anew = (compute*) malloc(Grid->nSTot * sizeof(compute));
    if (Anew == NULL) {
        // Keep the file's error-handling style (printf + exit)
        printf("error in Physics_NodeVal_advectEulerian: out of memory\n");
        exit(0);
    }
    int ix, iy;
    int iC, iN, iS, iW, iE, iVxN, iVxS, iVyW, iVyE;
    compute dAdx_W, dAdx_E, dAdy_S, dAdy_N;
    compute dx = Grid->dx;
    compute dy = Grid->dy;
    compute dt = Physics->dt;
    compute Vx, Vy;
    // BUG FIX: this loop previously ran over the embedded-cell grid
    // (nyEC-1/nxEC-1) while indexing node arrays of size nSTot with stride
    // nxS, which overruns A/Anew on the last row/column; it now matches the
    // node grid and the copy-back loop below.
    for (iy = 1; iy < Grid->nyS-1; ++iy) {
        for (ix = 1; ix < Grid->nxS-1; ++ix) {
            // Node and neighbour indices
            iC = ix + (iy )*Grid->nxS;
            iN = ix + (iy+1)*Grid->nxS;
            iS = ix + (iy-1)*Grid->nxS;
            iW = ix-1 + (iy )*Grid->nxS;
            iE = ix+1 + (iy )*Grid->nxS;
            // BUG FIX: the Vx row offset was added ("+Grid->nxVx") instead of
            // multiplied, producing wrong Vx indices (cf. the Vy lines and the
            // node stencils used elsewhere in this file).
            iVxS = ix + (iy )*Grid->nxVx;
            iVxN = ix + (iy+1)*Grid->nxVx;
            iVyW = ix + (iy )*Grid->nxVy;
            iVyE = ix+1 + (iy )*Grid->nxVy;
            // One-sided gradients on each side of the node
            dAdx_W = (A[iC] - A[iW])/dx;
            dAdx_E = (A[iE] - A[iC])/dx;
            dAdy_S = (A[iC] - A[iS])/dy;
            dAdy_N = (A[iN] - A[iC])/dy;
            // Velocity interpolated to the node
            Vx = .5*( Physics->Vx[iVxS] + Physics->Vx[iVxN]);
            Vy = .5*( Physics->Vy[iVyW] + Physics->Vy[iVyE]);
            // Explicit Euler step with centered derivatives
            Anew[iC] = A[iC] + dt* ( - Vx * .5*(dAdx_W + dAdx_E) - Vy*.5*(dAdy_S + dAdy_N) );
        }
    }
    // Copy the updated interior back into A
    for (iy = 1; iy < Grid->nyS-1; ++iy) {
        for (ix = 1; ix < Grid->nxS-1; ++ix) {
            iC = ix + (iy )*Grid->nxS;
            A[iC] = Anew[iC];
        }
    }
    // Values should be copied as well
    //Physics_CellVal_SideValues_copyNeighbours_Global(A, Grid);
    free(Anew);
}
|
DRB065-pireduction-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Classic PI calculation using reduction
*/
#define num_steps 2000000000
#include <stdio.h>
int main(int argc, char** argv)
{
    // Approximates PI by midpoint-rule integration of 4/(1+x^2) over [0,1].
    // This is the race-free ("orig-no") DataRaceBench variant: the loop is
    // parallelized with reduction(+:pi) and private(x), so the accumulation
    // into pi and the per-iteration x are both safe. The exact pragma/loop
    // structure is the artifact under test — do not restructure.
    double pi = 0.0;
    long int i;
    double x, interval_width;
    interval_width = 1.0/(double)num_steps;
#pragma omp parallel for reduction(+:pi) private(x)
    for (i = 0; i < num_steps; i++) {
        x = (i+ 0.5) * interval_width;
        pi += 1.0 / (x*x + 1.0);
    }
    // Scale by the factor 4 and the interval width once, outside the loop
    pi = pi * 4.0 * interval_width;
    printf ("PI=%f\n", pi);
    return 0;
}
|
eavlSimpleReverseIndexOp.h | // Copyright 2010-2014 UT-Battelle, LLC. See LICENSE.txt for more information.
#ifndef EAVL_SIMPLE_REVERSE_INDEX_OP_H
#define EAVL_SIMPLE_REVERSE_INDEX_OP_H
#include "eavlOperation.h"
#include "eavlArray.h"
#include "eavlException.h"
/// like reverse-index op, but assume the output counts
/// can only ever be "1", so we treat the output-count array
/// like a simple boolean flag, and we don't need to generate
/// a reverse subindex array.
/// CPU implementation: for every input element i whose (remapped) output
/// flag is non-zero, record i at the position given by the (remapped)
/// output-index array. The div/mod/mul/add quadruples implement the eavl
/// linear-index remapping for each array.
static void eavlSimpleReverseIndexOp_CPU(int nInputVals,
                                         int *inOF, int inOFdiv, int inOFmod, int inOFmul, int inOFadd,
                                         int *inOI, int inOIdiv, int inOImod, int inOImul, int inOIadd,
                                         int *outII, int outIImul, int outIIadd)
{
#pragma omp parallel for
    for (int i=0; i<nInputVals; i++)
    {
        // Remapped lookup of the boolean output flag for element i
        int flagIndex = ((i/inOFdiv)%inOFmod)*inOFmul+inOFadd;
        if (inOF[flagIndex])
        {
            // Element survives: store its input index at the destination
            // slot named by the output-index (exclusive-scan) array.
            int destSlot = inOI[((i/inOIdiv)%inOImod)*inOImul+inOIadd];
            outII[destSlot*outIImul+outIIadd] = i;
        }
    }
}
#if defined __CUDACC__
// CUDA kernel: same semantics as the CPU version, using a grid-stride loop
// so any grid/block configuration covers all nInputVals elements.
__global__ static void eavlSimpleReverseIndexOp_kernel(int nInputVals,
                          int *inOF, int inOFdiv, int inOFmod, int inOFmul, int inOFadd,
                          int *inOI, int inOIdiv, int inOImod, int inOImul, int inOIadd,
                          int *outII, int outIImul, int outIIadd)
{
    const int numThreads = blockDim.x * gridDim.x;
    const int threadID   = blockIdx.x * blockDim.x + threadIdx.x;
    for (int index = threadID; index < nInputVals; index += numThreads)
    {
        // Remapped flag and destination-index lookups for this element
        int outflag = inOF[((index/inOFdiv)%inOFmod)*inOFmul+inOFadd];
        int outindex = inOI[((index/inOIdiv)%inOImod)*inOImul+inOIadd];
        if (outflag)
            outII[outindex*outIImul+outIIadd] = index;
    }
}
// Host-side launcher: runs the kernel on device arrays (d_* pointers must
// already reside in GPU memory) with a fixed 32x256 launch configuration,
// then checks for launch errors.
static void eavlSimpleReverseIndexOp_GPU(int nInputVals,
                          int *d_inOF, int inOFdiv, int inOFmod, int inOFmul, int inOFadd,
                          int *d_inOI, int inOIdiv, int inOImod, int inOImul, int inOIadd,
                          int *d_outII, int outIImul, int outIIadd)
{
    int numBlocks  = 32;
    int numThreads = 256;
    eavlSimpleReverseIndexOp_kernel<<<numBlocks, numThreads>>>
        (nInputVals,
         d_inOF, inOFdiv, inOFmod, inOFmul, inOFadd,
         d_inOI, inOIdiv, inOImod, inOImul, inOIadd,
         d_outII, outIImul, outIIadd);
    CUDA_CHECK_ERROR();
}
#endif
// ****************************************************************************
// Class: eavlSimpleReverseIndexOp
//
// Purpose:
/// Given an input array of booleans, and an input array of output starting
/// indices (usually created by the caller using an exclusive scan of the
/// first array), generate an output array containing a map back to the
/// input index.
///
/// For example, if inOutputFlag is [0 1 1 0 1 0],
/// and inOutputIndex is thus [0 0 1 2 2 3], then
/// the result in outInputIndex will be [1 2 4] (i.e. the list of
/// indices from the input array which were set to 1).
//
// Programmer: Jeremy Meredith
// Creation: March 3, 2012
//
// Modifications:
// ****************************************************************************
class eavlSimpleReverseIndexOp : public eavlOperation
{
  protected:
    eavlArrayWithLinearIndex inOutputFlag;   // boolean flag per input element
    eavlArrayWithLinearIndex inOutputIndex;  // exclusive scan of the flags
    eavlArrayWithLinearIndex outInputIndex;  // result: surviving input indices
  public:
    // Takes the flag array, its exclusive-scan index array, and the output
    // array to fill with the indices of flagged input elements.
    eavlSimpleReverseIndexOp(eavlArrayWithLinearIndex inOutputFlag_,
                             eavlArrayWithLinearIndex inOutputIndex_,
                             eavlArrayWithLinearIndex outInputIndex_)
        : inOutputFlag(inOutputFlag_),
          inOutputIndex(inOutputIndex_),
          outInputIndex(outInputIndex_)
    {
    }
    // Runs the serial/OpenMP CPU implementation. All three arrays must be
    // eavlIntArray instances (checked via dynamic_cast).
    virtual void GoCPU()
    {
        int n = inOutputFlag.array->GetNumberOfTuples();
        eavlIntArray *inOF = dynamic_cast<eavlIntArray*>(inOutputFlag.array);
        eavlIntArray *inOI = dynamic_cast<eavlIntArray*>(inOutputIndex.array);
        eavlIntArray *outII = dynamic_cast<eavlIntArray*>(outInputIndex.array);
        if (!inOF || !inOI || !outII)
            THROW(eavlException,"eavlSimpleReverseIndexOp expects all integer arrays.");
        eavlSimpleReverseIndexOp_CPU(n,
                  (int*)inOF->GetHostArray(), inOutputFlag.div, inOutputFlag.mod, inOutputFlag.mul, inOutputFlag.add,
                  (int*)inOI->GetHostArray(), inOutputIndex.div, inOutputIndex.mod, inOutputIndex.mul, inOutputIndex.add,
                  (int*)outII->GetHostArray(), outInputIndex.mul, outInputIndex.add);
    }
    // Runs the CUDA implementation; throws when not compiled under nvcc.
    virtual void GoGPU()
    {
#if defined __CUDACC__
        int n = inOutputFlag.array->GetNumberOfTuples();
        eavlIntArray *inOF = dynamic_cast<eavlIntArray*>(inOutputFlag.array);
        eavlIntArray *inOI = dynamic_cast<eavlIntArray*>(inOutputIndex.array);
        eavlIntArray *outII = dynamic_cast<eavlIntArray*>(outInputIndex.array);
        if (!inOF || !inOI || !outII)
            THROW(eavlException,"eavlSimpleReverseIndexOp expects all integer arrays.");
        eavlSimpleReverseIndexOp_GPU(n,
                  (int*)inOF->GetCUDAArray(), inOutputFlag.div, inOutputFlag.mod, inOutputFlag.mul, inOutputFlag.add,
                  (int*)inOI->GetCUDAArray(), inOutputIndex.div, inOutputIndex.mod, inOutputIndex.mul, inOutputIndex.add,
                  (int*)outII->GetCUDAArray(), outInputIndex.mul, outInputIndex.add);
#else
        THROW(eavlException,"Executing GPU code without compiling under CUDA compiler.");
#endif
    }
};
#endif
|
/* Test fixture for intra-task traversal analysis: the bare expression
 * statements, the unused local, the dead commented-out return, and the
 * label are all deliberate inputs for the analysis, not mistakes. */
traversalIntraTask.c | int my2(int a) {
newFunc();
18; /* bare expression statement (intentional no-op) */
#pragma omp barrier
int q; /* intentionally unused */
// return 1;
im4: return a;
}
/* Identity fixture; the 'im4' label is a marker for the traversal. */
int my(int a) {
im4: return a;
}
/* Branching fixture with an early return. g1/g2 are globals defined
 * elsewhere in the file; testFoo() is an external call site for the
 * analysis; the unused local 'a' is intentional. */
int foo(int aFoo, int bFoo, int cFoo) {
int tempo;
tempo = 0;
tempo += 1;
if (aFoo > 10) {
testFoo();
}
if (bFoo > 20) {
return aFoo + bFoo + cFoo; /* early exit path */
}
g1 = 10;
g2 = 100 + g1;
int a; /* intentionally unused */
lNotTestFoo: return my(18);
}
/* No-op fixture: a single bare expression statement. */
void te() {
23;
}
/* Fixture exercising a dead branch (0 == 1), a labeled if/else-if
 * chain, and large regions of commented-out code that the traversal
 * analysis is expected to ignore. */
void newFunc2() {
if (0 == 1)
33; /* unreachable bare expression */
// newFunc();
int c = 30;
int d = 40;
testThis: if (d == 40) {
return; /* always taken: d is 40 here */
} else if (d > 40) {
my2(10);
c = 60;
// newFoo();
te();
17;
// my2(10);
d = 50;
// } else {
// my2(10);
// c = 60;
}
}
/* Fixture with three levels of nested '#pragma omp parallel' regions,
 * barriers at several nesting depths, a dead while(0) loop, and a
 * 'goto l;' that jumps to a label inside that loop body (legal C;
 * entering a loop body via goto skips the loop condition). Bare
 * expression statements and unused locals are intentional. */
void newFunc() {
// int a = 10;
// int b = 20;
34;
// my2(10);
int d; /* intentionally unused */
#pragma omp parallel
{
#pragma omp parallel
{
testThis: 2;
// a = 100;
// newFunc2();
// my2(1);
// d = 50;
5;
if (0) {
35; /* unreachable */
// newFunc();
}
// a = 110;
#pragma omp parallel
{
11;
#pragma omp barrier
#pragma omp parallel
{
int x2; /* intentionally unused */
#pragma omp barrier
}
}
while (0) {
#pragma omp barrier
int y;
l: y = 3;
}
goto l; /* jumps into the (otherwise dead) while body */
int pqr; /* unreachable declaration */
}
int z = 100; /* intentionally unused */
}
return;
}
/* Driver for the fixtures; the bare integer statements are intentional
 * markers. Falls off the end of main (returns 0 implicitly in C99+). */
int main(int argc, char * argv[]) {
newFunc();
112;
newFunc2();
12;
}
|
main.c | typedef int T[10];
/* Zero a 10x10 region of A (an array of 10-int rows). The outer loop
 * is work-shared across the threads of the enclosing parallel region. */
void foo(T *A) {
#pragma omp parallel default(shared)
    {
#pragma omp for
        for (int i = 0; i < 10; ++i) {
            for (int j = 0; j < 10; ++j) {
                A[i][j] = 0;
            }
        }
    }
}
|
pmv-secuencial.c | #include <stdlib.h>
#include <stdio.h>
#include<time.h>
//#define PRINT_ALL
#define VECTOR_GLOBAL
//#define VECTOR_DYNAMIC
#ifdef VECTOR_GLOBAL
#define MAX 32768 //=2^15
double v[MAX], m[MAX][MAX], r[MAX];
#endif
/*
 * Sequential matrix-vector product r = m * v.
 * argv[1] gives the number N of vector components; with VECTOR_GLOBAL
 * it is clamped to MAX. Prints the first/last component of r and the
 * elapsed wall-clock time.
 */
int main(int argc,char** argv){
if (argc<2){
printf("Faltan nº componentes del vector \n");
exit(-1);
}
struct timespec cgt1,cgt2;
double ncgt; /* execution time in seconds */
int i, j;
unsigned int N = atoi(argv[1]); /* max N = 2^32-1 = 4294967295 (sizeof(unsigned int) = 4 B) */
#ifdef VECTOR_GLOBAL
if (N>MAX)
N=MAX;
#endif
#ifdef VECTOR_DYNAMIC
double *v, **m, *r;
v = (double*) malloc(N*sizeof(double)); /* malloc takes the size in bytes */
m = (double**) malloc(N*sizeof(double*)); /* returns NULL when out of memory */
r = (double*) malloc(N*sizeof(double));
/* Check the top-level allocations BEFORE filling m[i]: the original
 * code dereferenced m in the row loop before testing it for NULL. */
if ((v==NULL) || (m==NULL) || (r==NULL)) {
printf("Error en la reserva de espacio para los vectores\n");
exit(-2);
}
for (i=0; i<N; i++) {
m[i] = (double*) malloc(N*sizeof(double));
if (m[i]==NULL) {
printf("Error en la reserva de espacio para los vectores\n");
exit(-2);
}
}
#endif
/* Initialize vector and matrix.
 * private(j): j is declared at function scope, so without the clause
 * every thread would share one j and race on it. */
#pragma omp parallel for private(j)
for (i=0; i<N; i++) {
v[i] = N*0.1+ i*0.1;
for (j=0; j<N; j++)
m[i][j] = v[i]*0.1+j*0.1;
}
/* Optionally dump the initialized data */
#ifdef PRINT_ALL
printf(" Vector:\n");
for (i=0; i<N; i++) {
printf("\t%f", v[i]);
}
printf("\n\n Matriz: \n");
for (i=0; i<N; i++) {
for (j=0; j<N; j++)
printf("\t%f", m[i][j]);
printf("\n\n");
}
#endif
clock_gettime(CLOCK_REALTIME,&cgt1);
/* Compute the product. The accumulator must be double: the products
 * m[i][j]*v[j] are doubles, and the original 'int sum' truncated every
 * addition (and could overflow for large N). */
double sum;
for (i=0; i<N; i++) {
sum = 0;
for (j=0; j<N; j++)
sum += m[i][j]*v[j];
r[i] = sum;
}
clock_gettime(CLOCK_REALTIME,&cgt2);
ncgt = (double) (cgt2.tv_sec - cgt1.tv_sec) +
(double) ((cgt2.tv_nsec - cgt1.tv_nsec)/(1.e+9));
/* Print the result */
printf("\n Resultado:\n");
#ifdef PRINT_ALL
for (i=0; i<N; i++) {
printf("\t%f", r[i]);
}
printf("\n");
#else
printf("Primer valor: %f \t Último valor: %f \n", r[0], r[N-1]);
#endif
printf("\n Tiempo de ejecución(s): %11.9f\n", ncgt);
#ifdef VECTOR_DYNAMIC
for (i=0; i<N; i++)
free(m[i]); /* the original leaked every row of m */
free(v); /* release the vector */
free(m); /* release the row-pointer array */
free(r);
#endif
return 0;
}
|
normalize_ref.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: jxyang@openailab.com
*/
#include "normalize_param.h"
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include <math.h>
#include <string.h>
static void norm_channel(float* input, float* output, float* buffer, float* scale, int hw, int channel, int num_thread)
{
memset(buffer, 0, hw * sizeof(float));
//#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < channel; i++)
{
for (int j = 0; j < hw; j++)
{
float data = *(input + i * hw + j);
buffer[j] += (data * data);
}
}
//#pragma omp parallel for num_threads(num_thread)
for (int j = 0; j < hw; j++)
{
buffer[j] = 1.f / sqrt(buffer[j]);
}
//#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < channel; i++)
{
for (int j = 0; j < hw; j++)
{
float data = *(input + i * hw + j);
*(output + i * hw + j) = data * buffer[j] * scale[i];
}
}
}
/* Lifecycle hook: the reference Normalize kernel keeps no per-node
 * state, so there is nothing to allocate. Always succeeds. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
/* Lifecycle hook: nothing was allocated in init_node, so nothing to
 * release. Always succeeds. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
struct node* ir_node = exec_node->ir_node;
struct graph* ir_graph = ir_node->graph;
struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
struct tensor* scale_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]);
normalize_param_t* param = (normalize_param_t*)(ir_node->op.param_mem);
float* input_org = (float*)input_tensor->data;
float* output_org = (float*)output_tensor->data;
float* sclae_org = (float*)scale_tensor->data;
int batch_number = input_tensor->dims[0];
int channel_num = input_tensor->dims[1];
int channel_size = (input_tensor->dims[2]) * (input_tensor->dims[3]);
int img_size = channel_num * channel_size;
float* buffer = (float*)sys_malloc(channel_size * sizeof(float));
if (param->channel_shared == 0 && param->across_spatial == 0)
{
for (int i = 0; i < batch_number; i++)
{
norm_channel(input_org, output_org, buffer, sclae_org, channel_size, channel_num, exec_graph->num_thread);
input_org += img_size;
output_org += img_size;
}
}
sys_free(buffer);
return 0;
}
/* Backend selection score: unconditionally reports OPS_SCORE_BEST for
 * this reference implementation. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
return OPS_SCORE_BEST;
}
/* Dispatch table for the reference Normalize op; only run() and the
 * mandatory lifecycle/score hooks are provided. */
static struct node_ops normalize_node_ops = {.prerun = NULL,
.run = run,
.reshape = NULL,
.postrun = NULL,
.init_node = init_node,
.release_node = release_node,
.score = score};
/* Register this reference implementation for OP_NORMALIZE with the
 * built-in node-ops registry. Returns the registry's status code. */
int register_normalize_ref_op()
{
return register_builtin_node_ops(OP_NORMALIZE, &normalize_node_ops);
}
/* Remove this implementation from the registry (inverse of
 * register_normalize_ref_op). Returns the registry's status code. */
int unregister_normalize_ref_op()
{
return unregister_builtin_node_ops(OP_NORMALIZE, &normalize_node_ops);
}
|
RecordTable.h | /*
* Souffle - A Datalog Compiler
* Copyright (c) 2020, The Souffle Developers. All rights reserved.
* Licensed under the Universal Permissive License v 1.0 as shown at:
* - https://opensource.org/licenses/UPL
* - <souffle root>/licenses/SOUFFLE-UPL.txt
*/
/************************************************************************
*
* @file RecordTable.h
*
* Data container implementing a map between records and their references.
* Records are separated by arity, i.e., stored in different RecordMaps.
*
***********************************************************************/
#pragma once
#include "souffle/CompiledTuple.h"
#include "souffle/RamTypes.h"
#include <cassert>
#include <cstddef>
#include <limits>
#include <memory>
#include <unordered_map>
#include <utility>
#include <vector>
namespace souffle {
/** @brief Bidirectional mappping between records and record references */
class RecordMap {
    /** arity of record */
    const size_t arity;

    /** hash function for unordered record map */
    struct RecordHash {
        // Take the record by const reference: this functor runs on every
        // lookup and insertion, and the original by-value signature
        // copied the whole vector each time just to hash it.
        std::size_t operator()(const std::vector<RamDomain>& record) const {
            std::size_t seed = 0;
            std::hash<RamDomain> domainHash;
            for (RamDomain value : record) {
                // boost-style hash_combine
                seed ^= domainHash(value) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
            }
            return seed;
        }
    };

    /** map from records to references */
    // TODO (b-scholz): replace vector<RamDomain> with something more memory-frugal
    std::unordered_map<std::vector<RamDomain>, RamDomain, RecordHash> recordToIndex;

    /** array of records; index represents record reference */
    // TODO (b-scholz): replace vector<RamDomain> with something more memory-frugal
    std::vector<std::vector<RamDomain>> indexToRecord;

public:
    explicit RecordMap(size_t arity) : arity(arity), indexToRecord(1) {}  // note: index 0 element left free

    /** @brief converts record to a record reference */
    // TODO (b-scholz): replace vector<RamDomain> with something more memory-frugal
    RamDomain pack(const std::vector<RamDomain>& vector) {
        RamDomain index;
#pragma omp critical(record_pack)
        {
            auto pos = recordToIndex.find(vector);
            if (pos != recordToIndex.end()) {
                index = pos->second;
            } else {
                // the inner critical section synchronizes writers of
                // indexToRecord against the readers in unpack()
#pragma omp critical(record_unpack)
                {
                    indexToRecord.push_back(vector);
                    index = static_cast<RamDomain>(indexToRecord.size()) - 1;
                    recordToIndex[vector] = index;
                    // assert that new index is smaller than the range
                    assert(index != std::numeric_limits<RamDomain>::max());
                }
            }
        }
        return index;
    }

    /** @brief convert record pointer to a record reference */
    RamDomain pack(const RamDomain* tuple) {
        // TODO (b-scholz): data is unnecessarily copied
        // for a successful lookup. To avoid this, we should
        // compute a hash of the pointer-array and traverse through
        // the bucket list of the unordered map finding the record.
        // Note that in case of non-existence, the record still needs to be
        // copied for the newly created entry but this will be the less
        // frequent case.
        std::vector<RamDomain> tmp(arity);
        for (size_t i = 0; i < arity; i++) {
            tmp[i] = tuple[i];
        }
        return pack(tmp);
    }

    /** @brief convert record reference to a record pointer */
    const RamDomain* unpack(RamDomain index) const {
        const RamDomain* res;
#pragma omp critical(record_unpack)
        res = indexToRecord[index].data();
        return res;
    }
};
class RecordTable {
public:
RecordTable() = default;
virtual ~RecordTable() = default;
/** @brief convert record to record reference */
RamDomain pack(RamDomain* tuple, size_t arity) {
return lookupArity(arity).pack(tuple);
}
/** @brief convert record reference to a record */
const RamDomain* unpack(RamDomain ref, size_t arity) const {
std::unordered_map<size_t, RecordMap>::const_iterator iter;
#pragma omp critical(RecordTableGetForArity)
{
// Find a previously emplaced map
iter = maps.find(arity);
}
assert(iter != maps.end() && "Attempting to unpack record for non-existing arity");
return (iter->second).unpack(ref);
}
private:
/** @brief lookup RecordMap for a given arity; if it does not exist, create new RecordMap */
RecordMap& lookupArity(size_t arity) {
std::unordered_map<size_t, RecordMap>::iterator mapsIterator;
#pragma omp critical(RecordTableGetForArity)
{
// This will create a new map if it doesn't exist yet.
// Returning a reference is safe across concurrent inserts:
// std::unordered_map never invalidates references to elements.
mapsIterator = maps.emplace(arity, arity).first;
}
return mapsIterator->second;
}
/** Arity/RecordMap association */
std::unordered_map<size_t, RecordMap> maps;
};
/** @brief helper to convert tuple to record reference for the synthesiser */
// Synthesiser convenience wrapper: forwards the fixed-arity tuple's raw
// data to RecordTable::pack. The tuple is taken by value; only its
// Arity RamDomain elements are read.
template <std::size_t Arity>
inline RamDomain pack(RecordTable& recordTab, Tuple<RamDomain, Arity> tuple) {
return recordTab.pack(static_cast<RamDomain*>(tuple.data), Arity);
}
} // namespace souffle
|
7551.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization. */
/* Fill A with deterministic data: A[i][j] = (i+j)/nj, i.e. values in
 * roughly [0, 2). The POLYBENCH_2D macro expands to the suite's
 * array-parameter declaration. */
static
void init_array (int ni, int nj,
DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
// printf("Initializing Array\n");
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nj; j++)
{
A[i][j] = ((DATA_TYPE) (i + j) / nj);
}
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* Dump B to stderr for dead-code-elimination prevention / output
 * checking; a newline is emitted every 20 elements (based on the
 * compile-time NJ stride). */
static
void print_array(int ni, int nj,
DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nj; j++) {
fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n");
}
fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* 2D convolution of A into B with a fixed 3x3 stencil; only interior
 * points [1, NI-2] x [1, NJ-2] are written — the boundary of B is left
 * untouched. Offloaded to a device via OpenMP target teams; j is
 * private per lane. Coefficient order is kept as written since
 * floating-point summation order affects the result. */
static
void kernel_conv2d(int ni,
int nj,
DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
int i, j;
#pragma scop
#pragma omp target teams distribute parallel for simd private(j)
for (i = 1; i < _PB_NI - 1; ++i)
{
for (j = 1; j < _PB_NJ - 1; ++j)
{
B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
+ -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
+ 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
}
}
#pragma endscop
// printf("Kernel computation complete !!\n");
}
/* Benchmark driver: allocate A and B, initialize A, time the conv2d
 * kernel, and print B to prevent dead-code elimination.
 * NOTE(review): the kernel only writes B's interior, yet print_array
 * reads all of B — the boundary elements are never initialized; confirm
 * this is intended by the suite. */
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int ni = NI;
int nj = NJ;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);
/* Initialize array(s). */
init_array (ni, nj, POLYBENCH_ARRAY(A));
/* Start timer. */
//polybench_start_instruments;
polybench_timer_start();
/* Run kernel. */
kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));
/* Stop and print timer. */
polybench_timer_stop();
polybench_timer_print();
//polybench_stop_instruments;
//polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(B);
return 0;
}
|
mpi_vector.h | #pragma once
#include <cassert>
#include <thrust/host_vector.h>
#include <thrust/gather.h>
#include "exceptions.h"
#include "exblas/mpi_accumulate.h"
#include "tensor_traits.h"
#include "blas1_dispatch_shared.h"
#include "mpi_communicator.h"
#include "memory.h"
#include "config.h"
//TODO: should we catch the cases where outer_size \in {1,2,3} in NearestNeighborComm?
namespace dg
{
/**
* @brief mpi Vector class
*
* @ingroup mpi_structures
*
* This class is a simple wrapper around a container object and an MPI_Comm.
* The blas1 and blas2 functionality is available iff it is available for the container type.
* We use mpi to communicate (e.g. boundary points in matrix-vector multiplications)
* and use the existing blas functions for the local computations.
* (At the blas level 1 communication is needed only for scalar products)
* @tparam container local container type. Must have a \c size() and a \c swap() member function and a specialization of the \c TensorTraits class.
*/
template<class container>
struct MPI_Vector
{
typedef container container_type;//!< typedef to acces underlying container
///no data is allocated, communicators are \c MPI_COMM_NULL
MPI_Vector(){
m_comm = m_comm128 = m_comm128Reduce = MPI_COMM_NULL;
}
/**
* @brief construct a vector
*
* calls \c exblas::mpi_reduce_communicator() (collective call)
* @param data internal data copy
* @param comm MPI communicator (may not be \c MPI_COMM_NULL)
*/
MPI_Vector( const container& data, MPI_Comm comm): m_data( data), m_comm(comm) {
exblas::mpi_reduce_communicator( comm, &m_comm128, &m_comm128Reduce);
}
/**
* @brief Conversion operator
*
* uses conversion between compatible containers
* @tparam OtherContainer another container class (container must be copy constructible from OtherContainer)
* @param src the source
*/
template<class OtherContainer>
MPI_Vector( const MPI_Vector<OtherContainer>& src){
// copies the already-derived communicators instead of re-running the
// collective exblas::mpi_reduce_communicator call
m_data = src.data();
m_comm = src.communicator();
m_comm128 = src.communicator_mod();
m_comm128Reduce = src.communicator_mod_reduce();
}
///@brief Get underlying data
///@return read access to data
const container& data() const {return m_data;}
///@brief Set underlying data
///@return write access to data
container& data() {return m_data;}
///@brief Get the communicator to which this vector belongs
///@return read access to MPI communicator
MPI_Comm communicator() const{return m_comm;}
///@brief Returns a communicator of fixed size 128
MPI_Comm communicator_mod() const{return m_comm128;}
/**
* @brief Returns a communicator consisting of all processes with rank 0 in \c communicator_mod()
*
* @return returns MPI_COMM_NULL to processes not part of that group
*/
MPI_Comm communicator_mod_reduce() const{return m_comm128Reduce;}
/**
* @brief Set the communicators with \c exblas::mpi_reduce_communicator
*/
void set_communicator(MPI_Comm comm, MPI_Comm comm_mod, MPI_Comm comm_mod_reduce){
m_comm = comm;
m_comm128 = comm_mod;
m_comm128Reduce = comm_mod_reduce;
}
///@brief Return the size of the data object
///@return local size
unsigned size() const{return m_data.size();}
///@brief Swap data and communicator
///@param src communicator and data is swapped
void swap( MPI_Vector& src){
// container::swap for the payload, std::swap for the MPI handles
m_data.swap(src.m_data);
std::swap( m_comm , src.m_comm);
std::swap( m_comm128 , src.m_comm128);
std::swap( m_comm128Reduce , src.m_comm128Reduce);
}
private:
container m_data;
MPI_Comm m_comm, m_comm128, m_comm128Reduce;
};
///@addtogroup dispatch
///@{
///@brief prototypical MPI vector
// Trait specialization: MPI_Vector dispatches via MPIVectorTag while
// forwarding value type and execution policy from the local container.
template<class container>
struct TensorTraits<MPI_Vector<container> > {
using value_type = get_value_type<container>;
using tensor_category = MPIVectorTag;
using execution_policy = get_execution_policy<container>;
};
///@}
/////////////////////////////communicator//////////////////////////
/**
* @brief Communicator for asynchronous nearest neighbor communication
*
* Imagine a communicator with Cartesian topology and further imagine that the
* grid topology is also Cartesian (vectors form a box) in two or three dimensions.
* In each direction this box has a boundary layer (the halo) of a depth given by
* the user. Each boundary layer has two neighboring layers, one on the same process
* and one lying on the neighboring process.
* What this class does is to provide you with six pointers to each of these
* six layers (three on each side). The pointers either reference data in an
* internal communication buffer (since it involves communciation to get the
* layers from neighboring processes) another buffer (if mpi communication
* requires to reorder input data) or the input vector itself (if the
* communication goes along the last dimension there is no need to reorder,
* in fact, here is the main gain we get from the pointer approach, we save
* on unnecessary data copies, which might be significant in cases where
* the communication to computation ratio is high).
* The size of the data each pointer references is the halo size, \c buffer_size()
*
* The communication is done asynchronously i.e. the user can initiate
* the communication and signal when the results are needed at a later stage.
*
* @note If the number of neighboring processes in the given direction is 1,
* the buffer size is 0 and all members return immediately.
* @note the pointers may alias each other (if the input contains less than 4 layers)
*
* @note the corresponding gather map is of general type and the communication
* can also be modeled in \c GeneralComm, but not \c BijectiveComm or \c SurjectiveComm
* @attention Currently we cannot handle the case where the whole vector is
* the boundary layer (i.e. \c buffer_size()==input.size() and both neighboring layers are on different processes)
* @ingroup mpi_structures
* @tparam Index the type of index container (must be either thrust::host_vector<int> or thrust::device_vector<int>)
* @tparam Buffer the container for the pointers to the buffer arrays
* @tparam Vector the vector container type must have a resize() function and work
* in the thrust library functions ( i.e. must a thrust::host_vector or thrust::device_vector)
* @sa dg::RowColDistMat
*/
template<class Index, class Buffer, class Vector>
struct NearestNeighborComm
{
using container_type = Vector;
using buffer_type = Buffer;
using pointer_type = get_value_type<Vector>*;
using const_pointer_type = get_value_type<Vector> const *;
///@brief no communication
NearestNeighborComm(){
m_comm = MPI_COMM_NULL;
m_silent = true;
}
/**
* @brief Construct
*
* @param n depth of the halo
* @param vector_dimensions {x, y, z} dimension (total number of points)
* @param comm the (cartesian) communicator
* @param direction 0 is x, 1 is y, 2 is z
*/
NearestNeighborComm( unsigned n, const unsigned vector_dimensions[3], MPI_Comm comm, unsigned direction)
{
static_assert( std::is_same<const_pointer_type, get_value_type<Buffer>>::value, "Must be same pointer types");
construct( n, vector_dimensions, comm, direction);
}
/**
* @brief Construct from other Communicator
*
* Simply copies halo size, dimensions, communicator and direction and
constructs a new object
* @tparam OtherIndex other index type
* @tparam OtherVector other container type
* @param src source object
*/
template< class OtherIndex, class OtherBuffer, class OtherVector>
NearestNeighborComm( const NearestNeighborComm<OtherIndex, OtherBuffer, OtherVector>& src){
if( src.buffer_size() == 0) m_silent=true;
else
construct( src.n(), src.dims(), src.communicator(), src.direction());
}
/**
* @brief halo size
* @return halo size
*/
unsigned n() const{return m_n;}
/**
* @brief The dimensionality of the input vector
* @return dimensions ( 3)
*/
const unsigned* dims() const{return m_dim;}
/**
* @brief The direction of communication
*
* @return direction
*/
unsigned direction() const {return m_direction;}
///@copydoc aCommunicator::communicator()
MPI_Comm communicator() const{return m_comm;}
/**
* @brief Allocate a buffer object
*
* The buffer object is only a colletion of pointers to the actual data
* @return a buffer object on the stack
* @note if \c buffer_size()==0 the default constructor of \c Buffer is called
*/
Buffer allocate_buffer( )const{
if( buffer_size() == 0 ) return Buffer();
return Buffer(6);
}
/** @brief The size of the halo
* @return the size of the halo (0 if no communication)
*/
unsigned buffer_size() const;
///@copydoc aCommunicator::isCommunicating()
bool isCommunicating() const{
if( buffer_size() == 0) return false;
return true;
}
/**
* @brief Map a local matrix index to a buffer index
* @param i matrix index
* @return buffer index (0,1,...,5)
*/
int map_index(int i) const{
// -1, 0, +1 are the left-side layers; the three indices at the far
// end of the outer dimension map to slots 3..5.
// NOTE(review): 'm_outer_size-0' accepts i == m_outer_size (one past
// the last outer index) — presumably the ghost layer; confirm.
if( i==-1) return 0;
if( i== 0) return 1;
if( i==+1) return 2;
if( i==(int)m_outer_size-0) return 5;
if( i==(int)m_outer_size-1) return 4;
if( i==(int)m_outer_size-2) return 3;
throw Error( Message(_ping_)<<"Index not mappable!");
return -1;
}
/**
* @brief Gather values from given Vector and initiate asynchronous MPI communication
* @param input from which to gather data (it is @b unsafe to change values on return)
* @param buffer (write only) pointers to the received data after \c global_gather_wait() was called (must be allocated by \c allocate_buffer())
* @param rqst four request variables that can be used to call MPI_Waitall
*/
void global_gather_init( const_pointer_type input, buffer_type& buffer, MPI_Request rqst[4])const
{
unsigned size = buffer_size();
//init pointers on host
const_pointer_type host_ptr[6];
if(m_trivial)
{
// communication along the last dimension: the four middle layers
// are contiguous in 'input', so point directly into it (no copy)
host_ptr[0] = thrust::raw_pointer_cast(&m_internal_buffer.data()[0*size]);
host_ptr[1] = input;
host_ptr[2] = input+size;
host_ptr[3] = input+(m_outer_size-2)*size;
host_ptr[4] = input+(m_outer_size-1)*size;
host_ptr[5] = thrust::raw_pointer_cast(&m_internal_buffer.data()[5*size]);
}
else
{
// non-contiguous case: all six layers live in the internal buffer
host_ptr[0] = thrust::raw_pointer_cast(&m_internal_buffer.data()[0*size]);
host_ptr[1] = thrust::raw_pointer_cast(&m_internal_buffer.data()[1*size]);
host_ptr[2] = thrust::raw_pointer_cast(&m_internal_buffer.data()[2*size]);
host_ptr[3] = thrust::raw_pointer_cast(&m_internal_buffer.data()[3*size]);
host_ptr[4] = thrust::raw_pointer_cast(&m_internal_buffer.data()[4*size]);
host_ptr[5] = thrust::raw_pointer_cast(&m_internal_buffer.data()[5*size]);
}
//copy pointers to device
thrust::copy( host_ptr, host_ptr+6, buffer.begin());
//fill internal_buffer if !trivial
do_global_gather_init( get_execution_policy<Vector>(), input, rqst);
sendrecv( host_ptr[1], host_ptr[4],
thrust::raw_pointer_cast(&m_internal_buffer.data()[0*size]), //host_ptr is const!
thrust::raw_pointer_cast(&m_internal_buffer.data()[5*size]), //host_ptr is const!
rqst);
}
/**
* @brief Wait for asynchronous communication to finish and gather received data into buffer
*
* Calls MPI_Waitall on the \c rqst variables and may do additional cleanup. After this call returns it is safe to use data the buffer points to.
* @param input from which to gather data (it is safe to change values on return since values to communicate are copied into \c buffer)
* @param buffer (write only) where received data resides on return (must be allocated by \c allocate_buffer())
* @param rqst the same four request variables that were used in global_gather_init
*/
void global_gather_wait(const_pointer_type input, const buffer_type& buffer, MPI_Request rqst[4])const
{
MPI_Waitall( 4, rqst, MPI_STATUSES_IGNORE );
#ifdef _DG_CUDA_UNAWARE_MPI
// received data landed in the host staging buffer; copy the two
// outer layers (slots 0 and 5) back to the device
unsigned size = buffer_size();
cudaMemcpy( thrust::raw_pointer_cast(&m_internal_buffer.data()[0*size]), //dst
thrust::raw_pointer_cast(&m_internal_host_buffer.data()[0*size]), //src
size*sizeof(get_value_type<Vector>), cudaMemcpyHostToDevice);
cudaMemcpy( thrust::raw_pointer_cast(&m_internal_buffer.data()[5*size]), //dst
thrust::raw_pointer_cast(&m_internal_host_buffer.data()[5*size]), //src
size*sizeof(get_value_type<Vector>), cudaMemcpyHostToDevice);
#endif
}
private:
void do_global_gather_init( OmpTag, const_pointer_type, MPI_Request rqst[4])const;
void do_global_gather_init( SerialTag, const_pointer_type, MPI_Request rqst[4])const;
void do_global_gather_init( CudaTag, const_pointer_type, MPI_Request rqst[4])const;
void construct( unsigned n, const unsigned vector_dimensions[3], MPI_Comm comm, unsigned direction);
unsigned m_n, m_dim[3]; //deepness, dimensions
MPI_Comm m_comm;
unsigned m_direction;
bool m_silent, m_trivial=false; //silent -> no comm, m_trivial-> comm in last dim
unsigned m_outer_size = 1; //size of vector in units of buffer_size
Index m_gather_map_middle;
dg::Buffer<Vector> m_internal_buffer;
#ifdef _DG_CUDA_UNAWARE_MPI
//a copy of the data on the host (we need to send data manually through the host)
dg::Buffer<thrust::host_vector<get_value_type<Vector>>> m_internal_host_buffer;
#endif
void sendrecv(const_pointer_type, const_pointer_type, pointer_type, pointer_type, MPI_Request rqst[4])const;
int m_source[2], m_dest[2];
};
///@cond
// Initialize dimensions/communicator, derive the MPI source/destination
// ranks in the chosen direction, and build the gather map that pulls the
// four middle halo layers out of the (possibly strided) input.
template<class I, class B, class V>
void NearestNeighborComm<I,B,V>::construct( unsigned n, const unsigned dimensions[3], MPI_Comm comm, unsigned direction)
{
static_assert( std::is_base_of<SharedVectorTag, get_tensor_category<V>>::value,
"Only Shared vectors allowed");
m_silent=false;
m_n=n;
m_dim[0] = dimensions[0], m_dim[1] = dimensions[1], m_dim[2] = dimensions[2];
m_direction = direction;
// communication is "trivial" (layers contiguous in memory) when it
// goes along the last non-degenerate dimension
if( dimensions[2] == 1 && direction == 1) m_trivial = true;
else if( direction == 2) m_trivial = true;
else m_trivial = false;
assert( direction <3);
m_comm = comm;
//mpi_cart_shift may return MPI_PROC_NULL then the receive buffer is not modified
MPI_Cart_shift( m_comm, m_direction, -1, &m_source[0], &m_dest[0]);
MPI_Cart_shift( m_comm, m_direction, +1, &m_source[1], &m_dest[1]);
{
int ndims;
MPI_Cartdim_get( comm, &ndims);
int dims[ndims], periods[ndims], coords[ndims];
MPI_Cart_get( comm, ndims, dims, periods, coords);
// single process in this direction -> nothing to communicate
if( dims[direction] == 1) m_silent = true;
}
if( !m_silent)
{
m_outer_size = dimensions[0]*dimensions[1]*dimensions[2]/buffer_size();
assert( m_outer_size > 1 && "Parallelization too fine grained!"); //right now we cannot have that
// gather indices for the four middle layers (buffer slots 1..4):
// first n, second n, last-but-one n, last n planes in 'direction'
thrust::host_vector<int> mid_gather( 4*buffer_size());
switch( direction)
{
case( 0):
for( unsigned i=0; i<m_dim[2]*m_dim[1]; i++)
for( unsigned j=0; j<n; j++)
{
mid_gather[(0*n+j)*m_dim[2]*m_dim[1]+i] = i*m_dim[0] + j;
mid_gather[(1*n+j)*m_dim[2]*m_dim[1]+i] = i*m_dim[0] + n + j;
mid_gather[(2*n+j)*m_dim[2]*m_dim[1]+i] = i*m_dim[0] + m_dim[0]-2*n + j;
mid_gather[(3*n+j)*m_dim[2]*m_dim[1]+i] = i*m_dim[0] + m_dim[0]- n + j;
}
break;
case( 1):
for( unsigned i=0; i<m_dim[2]; i++)
for( unsigned j=0; j<n; j++)
for( unsigned k=0; k<m_dim[0]; k++)
{
mid_gather[((0*n+j)*m_dim[2]+i)*m_dim[0] + k] = (i*m_dim[1] + j)*m_dim[0] + k;
mid_gather[((1*n+j)*m_dim[2]+i)*m_dim[0] + k] = (i*m_dim[1] + n + j)*m_dim[0] + k;
mid_gather[((2*n+j)*m_dim[2]+i)*m_dim[0] + k] = (i*m_dim[1] + m_dim[1]-2*n + j)*m_dim[0] + k;
mid_gather[((3*n+j)*m_dim[2]+i)*m_dim[0] + k] = (i*m_dim[1] + m_dim[1]- n + j)*m_dim[0] + k;
}
break;
case( 2):
for( unsigned i=0; i<n; i++)
for( unsigned j=0; j<m_dim[0]*m_dim[1]; j++)
{
mid_gather[(0*n+i)*m_dim[0]*m_dim[1]+j] = (i )*m_dim[0]*m_dim[1] + j;
mid_gather[(1*n+i)*m_dim[0]*m_dim[1]+j] = (i + n )*m_dim[0]*m_dim[1] + j;
mid_gather[(2*n+i)*m_dim[0]*m_dim[1]+j] = (i + m_dim[2]-2*n )*m_dim[0]*m_dim[1] + j;
mid_gather[(3*n+i)*m_dim[0]*m_dim[1]+j] = (i + m_dim[2]- n )*m_dim[0]*m_dim[1] + j;
}
break;
}
m_gather_map_middle = mid_gather; //transfer to device
m_internal_buffer.data().resize( 6*buffer_size() );
#ifdef _DG_CUDA_UNAWARE_MPI
m_internal_host_buffer.data().resize( 6*buffer_size() );
#endif
}
}
// Size of one halo layer: the halo depth m_n times the product of the
// two dimensions perpendicular to the communication direction.
// Returns 0 when this communicator is silent (no neighbors).
template<class I, class B, class V>
unsigned NearestNeighborComm<I,B,V>::buffer_size() const
{
    if( m_silent)
        return 0;
    if( m_direction == 0) //x-direction
        return m_n*m_dim[1]*m_dim[2];
    if( m_direction == 1) //y-direction
        return m_n*m_dim[0]*m_dim[2];
    if( m_direction == 2) //z-direction
        return m_n*m_dim[0]*m_dim[1]; //no further m_n (hide in m_dim)
    return 0;
}
// Serial gather of the four middle halo layers into internal-buffer
// slots 1..4 via the precomputed gather map. When communication is
// along the last dimension (m_trivial) the layers are used in place
// and there is nothing to copy.
template<class I, class B, class V>
void NearestNeighborComm<I,B,V>::do_global_gather_init( SerialTag, const_pointer_type input, MPI_Request rqst[4]) const
{
    if( m_trivial)
        return;
    const unsigned size = buffer_size();
    for( unsigned k=0; k<4*size; k++)
        m_internal_buffer.data()[size+k] = input[m_gather_map_middle[k]];
}
#ifdef _OPENMP
// OpenMP variant of the middle-layer gather: same index map as the
// serial version, parallelized over the flat 4*size element range.
template<class I, class B, class V>
void NearestNeighborComm<I,B,V>::do_global_gather_init( OmpTag, const_pointer_type input, MPI_Request rqst[4]) const
{
if(!m_trivial)
{
unsigned size = buffer_size();
#pragma omp parallel for
for( unsigned i=0; i<4*size; i++)
m_internal_buffer.data()[size+i] = input[m_gather_map_middle[i]];
}
}
#endif
#if THRUST_DEVICE_SYSTEM==THRUST_DEVICE_SYSTEM_CUDA
// CUDA variant: thrust::gather performs the middle-layer copy on the
// device; the synchronize ensures the buffer is complete before the
// subsequent MPI sends read from it.
template<class I, class B, class V>
void NearestNeighborComm<I,B,V>::do_global_gather_init( CudaTag, const_pointer_type input, MPI_Request rqst[4]) const
{
//gather values from input into sendbuffer
if(!m_trivial)
{
unsigned size = buffer_size();
thrust::gather( thrust::cuda::tag(), m_gather_map_middle.begin(), m_gather_map_middle.end(), input, m_internal_buffer.data().begin()+size);
}
cudaDeviceSynchronize(); //wait until device functions are finished before sending data
}
#endif
template<class I, class B, class V>
void NearestNeighborComm<I,B,V>::sendrecv( const_pointer_type sb1_ptr, const_pointer_type sb2_ptr, pointer_type rb1_ptr, pointer_type rb2_ptr, MPI_Request rqst[4]) const
{
// Post the four non-blocking transfers that exchange the two halo
// buffers with the neighboring processes; MPI tags 3 and 9 keep the two
// transfer directions apart.
unsigned size = buffer_size();
#ifdef _DG_CUDA_UNAWARE_MPI
// MPI cannot read device memory directly: stage the send buffers on the host
cudaMemcpy( thrust::raw_pointer_cast(&m_internal_host_buffer.data()[1*size]),//dst
sb1_ptr, size*sizeof(get_value_type<V>), cudaMemcpyDeviceToHost); //src
cudaMemcpy( thrust::raw_pointer_cast(&m_internal_host_buffer.data()[4*size]), //dst
sb2_ptr, size*sizeof(get_value_type<V>), cudaMemcpyDeviceToHost); //src
sb1_ptr = thrust::raw_pointer_cast(&m_internal_host_buffer.data()[1*size]);
sb2_ptr = thrust::raw_pointer_cast(&m_internal_host_buffer.data()[4*size]);
// NOTE(review): rb1_ptr/rb2_ptr are reassigned locally, so the received
// data lands in m_internal_host_buffer, not in the caller's buffers --
// presumably copied back to the device after the MPI_Wait; confirm at
// the call site.
rb1_ptr = thrust::raw_pointer_cast(&m_internal_host_buffer.data()[0*size]);
rb2_ptr = thrust::raw_pointer_cast(&m_internal_host_buffer.data()[5*size]);
//This is a mistake if called with a host_vector
#endif
MPI_Isend( sb1_ptr, size,
getMPIDataType<get_value_type<V>>(), //sender
m_dest[0], 3, m_comm, &rqst[0]); //destination
MPI_Irecv( rb2_ptr, size,
getMPIDataType<get_value_type<V>>(), //receiver
m_source[0], 3, m_comm, &rqst[1]); //source
MPI_Isend( sb2_ptr, size,
getMPIDataType<get_value_type<V>>(), //sender
m_dest[1], 9, m_comm, &rqst[2]); //destination
MPI_Irecv( rb1_ptr, size,
getMPIDataType<get_value_type<V>>(), //receiver
m_source[1], 9, m_comm, &rqst[3]); //source
}
///@endcond
}//namespace dg
|
spectralnorm-4.c | /* The Computer Language Benchmarks Game
* http://benchmarksgame.alioth.debian.org/
*
* Original C contributed by Sebastien Loisel
* Conversion to C++ by Jon Harrop
* OpenMP parallelize by The Anh Tran
* Add SSE by The Anh Tran
* Reconversion into C by Dan Farina
*/
#define _GNU_SOURCE
#include <omp.h>
#include <math.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#define false 0
#define true 1
/* define SIMD data type. 2 doubles encapsulated in one XMM register */
typedef double v2dt __attribute__((vector_size(16)));
static const v2dt v1 = {1.0, 1.0};
/* parameter for evaluate functions */
struct Param
{
double* u; /* source vector */
double* tmp; /* temporary */
double* v; /* destination vector */
int N; /* source/destination vector length */
int N2; /* = N/2 */
int r_begin; /* working range of this thread: rows [r_begin, r_end) */
int r_end;
};
/* Return: 1.0 / (i + j) * (i + j +1) / 2 + i + 1; */
static double
eval_A(int i, int j)
{
    /*
     * Matrix entry A[i][j] = 1 / ((i+j)(i+j+1)/2 + i + 1).
     * s*(s+1) is always even, so the shift-right by 1 is an exact /2.
     */
    int s = i + j;
    int denom = ((s * (s + 1)) >> 1) + i + 1;
    return 1.0 / denom;
}
/*
* Return type: 2 doubles in xmm register [double1, double2]
* double1 = 1.0 / (i + j) * (i + j +1) / 2 + i + 1;
* double2 = 1.0 / (i+1 + j) * (i+1 + j +1) / 2 + i+1 + 1;
*/
static v2dt
eval_A_i(int i, int j)
{
int d1 = (((i+j) * (i+j+1)) >> 1) + i+1;
int d2 = (((i+1 +j) * (i+1 +j+1)) >> 1) + (i+1) +1;
v2dt r = {d1, d2};
return v1 / r;
}
/*
* Return type: 2 doubles in xmm register [double1, double2]
* double1 = 1.0 / (i + j) * (i + j +1) / 2 + i + 1;
* double2 = 1.0 / (i + j+1) * (i + j+1 +1) / 2 + i + 1;
*/
static v2dt
eval_A_j(int i, int j)
{
int d1 = (((i+j) * (i+j+1)) >> 1) + i+1;
int d2 = (((i+ j+1) * (i+ j+1 +1)) >> 1) + i+1;
v2dt r = {d1, d2};
return v1 / r;
}
/* This function is called by many threads */
/* tmp = A.u for the rows [r_begin, r_end) owned by the calling thread.
   The inner loop consumes two doubles per iteration via the v2dt type. */
static void
eval_A_times_u(struct Param *p)
{
/* alias of source vector */
const v2dt *pU = (void *) p->u;
int i;
int ie;
for (i = p->r_begin, ie = p->r_end; i < ie; i++)
{
v2dt sum = {0, 0};
/* xmm = 2 doubles. This loop run from [0 .. N/2) */
int j;
for (j = 0; j < p->N2; j++)
sum += pU[j] * eval_A_j(i, j*2);
/* write result */
{
/* horizontal add: view the xmm register as two doubles and sum the
   lanes. NOTE(review): type-puns through a pointer cast -- relies on
   GCC vector-extension semantics rather than strict ISO C aliasing. */
double *mem = (void *) &sum;
p->tmp[i] = mem[0] + mem[1];
}
/* If source vector is odd size. This should be called <= 1 time */
for (j = j*2; __builtin_expect(j < p->N, false); j++)
p->tmp[i] += eval_A(i, j) * p->u[j];
}
}
/* v = transpose(A).tmp for the rows [r_begin, r_end) owned by the calling
   thread; same SIMD structure as eval_A_times_u but with swapped indices. */
static void
eval_At_times_u(struct Param *p)
{
const v2dt *pT = (void *) p->tmp;
int i;
int ie;
for (i = p->r_begin, ie = p->r_end; i < ie; i++)
{
v2dt sum = {0, 0};
int j;
for (j = 0; j < p->N2; j++)
sum += pT[j] * eval_A_i(j*2, i);
{
/* horizontal add of the two SIMD lanes (see note in eval_A_times_u) */
double *mem = (void *) &sum;
p->v[i] = mem[0] + mem[1];
}
/* odd size array */
for (j = j*2; __builtin_expect(j < p->N, false); j++)
p->v[i] += eval_A(j, i) * p->tmp[j];
}
}
/*
* Called by N threads.
*
* Each thread modifies its portion in destination vector -> barrier needed to
* sync access
*/
/* v = transpose(A).(A.u). Runs inside the parallel region: each thread
   handles its own row range; the barriers ensure the shared intermediate
   vector (tmp, then v) is complete before the next stage reads it. */
static void
eval_AtA_times_u(struct Param *p)
{
eval_A_times_u(p);
#pragma omp barrier
eval_At_times_u(p);
#pragma omp barrier
}
/*
* Shootout bench uses affinity to emulate single core processor. This
* function searches for appropriate number of threads to spawn.
*/
/*
 * Count the CPUs the current process is allowed to run on, so one thread
 * is spawned per usable core (the benchmark harness restricts affinity to
 * emulate smaller machines).
 *
 * Fixes over the original: scans the full cpu_set_t (CPU_SETSIZE) instead
 * of only the first 16 CPUs, checks the sched_getaffinity() return value,
 * and never returns less than 1 -- the result feeds num_threads(), for
 * which 0 would be invalid.
 */
static int
GetThreadCount()
{
    cpu_set_t cs;
    int i;
    int count = 0;

    CPU_ZERO(&cs);
    if (sched_getaffinity(0, sizeof(cs), &cs) == 0)
    {
        for (i = 0; i < CPU_SETSIZE; i++)
            if (CPU_ISSET(i, &cs))
                count++;
    }
    /* fall back to a single thread if the query failed or the set is empty */
    return (count > 0) ? count : 1;
}
/* Power-iteration estimate of the spectral norm of A:
   returns sqrt( (u . A^T A u) / (u . u) ) after 10 applications of A^T A. */
static double
spectral_game(int N)
{
/* Align 64 byte for L2 cache line */
__attribute__((aligned(64))) double u[N];
__attribute__((aligned(64))) double tmp[N];
__attribute__((aligned(64))) double v[N];
double vBv = 0.0;
double vv = 0.0;
/* NOTE(review): N comes straight from the command line; very large N
   will overflow the stack through these VLAs */
#pragma omp parallel default(shared) num_threads(GetThreadCount())
{
int i;
#pragma omp for schedule(static)
for (i = 0; i < N; i++)
u[i] = 1.0;
/*
 * Executed by every thread of the team; the variables declared below
 * are private to each thread.
 */
int threadid = omp_get_thread_num();
int threadcount = omp_get_num_threads();
int chunk = N / threadcount;
int ite;
struct Param my_param;
my_param.tmp = tmp;
my_param.N = N;
my_param.N2 = N/2;
/*
 * calculate each thread's working range [range1 .. range2) => static
 * schedule here
 */
my_param.r_begin = threadid * chunk;
my_param.r_end = (threadid < (threadcount -1)) ?
(my_param.r_begin + chunk) : N;
/* 10 iterations of u <- A^T A u, ping-ponging between u and v */
for (ite = 0; ite < 10; ite++)
{
my_param.u = u; /* source vec is u */
my_param.v = v; /* destination vec is v */
eval_AtA_times_u(&my_param);
my_param.u = v; /* source is v */
my_param.v = u; /* destination is u */
eval_AtA_times_u(&my_param);
}
/* multi thread adding */
{
int i;
#pragma omp for schedule(static) reduction( + : vBv, vv ) nowait
for (i = 0; i < N; i++)
{
vv += v[i] * v[i];
vBv += u[i] * v[i];
}
}
}
/* end parallel region */
return sqrt(vBv/vv);
}
int
main(int argc, char *argv[])
{
    /* Problem size N: optional first command line argument, default 2000. */
    int N = ((argc >= 2) ? atoi(argv[1]) : 2000);

    /* atoi() reports garbage input as 0, and a non-positive N would make
       the VLAs in spectral_game() undefined behavior -- reject up front. */
    if (N < 1)
    {
        fprintf(stderr, "%s: N must be a positive integer\n", argv[0]);
        return EXIT_FAILURE;
    }
    printf("%.9f\n", spectral_game(N));
    return 0;
}
|
segment.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS EEEEE GGGG M M EEEEE N N TTTTT %
% SS E G MM MM E NN N T %
% SSS EEE G GGG M M M EEE N N N T %
% SS E G G M M E N NN T %
% SSSSS EEEEE GGGG M M EEEEE N N T %
% %
% %
% MagickCore Methods to Segment an Image with Thresholding Fuzzy c-Means %
% %
% Software Design %
% John Cristy %
% April 1993 %
% %
% %
% Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Segment segments an image by analyzing the histograms of the color
% components and identifying units that are homogeneous with the fuzzy
% c-means technique. The scale-space filter analyzes the histograms of
% the three color components of the image and identifies a set of
% classes. The extents of each class is used to coarsely segment the
% image with thresholding. The color associated with each class is
% determined by the mean color of all pixels within the extents of a
% particular class. Finally, any unclassified pixels are assigned to
% the closest class with the fuzzy c-means technique.
%
% The fuzzy c-Means algorithm can be summarized as follows:
%
% o Build a histogram, one for each color component of the image.
%
% o For each histogram, successively apply the scale-space filter and
% build an interval tree of zero crossings in the second derivative
% at each scale. Analyze this scale-space ``fingerprint'' to
% determine which peaks and valleys in the histogram are most
% predominant.
%
% o The fingerprint defines intervals on the axis of the histogram.
% Each interval contains either a minima or a maxima in the original
% signal. If each color component lies within the maxima interval,
% that pixel is considered ``classified'' and is assigned an unique
% class number.
%
% o Any pixel that fails to be classified in the above thresholding
% pass is classified using the fuzzy c-Means technique. It is
% assigned to one of the classes discovered in the histogram analysis
% phase.
%
% The fuzzy c-Means technique attempts to cluster a pixel by finding
% the local minima of the generalized within group sum of squared error
% objective function. A pixel is assigned to the closest class of
% which the fuzzy membership has a maximum value.
%
% Segment is strongly based on software written by Andy Gallo,
% University of Delaware.
%
% The following reference was used in creating this program:
%
% Young Won Lim, Sang Uk Lee, "On The Color Image Segmentation
% Algorithm Based on the Thresholding and the Fuzzy c-Means
% Techniques", Pattern Recognition, Volume 23, Number 9, pages
% 935-952, 1990.
%
%
*/
#include "magick/studio.h"
#include "magick/cache.h"
#include "magick/color.h"
#include "magick/colormap.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/quantize.h"
#include "magick/quantum.h"
#include "magick/quantum-private.h"
#include "magick/segment.h"
#include "magick/string_.h"
/*
Define declarations.
*/
#define MaxDimension 3
#define DeltaTau 0.5f
#if defined(FastClassify)
#define WeightingExponent 2.0
#define SegmentPower(ratio) (ratio)
#else
#define WeightingExponent 2.5
#define SegmentPower(ratio) pow(ratio,(double) (1.0/(weighting_exponent-1.0)));
#endif
#define Tau 5.2f
/*
Typedef declarations.
*/
/*
  ExtentPacket: one interval [left,right] of a color-channel histogram;
  center accumulates the running mean channel value and index is the scan
  cursor advanced by DefineRegion().
*/
typedef struct _ExtentPacket
{
MagickRealType
center;
ssize_t
index,
left,
right;
} ExtentPacket;
/*
  Cluster: a box in RGB space (one ExtentPacket per channel) kept in a
  singly-linked list; count is the number of pixels assigned to the
  cluster and id its colormap slot.
*/
typedef struct _Cluster
{
struct _Cluster
*next;
ExtentPacket
red,
green,
blue;
ssize_t
count,
id;
} Cluster;
/*
  IntervalTree: node of the scale-space tree of histogram intervals
  (tau is the smoothing scale) -- built by the OptimalTau machinery,
  which is outside this view; confirm field semantics there.
*/
typedef struct _IntervalTree
{
MagickRealType
tau;
ssize_t
left,
right;
MagickRealType
mean_stability,
stability;
struct _IntervalTree
*sibling,
*child;
} IntervalTree;
/*
  ZeroCrossing: a histogram smoothed at scale tau together with the
  per-bin sign markers of its second-derivative zero crossings.
*/
typedef struct _ZeroCrossing
{
MagickRealType
tau,
histogram[256];
short
crossings[256];
} ZeroCrossing;
/*
Constant declarations.
*/
static const int
Blue = 2,
Green = 1,
Red = 0,
SafeMargin = 3,
TreeLength = 600;
/*
Method prototypes.
*/
static MagickRealType
OptimalTau(const ssize_t *,const double,const double,const double,
const double,short *);
static ssize_t
DefineRegion(const short *,ExtentPacket *);
static void
InitializeHistogram(const Image *,ssize_t **,ExceptionInfo *),
ScaleSpace(const ssize_t *,const MagickRealType,MagickRealType *),
ZeroCrossHistogram(MagickRealType *,const MagickRealType,short *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l a s s i f y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Classify() defines one or more classes. Each pixel is thresholded to
% determine which class it belongs to. If the class is not identified it is
% assigned to the closest class based on the fuzzy c-Means technique.
%
% The format of the Classify method is:
%
% MagickBooleanType Classify(Image *image,short **extrema,
% const MagickRealType cluster_threshold,
% const MagickRealType weighting_exponent,
% const MagickBooleanType verbose)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
% o cluster_threshold: This MagickRealType represents the minimum number of
% pixels contained in a hexahedra before it can be considered valid
% (expressed as a percentage).
%
% o weighting_exponent: Specifies the membership weighting exponent.
%
% o verbose: A value greater than zero prints detailed information about
% the identified classes.
%
*/
static MagickBooleanType Classify(Image *image,short **extrema,
const MagickRealType cluster_threshold,
const MagickRealType weighting_exponent,const MagickBooleanType verbose)
{
#define SegmentImageTag "Segment/Image"
CacheView
*image_view;
Cluster
*cluster,
*head,
*last_cluster,
*next_cluster;
ExceptionInfo
*exception;
ExtentPacket
blue,
green,
red;
MagickOffsetType
progress;
MagickRealType
*free_squares;
MagickStatusType
status;
register ssize_t
i;
register MagickRealType
*squares;
size_t
number_clusters;
ssize_t
count,
y;
/*
Form clusters: one candidate cluster per combination of red, green and
blue peak regions found in the channel histograms.
*/
cluster=(Cluster *) NULL;
head=(Cluster *) NULL;
(void) ResetMagickMemory(&red,0,sizeof(red));
(void) ResetMagickMemory(&green,0,sizeof(green));
(void) ResetMagickMemory(&blue,0,sizeof(blue));
while (DefineRegion(extrema[Red],&red) != 0)
{
green.index=0;
while (DefineRegion(extrema[Green],&green) != 0)
{
blue.index=0;
while (DefineRegion(extrema[Blue],&blue) != 0)
{
/*
Allocate a new class.
*/
if (head != (Cluster *) NULL)
{
cluster->next=(Cluster *) AcquireMagickMemory(
sizeof(*cluster->next));
cluster=cluster->next;
}
else
{
cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
head=cluster;
}
if (cluster == (Cluster *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/*
Initialize a new class.
*/
cluster->count=0;
cluster->red=red;
cluster->green=green;
cluster->blue=blue;
cluster->next=(Cluster *) NULL;
}
}
}
if (head == (Cluster *) NULL)
{
/*
No classes were identified-- create one.
*/
cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
if (cluster == (Cluster *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/*
Initialize a new class.
*/
cluster->count=0;
cluster->red=red;
cluster->green=green;
cluster->blue=blue;
cluster->next=(Cluster *) NULL;
head=cluster;
}
/*
Count the pixels for each cluster; center fields accumulate channel sums
to be divided into means below.
*/
status=MagickTrue;
count=0;
progress=0;
exception=(&image->exception);
image_view=AcquireCacheView(image);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*p;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
if (((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) >=
(cluster->red.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) <=
(cluster->red.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) >=
(cluster->green.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) <=
(cluster->green.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) >=
(cluster->blue.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) <=
(cluster->blue.right+SafeMargin)))
{
/*
Count this pixel.
*/
count++;
cluster->red.center+=(MagickRealType) ScaleQuantumToChar(GetPixelRed(p));
cluster->green.center+=(MagickRealType)
ScaleQuantumToChar(GetPixelGreen(p));
cluster->blue.center+=(MagickRealType) ScaleQuantumToChar(GetPixelBlue(p));
cluster->count++;
break;
}
p++;
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_Classify)
#endif
proceed=SetImageProgress(image,SegmentImageTag,progress++,
2*image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
/*
Remove clusters that do not meet minimum cluster threshold.
NOTE(review): count is reset here and incremented as clusters are
accepted, so the threshold below is based on the running number of kept
clusters rather than the total pixel count accumulated above -- confirm
this is intended.
*/
count=0;
last_cluster=head;
next_cluster=head;
for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
{
next_cluster=cluster->next;
if ((cluster->count > 0) &&
(cluster->count >= (count*cluster_threshold/100.0)))
{
/*
Initialize cluster: assign its id and turn channel sums into means.
*/
cluster->id=count;
cluster->red.center/=cluster->count;
cluster->green.center/=cluster->count;
cluster->blue.center/=cluster->count;
count++;
last_cluster=cluster;
continue;
}
/*
Delete cluster.
*/
if (cluster == head)
head=next_cluster;
else
last_cluster->next=next_cluster;
cluster=(Cluster *) RelinquishMagickMemory(cluster);
}
number_clusters=(size_t) count;
if (verbose != MagickFalse)
{
/*
Print cluster statistics.
*/
(void) FormatLocaleFile(stdout,"Fuzzy C-means Statistics\n");
(void) FormatLocaleFile(stdout,"===================\n\n");
(void) FormatLocaleFile(stdout,"\tCluster Threshold = %g\n",(double)
cluster_threshold);
(void) FormatLocaleFile(stdout,"\tWeighting Exponent = %g\n",(double)
weighting_exponent);
(void) FormatLocaleFile(stdout,"\tTotal Number of Clusters = %.20g\n\n",
(double) number_clusters);
/*
Print the total number of points per cluster.
*/
(void) FormatLocaleFile(stdout,"\n\nNumber of Vectors Per Cluster\n");
(void) FormatLocaleFile(stdout,"=============================\n\n");
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
(void) FormatLocaleFile(stdout,"Cluster #%.20g = %.20g\n",(double)
cluster->id,(double) cluster->count);
/*
Print the cluster extents.
*/
(void) FormatLocaleFile(stdout,
"\n\n\nCluster Extents: (Vector Size: %d)\n",MaxDimension);
(void) FormatLocaleFile(stdout,"================");
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
(void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
cluster->id);
(void) FormatLocaleFile(stdout,
"%.20g-%.20g %.20g-%.20g %.20g-%.20g\n",(double)
cluster->red.left,(double) cluster->red.right,(double)
cluster->green.left,(double) cluster->green.right,(double)
cluster->blue.left,(double) cluster->blue.right);
}
/*
Print the cluster center values.
*/
(void) FormatLocaleFile(stdout,
"\n\n\nCluster Center Values: (Vector Size: %d)\n",MaxDimension);
(void) FormatLocaleFile(stdout,"=====================");
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
(void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
cluster->id);
(void) FormatLocaleFile(stdout,"%g %g %g\n",(double)
cluster->red.center,(double) cluster->green.center,(double)
cluster->blue.center);
}
(void) FormatLocaleFile(stdout,"\n");
}
if (number_clusters > 256)
ThrowBinaryException(ImageError,"TooManyClusters",image->filename);
/*
Speed up distance calculations: precompute i*i for i in [-255,255].
*/
squares=(MagickRealType *) AcquireQuantumMemory(513UL,sizeof(*squares));
if (squares == (MagickRealType *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/* shift the base pointer so squares[i] is valid for negative indices */
squares+=255;
for (i=(-255); i <= 255; i++)
squares[i]=(MagickRealType) i*(MagickRealType) i;
/*
Allocate image colormap: one entry per surviving cluster, rounded mean
color of the cluster.
*/
if (AcquireImageColormap(image,number_clusters) == MagickFalse)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
i=0;
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
image->colormap[i].red=ScaleCharToQuantum((unsigned char)
(cluster->red.center+0.5));
image->colormap[i].green=ScaleCharToQuantum((unsigned char)
(cluster->green.center+0.5));
image->colormap[i].blue=ScaleCharToQuantum((unsigned char)
(cluster->blue.center+0.5));
i++;
}
/*
Do coarse grain classes: threshold each pixel against the cluster boxes;
pixels matching no box fall through to fuzzy c-means classification.
*/
exception=(&image->exception);
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Cluster
*cluster;
register const PixelPacket
*restrict p;
register IndexPacket
*restrict indexes;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelIndex(indexes+x,0);
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
if (((ssize_t) ScaleQuantumToChar(q->red) >=
(cluster->red.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(q->red) <=
(cluster->red.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(q->green) >=
(cluster->green.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(q->green) <=
(cluster->green.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(q->blue) >=
(cluster->blue.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(q->blue) <=
(cluster->blue.right+SafeMargin)))
{
/*
Classify this pixel.
*/
SetPixelIndex(indexes+x,cluster->id);
break;
}
}
if (cluster == (Cluster *) NULL)
{
MagickRealType
distance_squared,
local_minima,
numerator,
ratio,
sum;
register ssize_t
j,
k;
/*
Compute fuzzy membership. Despite its name, local_minima tracks the
largest membership value (1/sum) seen so far.
*/
local_minima=0.0;
for (j=0; j < (ssize_t) image->colors; j++)
{
sum=0.0;
p=image->colormap+j;
distance_squared=squares[(ssize_t) ScaleQuantumToChar(q->red)-
(ssize_t) ScaleQuantumToChar(GetPixelRed(p))]+
squares[(ssize_t) ScaleQuantumToChar(q->green)-
(ssize_t) ScaleQuantumToChar(GetPixelGreen(p))]+
squares[(ssize_t) ScaleQuantumToChar(q->blue)-
(ssize_t) ScaleQuantumToChar(GetPixelBlue(p))];
numerator=distance_squared;
for (k=0; k < (ssize_t) image->colors; k++)
{
p=image->colormap+k;
distance_squared=squares[(ssize_t) ScaleQuantumToChar(q->red)-
(ssize_t) ScaleQuantumToChar(GetPixelRed(p))]+
squares[(ssize_t) ScaleQuantumToChar(q->green)-
(ssize_t) ScaleQuantumToChar(GetPixelGreen(p))]+
squares[(ssize_t) ScaleQuantumToChar(q->blue)-
(ssize_t) ScaleQuantumToChar(GetPixelBlue(p))];
ratio=numerator/distance_squared;
sum+=SegmentPower(ratio);
}
if ((sum != 0.0) && ((1.0/sum) > local_minima))
{
/*
Classify this pixel.
*/
local_minima=1.0/sum;
SetPixelIndex(indexes+x,j);
}
}
}
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_Classify)
#endif
proceed=SetImageProgress(image,SegmentImageTag,progress++,
2*image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
status&=SyncImage(image);
/*
Relinquish resources.
*/
for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
{
next_cluster=cluster->next;
cluster=(Cluster *) RelinquishMagickMemory(cluster);
}
/* undo the pointer offset before freeing the squares table */
squares-=255;
free_squares=squares;
free_squares=(MagickRealType *) RelinquishMagickMemory(free_squares);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n s o l i d a t e C r o s s i n g s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConsolidateCrossings() guarantees that an even number of zero crossings
% always lie between two crossings.
%
% The format of the ConsolidateCrossings method is:
%
% ConsolidateCrossings(ZeroCrossing *zero_crossing,
% const size_t number_crossings)
%
% A description of each parameter follows.
%
% o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
% o number_crossings: This size_t specifies the number of elements
% in the zero_crossing array.
%
*/
static inline ssize_t MagickAbsoluteValue(const ssize_t x)
{
  /* Absolute value of a signed size. */
  return(x >= 0 ? x : -x);
}
static inline ssize_t MagickMax(const ssize_t x,const ssize_t y)
{
  /* Larger of two signed sizes. */
  return(x > y ? x : y);
}
static inline ssize_t MagickMin(const ssize_t x,const ssize_t y)
{
  /* Smaller of two signed sizes. */
  return(x < y ? x : y);
}
/*
  Walk the scales from coarse to fine and, at each scale i, move every
  crossing of zero_crossing[i] to the position (center, left or right
  candidate taken from the next-coarser scale i+1) that keeps an even
  number of crossings between intervals.
  NOTE(review): zero_crossing[i+1] is read for i up to number_crossings-1,
  so the array must hold at least number_crossings+1 valid entries --
  confirm the allocation at the call site.
*/
static void ConsolidateCrossings(ZeroCrossing *zero_crossing,
const size_t number_crossings)
{
register ssize_t
i,
j,
k,
l;
ssize_t
center,
correct,
count,
left,
right;
/*
Consolidate zero crossings.
*/
for (i=(ssize_t) number_crossings-1; i >= 0; i--)
for (j=0; j <= 255; j++)
{
if (zero_crossing[i].crossings[j] == 0)
continue;
/*
Find the entry that is closest to j and still preserves the
property that there are an even number of crossings between
intervals.
*/
for (k=j-1; k > 0; k--)
if (zero_crossing[i+1].crossings[k] != 0)
break;
left=MagickMax(k,0);
center=j;
for (k=j+1; k < 255; k++)
if (zero_crossing[i+1].crossings[k] != 0)
break;
right=MagickMin(k,255);
/*
K is the zero crossing just left of j.
*/
for (k=j-1; k > 0; k--)
if (zero_crossing[i].crossings[k] != 0)
break;
/* defensive: the loop above cannot drive k below 0 */
if (k < 0)
k=0;
/*
Check center for an even number of crossings between k and j.
*/
correct=(-1);
if (zero_crossing[i+1].crossings[j] != 0)
{
count=0;
for (l=k+1; l < center; l++)
if (zero_crossing[i+1].crossings[l] != 0)
count++;
if (((count % 2) == 0) && (center != k))
correct=center;
}
/*
Check left for an even number of crossings between k and j.
*/
if (correct == -1)
{
count=0;
for (l=k+1; l < left; l++)
if (zero_crossing[i+1].crossings[l] != 0)
count++;
if (((count % 2) == 0) && (left != k))
correct=left;
}
/*
Check right for an even number of crossings between k and j.
*/
if (correct == -1)
{
count=0;
for (l=k+1; l < right; l++)
if (zero_crossing[i+1].crossings[l] != 0)
count++;
if (((count % 2) == 0) && (right != k))
correct=right;
}
/* move the crossing to the chosen slot (or drop it if none fits) */
l=(ssize_t) zero_crossing[i].crossings[j];
zero_crossing[i].crossings[j]=0;
if (correct != -1)
zero_crossing[i].crossings[correct]=(short) l;
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e f i n e R e g i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DefineRegion() defines the left and right boundaries of a peak region.
%
% The format of the DefineRegion method is:
%
% ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
%
% A description of each parameter follows.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
% o extents: This pointer to an ExtentPacket represents the extents
% of a particular peak or valley of a color component.
%
*/
static ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
{
  /*
    Reset the extent to cover the whole histogram by default; the scan
    cursor extents->index persists across calls so successive calls walk
    through successive regions.
  */
  extents->left=0;
  extents->center=0.0;
  extents->right=255;
  /*
    Advance to the next peak (positive extrema entry).  If the cursor runs
    off the histogram no further region exists.
  */
  while ((extents->index <= 255) && (extrema[extents->index] <= 0))
    extents->index++;
  if (extents->index > 255)
    return(MagickFalse);
  extents->left=extents->index;
  /*
    The region ends just before the following valley (negative entry).
  */
  while ((extents->index <= 255) && (extrema[extents->index] >= 0))
    extents->index++;
  extents->right=extents->index-1;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e r i v a t i v e H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DerivativeHistogram() determines the derivative of the histogram using
% central differencing.
%
% The format of the DerivativeHistogram method is:
%
% DerivativeHistogram(const MagickRealType *histogram,
% MagickRealType *derivative)
%
% A description of each parameter follows.
%
% o histogram: Specifies an array of MagickRealTypes representing the number
% of pixels for each intensity of a particular color component.
%
% o derivative: This array of MagickRealTypes is initialized by
% DerivativeHistogram to the derivative of the histogram using central
% differencing.
%
*/
static void DerivativeHistogram(const MagickRealType *histogram,
  MagickRealType *derivative)
{
  register ssize_t
    i,
    n;

  /*
    Endpoints: one-sided second-order (three point) estimates.
  */
  n=255;
  derivative[0]=(-1.5*histogram[0]+2.0*histogram[1]-0.5*histogram[2]);
  derivative[n]=(0.5*histogram[n-2]-2.0*histogram[n-1]+1.5*histogram[n]);
  /*
    Interior bins: central difference f'(i) ~ (f(i+1)-f(i-1))/2.
  */
  for (i=n-1; i >= 1; i--)
    derivative[i]=0.5*(histogram[i+1]-histogram[i-1]);
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e D y n a m i c T h r e s h o l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDynamicThreshold() returns the dynamic threshold for an image.
%
% The format of the GetImageDynamicThreshold method is:
%
% MagickBooleanType GetImageDynamicThreshold(const Image *image,
% const double cluster_threshold,const double smooth_threshold,
% MagickPixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cluster_threshold: This MagickRealType represents the minimum number of
% pixels contained in a hexahedra before it can be considered valid
% (expressed as a percentage).
%
% o smooth_threshold: the smoothing threshold eliminates noise in the second
% derivative of the histogram. As the value is increased, you can expect a
% smoother second derivative.
%
% o pixel: return the dynamic threshold here.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageDynamicThreshold(const Image *image,
  const double cluster_threshold,const double smooth_threshold,
  MagickPixelPacket *pixel,ExceptionInfo *exception)
{
  Cluster
    *background,
    *cluster,
    *object,
    *head,
    *last_cluster,
    *next_cluster;

  ExtentPacket
    blue,
    green,
    red;

  MagickBooleanType
    proceed;

  MagickRealType
    threshold;

  register const PixelPacket
    *p;

  register ssize_t
    i,
    x;

  short
    *extrema[MaxDimension];

  ssize_t
    count,
    *histogram[MaxDimension],
    y;

  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  GetMagickPixelPacket(image,pixel);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256UL,sizeof(**histogram));
    /*
      Fix: extrema was allocated with sizeof(**histogram) (ssize_t) rather
      than sizeof(**extrema) (short) — harmless over-allocation, corrected.
    */
    extrema[i]=(short *) AcquireQuantumMemory(256UL,sizeof(**extrema));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        for (i-- ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
        return(MagickFalse);
      }
  }
  /*
    Initialize histogram and locate peaks/valleys per channel.
  */
  InitializeHistogram(image,histogram,exception);
  (void) OptimalTau(histogram[Red],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Blue]);
  /*
    Form clusters: one cluster per (red,green,blue) extrema region triple.
  */
  cluster=(Cluster *) NULL;
  head=(Cluster *) NULL;
  (void) ResetMagickMemory(&red,0,sizeof(red));
  (void) ResetMagickMemory(&green,0,sizeof(green));
  (void) ResetMagickMemory(&blue,0,sizeof(blue));
  while (DefineRegion(extrema[Red],&red) != 0)
  {
    green.index=0;
    while (DefineRegion(extrema[Green],&green) != 0)
    {
      blue.index=0;
      while (DefineRegion(extrema[Blue],&blue) != 0)
      {
        /*
          Allocate a new class.
        */
        if (head != (Cluster *) NULL)
          {
            cluster->next=(Cluster *) AcquireMagickMemory(
              sizeof(*cluster->next));
            cluster=cluster->next;
          }
        else
          {
            cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
            head=cluster;
          }
        if (cluster == (Cluster *) NULL)
          {
            /*
              Fix: release the partially built cluster list and the
              histogram/extrema arrays (the original leaked them here).
            */
            for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
            {
              next_cluster=cluster->next;
              cluster=(Cluster *) RelinquishMagickMemory(cluster);
            }
            for (i=0; i < MaxDimension; i++)
            {
              extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
              histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
            }
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'",
              image->filename);
            return(MagickFalse);
          }
        /*
          Initialize a new class.
        */
        cluster->count=0;
        cluster->red=red;
        cluster->green=green;
        cluster->blue=blue;
        cluster->next=(Cluster *) NULL;
      }
    }
  }
  if (head == (Cluster *) NULL)
    {
      /*
        No classes were identified-- create one.
      */
      cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
      if (cluster == (Cluster *) NULL)
        {
          /*
            Fix: release histogram/extrema arrays (the original leaked them).
          */
          for (i=0; i < MaxDimension; i++)
          {
            extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
            histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
          }
          (void) ThrowMagickException(exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
          return(MagickFalse);
        }
      /*
        Initialize a new class.
      */
      cluster->count=0;
      cluster->red=red;
      cluster->green=green;
      cluster->blue=blue;
      cluster->next=(Cluster *) NULL;
      head=cluster;
    }
  /*
    Count the pixels for each cluster.  A pixel belongs to the first cluster
    whose RGB extent (widened by SafeMargin) contains it.
  */
  count=0;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        if (((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) >=
             (cluster->red.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) <=
             (cluster->red.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) >=
             (cluster->green.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) <=
             (cluster->green.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) >=
             (cluster->blue.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) <=
             (cluster->blue.right+SafeMargin)))
          {
            /*
              Count this pixel.
            */
            count++;
            cluster->red.center+=(MagickRealType)
              ScaleQuantumToChar(GetPixelRed(p));
            cluster->green.center+=(MagickRealType)
              ScaleQuantumToChar(GetPixelGreen(p));
            cluster->blue.center+=(MagickRealType)
              ScaleQuantumToChar(GetPixelBlue(p));
            cluster->count++;
            break;
          }
      p++;
    }
    proceed=SetImageProgress(image,SegmentImageTag,(MagickOffsetType) y,
      2*image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Remove clusters that do not meet minimum cluster threshold.
    NOTE(review): `count` is reset here, so the threshold compares against
    the number of clusters retained so far, not the total pixel count —
    this matches the upstream implementation, but verify the intent.
  */
  count=0;
  last_cluster=head;
  next_cluster=head;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    if ((cluster->count > 0) &&
        (cluster->count >= (count*cluster_threshold/100.0)))
      {
        /*
          Initialize cluster: finalize its centroid.
        */
        cluster->id=count;
        cluster->red.center/=cluster->count;
        cluster->green.center/=cluster->count;
        cluster->blue.center/=cluster->count;
        count++;
        last_cluster=cluster;
        continue;
      }
    /*
      Delete cluster.
    */
    if (cluster == head)
      head=next_cluster;
    else
      last_cluster->next=next_cluster;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  /*
    Pick the smallest cluster as the object and the largest as background.
  */
  object=head;
  background=head;
  if (count > 1)
    {
      object=head->next;
      for (cluster=object; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count < object->count)
          object=cluster;
        cluster=cluster->next;
      }
      background=head->next;
      for (cluster=background; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count > background->count)
          background=cluster;
        cluster=cluster->next;
      }
    }
  /*
    The dynamic threshold lies midway between object and background centers.
  */
  threshold=(background->red.center+object->red.center)/2.0;
  pixel->red=(MagickRealType) ScaleCharToQuantum((unsigned char)
    (threshold+0.5));
  threshold=(background->green.center+object->green.center)/2.0;
  pixel->green=(MagickRealType) ScaleCharToQuantum((unsigned char)
    (threshold+0.5));
  threshold=(background->blue.center+object->blue.center)/2.0;
  pixel->blue=(MagickRealType) ScaleCharToQuantum((unsigned char)
    (threshold+0.5));
  /*
    Relinquish resources.
  */
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ I n i t i a l i z e H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InitializeHistogram() computes the histogram for an image.
%
% The format of the InitializeHistogram method is:
%
% InitializeHistogram(const Image *image,ssize_t **histogram)
%
% A description of each parameter follows.
%
% o image: Specifies a pointer to an Image structure; returned from
% ReadImage.
%
% o histogram: Specifies an array of integers representing the number
% of pixels for each intensity of a particular color component.
%
*/
static void InitializeHistogram(const Image *image,ssize_t **histogram,
  ExceptionInfo *exception)
{
  register const PixelPacket
    *p;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  /*
    Zero the red, green, and blue channel histograms.
  */
  for (i=0; i < 256; i++)
    histogram[Red][i]=histogram[Green][i]=histogram[Blue][i]=0;
  /*
    Tally one count per channel for every pixel in the image.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      histogram[Red][(ssize_t) ScaleQuantumToChar(GetPixelRed(p))]++;
      histogram[Green][(ssize_t) ScaleQuantumToChar(GetPixelGreen(p))]++;
      histogram[Blue][(ssize_t) ScaleQuantumToChar(GetPixelBlue(p))]++;
      p++;
    }
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ I n i t i a l i z e I n t e r v a l T r e e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InitializeIntervalTree() initializes an interval tree from the lists of
% zero crossings.
%
% The format of the InitializeIntervalTree method is:
%
%      IntervalTree *InitializeIntervalTree(const ZeroCrossing *zero_crossing,
%        const size_t number_crossings)
%
% A description of each parameter follows.
%
% o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
% o number_crossings: This size_t specifies the number of elements
% in the zero_crossing array.
%
*/
static void InitializeList(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  /*
    Append every leaf (childless node) of the tree to the list, visiting
    the node, then its sibling chain, then its children.
  */
  if (node != (IntervalTree *) NULL)
    {
      if (node->child == (IntervalTree *) NULL)
        {
          list[*number_nodes]=node;
          (*number_nodes)++;
        }
      InitializeList(list,number_nodes,node->sibling);
      InitializeList(list,number_nodes,node->child);
    }
}
static void MeanStability(IntervalTree *node)
{
  /*
    Set each node's mean_stability to the average stability of its
    immediate children (0.0 for leaves); recurse over the whole tree.
  */
  if (node == (IntervalTree *) NULL)
    return;
  node->mean_stability=0.0;
  if (node->child != (IntervalTree *) NULL)
    {
      register IntervalTree
        *child;

      register ssize_t
        count;

      register MagickRealType
        sum;

      sum=0.0;
      count=0;
      child=node->child;
      while (child != (IntervalTree *) NULL)
      {
        sum+=child->stability;
        count++;
        child=child->sibling;
      }
      node->mean_stability=sum/(MagickRealType) count;
    }
  MeanStability(node->sibling);
  MeanStability(node->child);
}
static void Stability(IntervalTree *node)
{
  /*
    A node's stability is the tau gap between it and its first child;
    leaves get 0.0.  Recurse over siblings and children.
  */
  if (node == (IntervalTree *) NULL)
    return;
  node->stability=node->child == (IntervalTree *) NULL ? 0.0 :
    node->tau-(node->child)->tau;
  Stability(node->sibling);
  Stability(node->child);
}
static IntervalTree *InitializeIntervalTree(const ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  IntervalTree
    *head,
    **list,
    *node,
    *root;

  register ssize_t
    i;

  ssize_t
    j,
    k,
    left,
    number_nodes;

  /*
    Allocate interval tree.
  */
  list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
    sizeof(*list));
  if (list == (IntervalTree **) NULL)
    return((IntervalTree *) NULL);
  /*
    The root is the entire histogram.
  */
  root=(IntervalTree *) AcquireMagickMemory(sizeof(*root));
  if (root == (IntervalTree *) NULL)
    {
      /*
        Fix: the original dereferenced root without a NULL check and
        leaked the node list on allocation failure.
      */
      list=(IntervalTree **) RelinquishMagickMemory(list);
      return((IntervalTree *) NULL);
    }
  root->child=(IntervalTree *) NULL;
  root->sibling=(IntervalTree *) NULL;
  root->tau=0.0;
  root->left=0;
  root->right=255;
  for (i=(-1); i < (ssize_t) number_crossings; i++)
  {
    /*
      Initialize list with all nodes with no children.
    */
    number_nodes=0;
    InitializeList(list,&number_nodes,root);
    /*
      Split each leaf interval at the zero crossings of the next
      (finer) scale.
      NOTE(review): the node allocations below are unchecked, as in the
      original; an OOM here still dereferences NULL — confirm policy.
    */
    for (j=0; j < number_nodes; j++)
    {
      head=list[j];
      left=head->left;
      node=head;
      for (k=head->left+1; k < head->right; k++)
      {
        if (zero_crossing[i+1].crossings[k] != 0)
          {
            if (node == head)
              {
                node->child=(IntervalTree *) AcquireMagickMemory(
                  sizeof(*node->child));
                node=node->child;
              }
            else
              {
                node->sibling=(IntervalTree *) AcquireMagickMemory(
                  sizeof(*node->sibling));
                node=node->sibling;
              }
            node->tau=zero_crossing[i+1].tau;
            node->child=(IntervalTree *) NULL;
            node->sibling=(IntervalTree *) NULL;
            node->left=left;
            node->right=k;
            left=k;
          }
      }
      if (left != head->left)
        {
          /*
            Close the final sub-interval up to the parent's right edge.
          */
          node->sibling=(IntervalTree *) AcquireMagickMemory(
            sizeof(*node->sibling));
          node=node->sibling;
          node->tau=zero_crossing[i+1].tau;
          node->child=(IntervalTree *) NULL;
          node->sibling=(IntervalTree *) NULL;
          node->left=left;
          node->right=head->right;
        }
    }
  }
  /*
    Determine the stability: difference between a nodes tau and its child.
  */
  Stability(root->child);
  MeanStability(root->child);
  list=(IntervalTree **) RelinquishMagickMemory(list);
  return(root);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ O p t i m a l T a u %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OptimalTau() finds the optimal tau for each band of the histogram.
%
% The format of the OptimalTau method is:
%
% MagickRealType OptimalTau(const ssize_t *histogram,const double max_tau,
% const double min_tau,const double delta_tau,
% const double smooth_threshold,short *extrema)
%
% A description of each parameter follows.
%
% o histogram: Specifies an array of integers representing the number
% of pixels for each intensity of a particular color component.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
*/
static void ActiveNodes(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  /*
    Collect "active" nodes: those whose stability is at least the mean
    stability of their children.  When a node is active its subtree is
    not descended; otherwise the search continues into the children.
  */
  if (node == (IntervalTree *) NULL)
    return;
  if (node->stability < node->mean_stability)
    {
      ActiveNodes(list,number_nodes,node->sibling);
      ActiveNodes(list,number_nodes,node->child);
      return;
    }
  list[(*number_nodes)++]=node;
  ActiveNodes(list,number_nodes,node->sibling);
}
static void FreeNodes(IntervalTree *node)
{
  /*
    Post-order release of the whole subtree rooted at node.
  */
  if (node != (IntervalTree *) NULL)
    {
      FreeNodes(node->sibling);
      FreeNodes(node->child);
      node=(IntervalTree *) RelinquishMagickMemory(node);
    }
}
static MagickRealType OptimalTau(const ssize_t *histogram,const double max_tau,
  const double min_tau,const double delta_tau,const double smooth_threshold,
  short *extrema)
{
  IntervalTree
    **list,
    *node,
    *root;

  MagickBooleanType
    peak;

  MagickRealType
    average_tau,
    *derivative,
    *second_derivative,
    tau,
    value;

  register ssize_t
    i,
    x;

  size_t
    count,
    number_crossings;

  ssize_t
    index,
    j,
    k,
    number_nodes;

  ZeroCrossing
    *zero_crossing;

  /*
    Allocate interval tree.
  */
  list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
    sizeof(*list));
  if (list == (IntervalTree **) NULL)
    return(0.0);
  /*
    Allocate zero crossing list.
  */
  count=(size_t) ((max_tau-min_tau)/delta_tau)+2;
  zero_crossing=(ZeroCrossing *) AcquireQuantumMemory((size_t) count,
    sizeof(*zero_crossing));
  if (zero_crossing == (ZeroCrossing *) NULL)
    {
      /*
        Fix: the original leaked the node list on this failure path.
      */
      list=(IntervalTree **) RelinquishMagickMemory(list);
      return(0.0);
    }
  for (i=0; i < (ssize_t) count; i++)
    zero_crossing[i].tau=(-1.0);
  /*
    Initialize zero crossing list: one entry per tau, coarsest first.
  */
  derivative=(MagickRealType *) AcquireQuantumMemory(256,sizeof(*derivative));
  second_derivative=(MagickRealType *) AcquireQuantumMemory(256,
    sizeof(*second_derivative));
  if ((derivative == (MagickRealType *) NULL) ||
      (second_derivative == (MagickRealType *) NULL))
    ThrowFatalException(ResourceLimitFatalError,
      "UnableToAllocateDerivatives");
  i=0;
  for (tau=max_tau; tau >= min_tau; tau-=delta_tau)
  {
    zero_crossing[i].tau=tau;
    ScaleSpace(histogram,tau,zero_crossing[i].histogram);
    DerivativeHistogram(zero_crossing[i].histogram,derivative);
    DerivativeHistogram(derivative,second_derivative);
    ZeroCrossHistogram(second_derivative,smooth_threshold,
      zero_crossing[i].crossings);
    i++;
  }
  /*
    Add an entry for the original histogram.
  */
  zero_crossing[i].tau=0.0;
  for (j=0; j <= 255; j++)
    zero_crossing[i].histogram[j]=(MagickRealType) histogram[j];
  DerivativeHistogram(zero_crossing[i].histogram,derivative);
  DerivativeHistogram(derivative,second_derivative);
  ZeroCrossHistogram(second_derivative,smooth_threshold,
    zero_crossing[i].crossings);
  number_crossings=(size_t) i;
  derivative=(MagickRealType *) RelinquishMagickMemory(derivative);
  second_derivative=(MagickRealType *)
    RelinquishMagickMemory(second_derivative);
  /*
    Ensure the scale-space fingerprints form lines in scale-space, not loops.
  */
  ConsolidateCrossings(zero_crossing,number_crossings);
  /*
    Force endpoints to be included in the interval.
  */
  for (i=0; i <= (ssize_t) number_crossings; i++)
  {
    for (j=0; j < 255; j++)
      if (zero_crossing[i].crossings[j] != 0)
        break;
    zero_crossing[i].crossings[0]=(-zero_crossing[i].crossings[j]);
    for (j=255; j > 0; j--)
      if (zero_crossing[i].crossings[j] != 0)
        break;
    zero_crossing[i].crossings[255]=(-zero_crossing[i].crossings[j]);
  }
  /*
    Initialize interval tree.
  */
  root=InitializeIntervalTree(zero_crossing,number_crossings);
  if (root == (IntervalTree *) NULL)
    {
      /*
        Fix: the original leaked both the node list and the zero
        crossing list on this failure path.
      */
      zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing);
      list=(IntervalTree **) RelinquishMagickMemory(list);
      return(0.0);
    }
  /*
    Find active nodes: stability is greater (or equal) to the mean stability of
    its children.
  */
  number_nodes=0;
  ActiveNodes(list,&number_nodes,root->child);
  /*
    Initialize extrema.
  */
  for (i=0; i <= 255; i++)
    extrema[i]=0;
  for (i=0; i < number_nodes; i++)
  {
    /*
      Find this tau in zero crossings list.
    */
    k=0;
    node=list[i];
    for (j=0; j <= (ssize_t) number_crossings; j++)
      if (zero_crossing[j].tau == node->tau)
        k=j;
    /*
      Find the value of the peak.
    */
    peak=zero_crossing[k].crossings[node->right] == -1 ? MagickTrue :
      MagickFalse;
    index=node->left;
    value=zero_crossing[k].histogram[index];
    for (x=node->left; x <= node->right; x++)
    {
      if (peak != MagickFalse)
        {
          if (zero_crossing[k].histogram[x] > value)
            {
              value=zero_crossing[k].histogram[x];
              index=x;
            }
        }
      else
        if (zero_crossing[k].histogram[x] < value)
          {
            value=zero_crossing[k].histogram[x];
            index=x;
          }
    }
    for (x=node->left; x <= node->right; x++)
    {
      if (index == 0)
        index=256;
      if (peak != MagickFalse)
        extrema[x]=(short) index;
      else
        extrema[x]=(short) (-index);
    }
  }
  /*
    Determine the average tau.
  */
  average_tau=0.0;
  for (i=0; i < number_nodes; i++)
    average_tau+=list[i]->tau;
  /*
    Fix: guard the division so an empty active-node list yields 0.0
    instead of a NaN (0.0/0.0).
  */
  if (number_nodes != 0)
    average_tau/=(MagickRealType) number_nodes;
  /*
    Relinquish resources.
  */
  FreeNodes(root);
  zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing);
  list=(IntervalTree **) RelinquishMagickMemory(list);
  return(average_tau);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S c a l e S p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleSpace() performs a scale-space filter on the 1D histogram.
%
% The format of the ScaleSpace method is:
%
% ScaleSpace(const ssize_t *histogram,const MagickRealType tau,
% MagickRealType *scale_histogram)
%
% A description of each parameter follows.
%
%    o histogram: Specifies an array of ssize_t values representing the number
%      of pixels for each intensity of a particular color component.
%
*/
static void ScaleSpace(const ssize_t *histogram,const MagickRealType tau,
  MagickRealType *scale_histogram)
{
  MagickRealType
    alpha,
    beta,
    *gamma,
    sum;

  register ssize_t
    u,
    x;

  /*
    Build a Gaussian kernel with standard deviation tau, truncated once
    its tail drops below MagickEpsilon (remaining entries stay zero).
  */
  gamma=(MagickRealType *) AcquireQuantumMemory(256,sizeof(*gamma));
  if (gamma == (MagickRealType *) NULL)
    ThrowFatalException(ResourceLimitFatalError,
      "UnableToAllocateGammaMap");
  alpha=1.0/(tau*sqrt(2.0*MagickPI));
  beta=(-1.0/(2.0*tau*tau));
  for (x=0; x < 256; x++)
    gamma[x]=0.0;
  for (x=0; x < 256; x++)
  {
    gamma[x]=exp((double) beta*x*x);
    if (gamma[x] < MagickEpsilon)
      break;
  }
  /*
    Convolve the histogram with the kernel (gamma is symmetric, so the
    absolute index difference selects the weight).
  */
  for (x=0; x < 256; x++)
  {
    sum=0.0;
    for (u=0; u < 256; u++)
      sum+=(MagickRealType) histogram[u]*gamma[MagickAbsoluteValue(x-u)];
    scale_histogram[x]=alpha*sum;
  }
  gamma=(MagickRealType *) RelinquishMagickMemory(gamma);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e g m e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SegmentImage() segment an image by analyzing the histograms of the color
% components and identifying units that are homogeneous with the fuzzy
% C-means technique.
%
% The format of the SegmentImage method is:
%
% MagickBooleanType SegmentImage(Image *image,
% const ColorspaceType colorspace,const MagickBooleanType verbose,
% const double cluster_threshold,const double smooth_threshold)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o colorspace: Indicate the colorspace.
%
% o verbose: Set to MagickTrue to print detailed information about the
% identified classes.
%
% o cluster_threshold: This represents the minimum number of pixels
% contained in a hexahedra before it can be considered valid (expressed
% as a percentage).
%
% o smooth_threshold: the smoothing threshold eliminates noise in the second
% derivative of the histogram. As the value is increased, you can expect a
% smoother second derivative.
%
*/
MagickExport MagickBooleanType SegmentImage(Image *image,
  const ColorspaceType colorspace,const MagickBooleanType verbose,
  const double cluster_threshold,const double smooth_threshold)
{
  MagickBooleanType
    status;

  register ssize_t
    i;

  short
    *extrema[MaxDimension];

  ssize_t
    *histogram[MaxDimension];

  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256,sizeof(**histogram));
    extrema[i]=(short *) AcquireQuantumMemory(256,sizeof(**extrema));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        for (i-- ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename)
      }
  }
  /*
    Segment in the requested colorspace.
  */
  if (IsRGBColorspace(colorspace) == MagickFalse)
    (void) TransformImageColorspace(image,colorspace);
  /*
    Initialize histogram.
  */
  InitializeHistogram(image,histogram,&image->exception);
  (void) OptimalTau(histogram[Red],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Blue]);
  /*
    Classify using the fuzzy c-Means technique.
  */
  status=Classify(image,extrema,cluster_threshold,WeightingExponent,verbose);
  /*
    Fix: restore the image to RGB after classification.  The original
    re-applied `colorspace`, a no-op since the image was already
    transformed to it above.
  */
  if (IsRGBColorspace(colorspace) == MagickFalse)
    (void) TransformImageColorspace(image,RGBColorspace);
  /*
    Relinquish resources.
  */
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Z e r o C r o s s H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ZeroCrossHistogram() find the zero crossings in a histogram and marks
% directions as: 1 is negative to positive; 0 is zero crossing; and -1
% is positive to negative.
%
% The format of the ZeroCrossHistogram method is:
%
% ZeroCrossHistogram(MagickRealType *second_derivative,
% const MagickRealType smooth_threshold,short *crossings)
%
% A description of each parameter follows.
%
% o second_derivative: Specifies an array of MagickRealTypes representing the
% second derivative of the histogram of a particular color component.
%
% o crossings: This array of integers is initialized with
% -1, 0, or 1 representing the slope of the first derivative of the
% of a particular color component.
%
*/
static void ZeroCrossHistogram(MagickRealType *second_derivative,
  const MagickRealType smooth_threshold,short *crossings)
{
  register ssize_t
    i;

  ssize_t
    parity;

  /*
    Clamp values inside the smoothing band to zero to suppress noise.
  */
  for (i=0; i < 256; i++)
    if ((second_derivative[i] < smooth_threshold) &&
        (second_derivative[i] >= -smooth_threshold))
      second_derivative[i]=0.0;
  /*
    Walk the histogram and mark sign changes: -1 for positive-to-negative,
    +1 for negative-to-positive, 0 elsewhere.
  */
  parity=0;
  for (i=0; i < 256; i++)
  {
    crossings[i]=0;
    if (second_derivative[i] > 0.0)
      {
        if (parity < 0)
          crossings[i]=1;
        parity=(-1);
        continue;
      }
    if (second_derivative[i] < 0.0)
      {
        if (parity > 0)
          crossings[i]=(-1);
        parity=1;
      }
  }
}
|
point_outlier.h | /****************************************************************************
* VCGLib o o *
* Visual and Computer Graphics Library o o *
* _ O _ *
* Copyright(C) 2004-2016 \/)\/ *
* Visual Computing Lab /\/| *
* ISTI - Italian National Research Council | *
* \ *
* All rights reserved. *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License (http://www.gnu.org/licenses/gpl.txt) *
* for more details. *
* *
****************************************************************************/
#ifndef VCG_TRI_OUTLIERS__H
#define VCG_TRI_OUTLIERS__H
#include <algorithm>

#include <vcg/space/index/kdtree/kdtree.h>
namespace vcg
{
namespace tri
{
template <class MeshType>
class OutlierRemoval
{
public:
  typedef typename MeshType::ScalarType ScalarType;
  typedef typename vcg::KdTree<ScalarType> KdTreeType;
  typedef typename vcg::KdTree<ScalarType>::PriorityQueue PriorityQueue;

  /**
    Compute an outlier probability value for each vertex of the mesh using
    the approach in the paper "LoOP: Local Outlier Probabilities".  The
    outlier probability is stored in the per-vertex attribute
    "outlierScore" (range [0,1]).  It uses the input kdtree to find the
    kNearest neighbours of each vertex.

    "LoOP: local outlier probabilities" by Hans-Peter Kriegel et al.,
    Proceedings of the 18th ACM conference on Information and knowledge
    management.
  */
  static void ComputeLoOPScore(MeshType& mesh, KdTreeType& kdTree, int kNearest)
  {
    vcg::tri::RequireCompactness(mesh);
    typename MeshType::template PerVertexAttributeHandle<ScalarType> outlierScore = tri::Allocator<MeshType>:: template GetPerVertexAttribute<ScalarType>(mesh, std::string("outlierScore"));
    typename MeshType::template PerVertexAttributeHandle<ScalarType> sigma = tri::Allocator<MeshType>:: template GetPerVertexAttribute<ScalarType>(mesh, std::string("sigma"));
    typename MeshType::template PerVertexAttributeHandle<ScalarType> plof = tri::Allocator<MeshType>:: template GetPerVertexAttribute<ScalarType>(mesh, std::string("plof"));

    // Pass 1: sigma[i] = sqrt(mean of the k nearest-neighbour weights) —
    // the "standard distance" of the LoOP paper.
    // NOTE(review): an unsigned loop variable in `omp parallel for`
    // requires OpenMP >= 3.0 — confirm against the supported toolchains.
#pragma omp parallel for schedule(dynamic, 10)
    for (size_t i = 0; i < mesh.vert.size(); i++)
    {
      PriorityQueue queue;
      kdTree.doQueryK(mesh.vert[i].cP(), kNearest, queue);
      ScalarType sum = 0;
      for (int j = 0; j < queue.getNofElements(); j++)
        sum += queue.getWeight(j);
      sum /= (queue.getNofElements());
      sigma[i] = sqrt(sum);
    }

    // Pass 2: probabilistic local outlier factor (PLOF) — each vertex's
    // sigma relative to the mean sigma of its neighbourhood; accumulate
    // the squared PLOF to normalize in the next pass.
    float mean = 0;
#pragma omp parallel for reduction(+: mean) schedule(dynamic, 10)
    for (size_t i = 0; i < mesh.vert.size(); i++)
    {
      PriorityQueue queue;
      kdTree.doQueryK(mesh.vert[i].cP(), kNearest, queue);
      ScalarType sum = 0;
      for (int j = 0; j < queue.getNofElements(); j++)
        sum += sigma[queue.getIndex(j)];
      sum /= (queue.getNofElements());
      plof[i] = sigma[i] / sum - 1.0f;
      mean += plof[i] * plof[i];
    }
    mean /= mesh.vert.size();
    mean = sqrt(mean);

    // Pass 3: map the normalized PLOF to a probability with the erf
    // rational approximation (Abramowitz & Stegun eq. 7.1.25).
#pragma omp parallel for schedule(dynamic, 10)
    for (size_t i = 0; i < mesh.vert.size(); i++)
    {
      ScalarType value = plof[i] / (mean * sqrt(2.0f));
      double dem = 1.0 + 0.278393 * value;
      dem += 0.230389 * value * value;
      dem += 0.000972 * value * value * value;
      dem += 0.078108 * value * value * value * value;
      // Fix: qualify max explicitly — the original relied on an
      // unqualified `max` being provided by a transitive include.
      ScalarType op = std::max(0.0, 1.0 - 1.0 / dem);
      outlierScore[i] = op;
    }
    tri::Allocator<MeshType>::DeletePerVertexAttribute(mesh, std::string("sigma"));
    tri::Allocator<MeshType>::DeletePerVertexAttribute(mesh, std::string("plof"));
  }

  /**
    Select all the vertices of the mesh with an outlier probability above
    the input threshold [0.0, 1.0].  Returns the number of vertices
    selected by this call.
  */
  static int SelectLoOPOutliers(MeshType& mesh, KdTreeType& kdTree, int kNearest, float threshold)
  {
    ComputeLoOPScore(mesh, kdTree, kNearest);
    int count = 0;
    typename MeshType:: template PerVertexAttributeHandle<ScalarType> outlierScore = tri::Allocator<MeshType>::template GetPerVertexAttribute<ScalarType>(mesh, std::string("outlierScore"));
    for (size_t i = 0; i < mesh.vert.size(); i++)
    {
      if (outlierScore[i] > threshold)
      {
        mesh.vert[i].SetS();
        count++;
      }
    }
    return count;
  }

  /**
    Delete all the vertices of the mesh with an outlier probability above
    the input threshold [0.0, 1.0].
    NOTE(review): the return value is m.vn - ovn, i.e. zero or NEGATIVE
    (minus the number of deleted vertices); kept for compatibility —
    confirm callers expect this sign.
  */
  static int DeleteLoOPOutliers(MeshType& m, KdTreeType& kdTree, int kNearest, float threshold)
  {
    SelectLoOPOutliers(m,kdTree,kNearest,threshold);
    int ovn = m.vn;
    for(typename MeshType::VertexIterator vi=m.vert.begin();vi!=m.vert.end();++vi)
      if((*vi).IsS() ) tri::Allocator<MeshType>::DeleteVertex(m,*vi);
    tri::Allocator<MeshType>::CompactVertexVector(m);
    tri::Allocator<MeshType>::DeletePerVertexAttribute(m, std::string("outlierScore"));
    return m.vn - ovn;
  }
};
} // end namespace tri
} // end namespace vcg
#endif // VCG_TRI_OUTLIERS__H
|
pt_to_pt_pingping.c | /*****************************************************************************
* *
* Mixed-mode OpenMP/MPI MicroBenchmark Suite - Version 1.0 *
* *
* produced by *
* *
* Mark Bull, Jim Enright and Fiona Reid *
* *
* at *
* *
* Edinburgh Parallel Computing Centre *
* *
* email: markb@epcc.ed.ac.uk, fiona@epcc.ed.ac.uk *
* *
* *
* Copyright 2012, The University of Edinburgh *
* *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); *
* you may not use this file except in compliance with the License. *
* You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
* *
****************************************************************************/
/*-----------------------------------------------------------*/
/* Contains the point-to-point pingping mixed mode */
/* OpenMP/MPI benchmarks. */
/* This includes: -masteronly pingping */
/* -funnelled pingping */
/* -multiple pingping */
/*-----------------------------------------------------------*/
#include "pt_to_pt_pingping.h"
/*-----------------------------------------------------------*/
/* pingPing */
/* */
/* Driver subroutine for the pingping benchmark. */
/*-----------------------------------------------------------*/
int pingPing(int benchmarkType) {
  int dataSizeIter; /* message size (elements per thread) for this sweep */
  int sameNode;

  /* The benchmark always runs between the two ranks selected in the
   * PPRanks global array (set during harness setup — see the header). */
  pingRankA = PPRanks[0];
  pingRankB = PPRanks[1];
  /* Check if pingRankA and pingRankB are on the same node */
  sameNode = compareProcNames(pingRankA, pingRankB);
  if (myMPIRank == 0) {
    /* print message saying if benchmark is inter or intra node */
    printNodeReport(sameNode, pingRankA, pingRankB);
    /* then print report column headings. */
    printBenchHeader();
  }
  /* initialise repsToDo to defaultReps at start of benchmark */
  repsToDo = defaultReps;
  /* Loop over data sizes, doubling each iteration (see loop footer). */
  dataSizeIter = minDataSize; /* initialise dataSizeIter to minDataSize */
  while (dataSizeIter <= maxDataSize) {
    /* set sizeofBuffer: total elements = per-thread size * thread count */
    sizeofBuffer = dataSizeIter * numThreads;
    /* Allocate space for main data arrays */
    allocatePingpingData(sizeofBuffer);
    /* warm-up sweep for the selected benchmarkType (results discarded) */
    if (benchmarkType == MASTERONLY) {
      /* Masteronly warm-up sweep */
      masteronlyPingping(warmUpIters, dataSizeIter);
    } else if (benchmarkType == FUNNELLED) {
      /* perform funnelled warm-up sweep */
      funnelledPingping(warmUpIters, dataSizeIter);
    } else if (benchmarkType == MULTIPLE) {
      multiplePingping(warmUpIters, dataSizeIter);
    }
    /* perform verification test for the pingping */
    testPingping(sizeofBuffer, dataSizeIter);
    /* Initialise benchmark */
    benchComplete = FALSE;
    /* keep executing benchmark until target time is reached;
     * repsToDo is scaled up by repTimeCheck until totalTime is long
     * enough for a reliable measurement. */
    while (benchComplete != TRUE) {
      /* Start the timer...MPI_Barrier to synchronise */
      MPI_Barrier(comm);
      startTime = MPI_Wtime();
      if (benchmarkType == MASTERONLY) {
        /* execute for repsToDo repetitions */
        masteronlyPingping(repsToDo, dataSizeIter);
      } else if (benchmarkType == FUNNELLED) {
        funnelledPingping(repsToDo, dataSizeIter);
      } else if (benchmarkType == MULTIPLE) {
        multiplePingping(repsToDo, dataSizeIter);
      }
      /* Stop the timer...MPI_Barrier to synchronise processes */
      MPI_Barrier(comm);
      finishTime = MPI_Wtime();
      totalTime = finishTime - startTime;
      /* Call repTimeCheck function to test if target time is reached;
       * only rank 0 decides, then broadcasts the decision. */
      if (myMPIRank == 0) { benchComplete = repTimeCheck(totalTime, repsToDo); }
      /* Ensure all procs have the same value of benchComplete */
      /* and repsToDo */
      MPI_Bcast(&benchComplete, 1, MPI_INT, 0, comm);
      MPI_Bcast(&repsToDo, 1, MPI_INT, 0, comm);
    }
    /* Master process sets benchmark results */
    if (myMPIRank == 0) {
      setReportParams(dataSizeIter, repsToDo, totalTime);
      printReport();
    }
    /* Free the allocated space for the main data arrays */
    freePingpingData();
    /* Update dataSize before the next iteration */
    dataSizeIter = dataSizeIter * 2; /* double data size */
  }
  return 0;
}
/*-----------------------------------------------------------*/
/* masteronlyPingping */
/* */
/* Two processes send a message to each other using the */
/* MPI_Isend, MPI_Recv and MPI_Wait routines. */
/* Inter-process communication takes place outside of the */
/* parallel region. */
/*-----------------------------------------------------------*/
int masteronlyPingping(int totalReps, int dataSize) {
/* Master-only pingping: the two participating MPI processes exchange a
 * sizeofBuffer-element message totalReps times. All MPI communication
 * happens outside the OpenMP parallel regions; threads only fill and
 * drain the buffers. Always returns 0 (MPI return codes are unchecked).
 * NOTE(review): relies on file-scope globals (myMPIRank, pingRankA/B,
 * sizeofBuffer, comm, requestID, status, TAG, ...) and on myThreadID
 * being threadprivate elsewhere -- otherwise default(none) below would
 * reject its use. Confirm against declarations outside this chunk. */
int repIter, i;
int destRank;
/* set destRank to ID of other process */
if (myMPIRank == pingRankA) {
destRank = pingRankB;
} else if (myMPIRank == pingRankB) {
destRank = pingRankA;
}
/* NOTE(review): destRank stays uninitialized on any other rank; it is
 * only read inside the rank-guarded block below, so this is benign. */
for (repIter = 0; repIter < totalReps; repIter++) {
if (myMPIRank == pingRankA || myMPIRank == pingRankB) {
/* Each thread writes its globalID to pingSendBuf
 * using a PARALLEL DO directive.
 */
#pragma omp parallel for default(none) private(i) \
shared(pingSendBuf, dataSize, sizeofBuffer, globalIDarray) \
schedule(static, dataSize)
for (i = 0; i < sizeofBuffer; i++) {
pingSendBuf[i] = globalIDarray[myThreadID];
}
/* Process calls non-blocking send to start transfer of pingSendBuf
 * to the other process.
 */
MPI_Isend(pingSendBuf, sizeofBuffer, MPI_INT, destRank, TAG, comm,
&requestID);
/* Process then waits for message from other process. */
MPI_Recv(pingRecvBuf, sizeofBuffer, MPI_INT, destRank, TAG, comm,
&status);
/* Finish the Send operation with an MPI_Wait */
MPI_Wait(&requestID, &status);
/* Each thread under the MPI process now reads its part of the
 * received buffer.
 */
#pragma omp parallel for default(none) private(i) \
shared(finalRecvBuf, dataSize, sizeofBuffer, pingRecvBuf) \
schedule(static, dataSize)
for (i = 0; i < sizeofBuffer; i++) { finalRecvBuf[i] = pingRecvBuf[i]; }
}
}
return 0;
}
/*-----------------------------------------------------------*/
/* funnelledPingping                                         */
/* */
/* Two processes send a message to each other using the */
/* MPI_Isend, MPI_Recv and MPI_Wait routines. */
/* Inter-process communication takes place inside the */
/* OpenMP parallel region. */
/*-----------------------------------------------------------*/
int funnelledPingping(int totalReps, int dataSize) {
/* Funnelled pingping: one long-lived OpenMP parallel region spans all
 * totalReps iterations; only the master thread performs the MPI calls
 * (MPI_THREAD_FUNNELLED model) while all threads fill/read buffers.
 * Always returns 0 (MPI return codes are unchecked).
 * NOTE(review): myThreadID is used under default(none) without a
 * data-sharing clause -- presumably declared threadprivate elsewhere;
 * confirm. */
int repIter, i;
int destRank;
/* set destRank to ID of other process */
if (myMPIRank == pingRankA) {
destRank = pingRankB;
} else if (myMPIRank == pingRankB) {
destRank = pingRankA;
}
/* Open the parallel region; the structured block is the rep loop, so
 * every thread executes all repetitions. */
#pragma omp parallel default(none) private(i, repIter) \
shared(dataSize, sizeofBuffer, pingSendBuf, globalIDarray) \
shared(pingRecvBuf, finalRecvBuf, status, requestID) \
shared(destRank, comm, myMPIRank, pingRankA, pingRankB, totalReps)
for (repIter = 0; repIter < totalReps; repIter++) {
if (myMPIRank == pingRankA || myMPIRank == pingRankB) {
/* Each thread writes its globalID to its part of
 * pingSendBuf.
 */
#pragma omp for schedule(static, dataSize)
for (i = 0; i < sizeofBuffer; i++) {
pingSendBuf[i] = globalIDarray[myThreadID];
}
/* Implicit barrier here takes care of necessary synchronisation:
 * the buffer is fully written before master starts the send. */
#pragma omp master
{
/* Master thread starts send of buffer */
MPI_Isend(pingSendBuf, sizeofBuffer, MPI_INT, destRank, TAG, comm,
&requestID);
/* then waits for message from other process */
MPI_Recv(pingRecvBuf, sizeofBuffer, MPI_INT, destRank, TAG, comm,
&status);
/* Master thread then completes send using an MPI_Wait */
MPI_Wait(&requestID, &status);
}
/* Barrier needed to ensure master thread has completed transfer
 * before any thread reads pingRecvBuf (master has no implicit
 * barrier of its own). */
#pragma omp barrier
/* Each thread reads its part of the received buffer */
#pragma omp for schedule(static, dataSize)
for (i = 0; i < sizeofBuffer; i++) { finalRecvBuf[i] = pingRecvBuf[i]; }
}
}
return 0;
}
/*-----------------------------------------------------------*/
/* multiplePingping */
/* */
/* With this algorithm multiple threads take part in both   */
/* communication and computation. */
/* Each thread sends its portion of the pingSendBuf to the */
/* other process using MPI_Isend/ MPI_Recv/ MPI_Wait */
/* routines. */
/*-----------------------------------------------------------*/
int multiplePingping(int totalReps, int dataSize) {
/* Multiple pingping: every OpenMP thread performs its own MPI
 * exchange of its dataSize-element slice of the buffers, using its
 * thread ID as the message tag to match slices between processes.
 * Requires an MPI_THREAD_MULTIPLE-capable MPI library -- TODO confirm
 * the init mode set elsewhere in this file. Always returns 0. */
int repIter, i;
int destRank;
int lBound;
/* set destRank to ID of other process */
if (myMPIRank == pingRankA) {
destRank = pingRankB;
} else if (myMPIRank == pingRankB) {
destRank = pingRankA;
}
/* Open parallel region; requestID and status are private because each
 * thread runs its own independent Isend/Recv/Wait sequence. */
#pragma omp parallel default(none) private(i, lBound, requestID, status, \
repIter) \
shared(pingSendBuf, pingRecvBuf, finalRecvBuf, sizeofBuffer) \
shared(destRank, myMPIRank, pingRankA, pingRankB, totalReps) \
shared(dataSize, globalIDarray, comm)
{
for (repIter = 0; repIter < totalReps; repIter++) {
if (myMPIRank == pingRankA || myMPIRank == pingRankB) {
/* Calculate the lower bound of each threads
 * portion of the data arrays.
 */
lBound = (myThreadID * dataSize);
/* Each thread writes to its part of pingSendBuf.
 * nowait is safe: each thread only sends the slice it wrote
 * itself (static chunks of dataSize match lBound). */
#pragma omp for nowait schedule(static, dataSize)
for (i = 0; i < sizeofBuffer; i++) {
pingSendBuf[i] = globalIDarray[myThreadID];
}
/* Each thread starts send of dataSize items of
 * pingSendBuf to process with rank = destRank.
 */
MPI_Isend(&pingSendBuf[lBound], dataSize, MPI_INT, destRank, myThreadID,
comm, &requestID);
/* Thread then waits for message from destRank with
 * tag equal to its thread id.
 */
MPI_Recv(&pingRecvBuf[lBound], dataSize, MPI_INT, destRank, myThreadID,
comm, &status);
/* Thread completes send using MPI_Wait */
MPI_Wait(&requestID, &status);
/* Each thread reads its part of received buffer. */
#pragma omp for nowait schedule(static, dataSize)
for (i = 0; i < sizeofBuffer; i++) { finalRecvBuf[i] = pingRecvBuf[i]; }
}
}
}
return 0;
}
/*-----------------------------------------------------------*/
/* allocatePingpingData */
/* */
/* Allocates space for the main data arrays. */
/* Size of each array is specified by subroutine argument. */
/*-----------------------------------------------------------*/
int allocatePingpingData(int sizeofBuffer) {
/* Allocate the three sizeofBuffer-element int arrays used by the
 * pingping benchmark (send, receive and final-read buffers).
 * Returns 0 on success, 1 if any allocation fails; on failure any
 * partial allocations are released and all three globals are NULL.
 * Fixes: unchecked malloc results, redundant casts, and sizeof(int)
 * replaced by the sizeof *ptr idiom. */
pingSendBuf = malloc((size_t)sizeofBuffer * sizeof *pingSendBuf);
pingRecvBuf = malloc((size_t)sizeofBuffer * sizeof *pingRecvBuf);
finalRecvBuf = malloc((size_t)sizeofBuffer * sizeof *finalRecvBuf);
if (pingSendBuf == NULL || pingRecvBuf == NULL || finalRecvBuf == NULL) {
free(pingSendBuf);
free(pingRecvBuf);
free(finalRecvBuf);
pingSendBuf = NULL;
pingRecvBuf = NULL;
finalRecvBuf = NULL;
return 1;
}
return 0;
}
/*-----------------------------------------------------------*/
/* freePingpingData */
/* */
/* Deallocates the storage space for the main data arrays. */
/*-----------------------------------------------------------*/
int freePingpingData() {
/* Release the three benchmark buffers. Pointers are reset to NULL so
 * a repeated call (the driver frees once per data-size iteration)
 * or a stale read cannot double-free / use-after-free. Returns 0. */
free(pingSendBuf);
pingSendBuf = NULL;
free(pingRecvBuf);
pingRecvBuf = NULL;
free(finalRecvBuf);
finalRecvBuf = NULL;
return 0;
}
/*-----------------------------------------------------------*/
/* testPingping */
/* */
/* Verifies that the PingPing benchmark worked correctly. */
/*-----------------------------------------------------------*/
int testPingping(int sizeofBuffer, int dataSize) {
/* Verify the pingping exchange: each participating rank rebuilds the
 * expected finalRecvBuf contents (the other rank's thread global IDs)
 * and compares element-wise. The AND-reduced outcome is recorded on
 * rank 0 via setTestOutcome. Always returns 0.
 * NOTE(review): myThreadID appears under default(none) without a
 * clause -- presumably threadprivate; confirm. */
int otherPingRank, i, testFlag, reduceFlag;
int *testBuf;
/* initialise testFlag to true (test passed) */
testFlag = TRUE;
/* Testing only needs to be done by pingRankA & pingRankB */
if (myMPIRank == pingRankA || myMPIRank == pingRankB) {
/* allocate space for testBuf */
testBuf = (int *)malloc(sizeofBuffer * sizeof(int));
/* set the ID of other pingRank */
if (myMPIRank == pingRankA) {
otherPingRank = pingRankB;
} else if (myMPIRank == pingRankB) {
otherPingRank = pingRankA;
}
/* construct testBuf array with correct values.
 * These are the values that should be in finalRecvBuf.
 */
#pragma omp parallel for default(none) private(i) \
shared(otherPingRank, numThreads, testBuf, dataSize, sizeofBuffer) \
schedule(static, dataSize)
for (i = 0; i < sizeofBuffer; i++) {
/* calculate globalID of thread expected in finalRecvBuf.
 * Global IDs are assigned as rank * numThreads + threadID,
 * so the expected sender ID uses otherPingRank. */
testBuf[i] = (otherPingRank * numThreads) + myThreadID;
}
/* compare each element of testBuf and finalRecvBuf */
for (i = 0; i < sizeofBuffer; i++) {
if (testBuf[i] != finalRecvBuf[i]) { testFlag = FALSE; }
}
/* free space for testBuf */
free(testBuf);
}
/* AND-reduce the per-rank flags; non-participating ranks contribute
 * their initial TRUE and so never fail the test. */
MPI_Reduce(&testFlag, &reduceFlag, 1, MPI_INT, MPI_LAND, 0, comm);
/* Master process sets the testOutcome using testFlag. */
if (myMPIRank == 0) { setTestOutcome(reduceFlag); }
return 0;
}
|
parallel_for.c | #include <stdio.h>
#include <omp.h>
#define N 50
#define CHUNKSIZE 5
int main(int argc, char *argv[]) {
/* OpenMP demo: initialise two input arrays, sum them into c with a
 * dynamically scheduled parallel loop (each thread claims CHUNKSIZE
 * iterations at a time), then print the result array in parallel.
 * Command-line arguments are unused; always returns 0. */
float a[N], b[N], c[N];
int i, chunk, tid;
(void)argc;
(void)argv;
/* Fill the inputs: a[i] == b[i] == i. */
for (i = 0; i < N; i++) {
a[i] = i * 1.0;
b[i] = a[i];
}
chunk = CHUNKSIZE;
/* Reverse-order sum; one greeting per chunk boundary. */
#pragma omp parallel for \
shared(a, b, c, chunk) private(i, tid) \
schedule(dynamic, chunk)
for (i = N - 1; i >= 0; i--) {
tid = omp_get_thread_num();
if (i % chunk == 0) {
printf("Hello World from thread = %d\n", tid);
}
c[i] = a[i] + b[i];
}
/* Print results; line order across threads is unspecified. */
#pragma omp parallel for
for (i = 0; i < N; i++) {
printf("c[%d] = %f\n", i, c[i]);
}
return 0;
}
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for two `struct timeval` values.
 * The subtrahend *y is normalised in place (borrow/carry between the
 * seconds and microseconds fields), following the classic GNU libc
 * manual example; result->tv_usec always ends up non-negative.
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Borrow whole seconds into y so that x->tv_usec >= y->tv_usec. */
if (x->tv_usec < y->tv_usec) {
int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * borrow;
y->tv_sec += borrow;
}
/* Move any excess microseconds of the difference into seconds. */
if (x->tv_usec - y->tv_usec > 1000000) {
int carry = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * carry;
y->tv_sec -= carry;
}
/* The microsecond difference is now certainly non-negative. */
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Negative iff the (adjusted) seconds go negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
/* Driver for a PLUTO/CLooG-generated, time-tiled order-1 3D 7-point
 * stencil with variable coefficients. Usage: prog Nx Ny Nz Nt.
 * Runs the stencil TESTS times and reports the best wall time. */
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
/* NOTE(review): Nx..Nz and Nt are read uninitialized when fewer than
 * 4 command-line arguments are supplied -- undefined behavior. */
// allocate the arrays: A is double-buffered (A[0]/A[1]) over time steps
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
/* One coefficient grid per stencil point (centre + 6 neighbours). */
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
/* (Consumed by the code generator; -1 terminates the list. It is not
 * read by the generated loop nest below and is never freed.) */
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 4;
tile_size[3] = 128;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
/* NOTE(review): loops start at index 1, so the index-0 boundary planes
 * of A and coef are left uninitialized; the stencil below does read
 * index 0 -- presumably harmless for timing, but values are garbage. */
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* The license/comment text below is residue from the source-to-source
 * tool inlining a glibc pre-include header; kept verbatim. */
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
/* Machine-generated time-skewed/tiled loop nest; t5 is the time step,
 * (t6,t7,t8) index the skewed spatial coordinates. Do not hand-edit. */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,2);t1++) {
lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4));
ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(t1-1,2)),ceild(4*t2-Nz,4));t3<=min(min(min(floord(4*t2+Ny,4),floord(Nt+Ny-4,4)),floord(2*t1+Ny+1,4)),floord(4*t1-4*t2+Nz+Ny-1,4));t3++) {
for (t4=max(max(max(0,ceild(t1-63,64)),ceild(4*t2-Nz-124,128)),ceild(4*t3-Ny-124,128));t4<=min(min(min(min(floord(4*t2+Nx,128),floord(4*t3+Nx,128)),floord(Nt+Nx-4,128)),floord(2*t1+Nx+1,128)),floord(4*t1-4*t2+Nz+Nx-1,128));t4++) {
for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),4*t3-Ny+2),128*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),4*t3+2),128*t4+126),4*t1-4*t2+Nz+1);t5++) {
for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(4*t3,t5+1);t7<=min(4*t3+3,t5+Ny-2);t7++) {
lbv=max(128*t4,t5+1);
ubv=min(128*t4+127,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
/* NOTE(review): A itself, coef itself and tile_size are never freed;
 * harmless at process exit but a leak if this code is reused. */
return 0;
}
|
matrix_op-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file matrix_op-inl.h
* \brief Function definition of matrix related operators
*/
#ifndef MXNET_OPERATOR_TENSOR_MATRIX_OP_INL_H_
#define MXNET_OPERATOR_TENSOR_MATRIX_OP_INL_H_
#include <mxnet/operator_util.h>
#include <vector>
#include <algorithm>
#include <utility>
#include <type_traits>
#include "../mshadow_op.h"
#include "../elemwise_op_common.h"
#include "../channel_op_common.h"
#include "../mxnet_op.h"
#include "broadcast_reduce_op.h"
#include "./init_op.h"
#include "../../common/static_array.h"
#include "./slice-inl.h"
#if MXNET_USE_CUDA
#include <thrust/device_vector.h>
#endif
#ifdef __CUDACC__
#include "./pseudo2DTranspose_op-inl.cuh"
#endif
namespace mxnet {
namespace op {
/* Operator parameters for Reshape. `shape` (with the special values
 * 0/-1/-2/-3/-4 interpreted by InferReshapeShape) is the modern
 * interface; `target_shape` and `keep_highest` are deprecated. */
struct ReshapeParam : public dmlc::Parameter<ReshapeParam> {
mxnet::TShape target_shape;  // deprecated target shape (one dim may be 0 = infer)
bool keep_highest;           // deprecated: keep dim 0 of the input unchanged
mxnet::Tuple<int> shape;     // requested shape, may contain special values
bool reverse;                // interpret special values right-to-left
DMLC_DECLARE_PARAMETER(ReshapeParam) {
DMLC_DECLARE_FIELD(shape)
.set_default(mxnet::Tuple<int>())
.describe("The target shape");
DMLC_DECLARE_FIELD(reverse)
.set_default(false)
.describe("If true then the special values are inferred from right to left");
DMLC_DECLARE_FIELD(target_shape)
.set_default(mxnet::TShape(0, -1))
.describe("(Deprecated! Use ``shape`` instead.) "
"Target new shape. One and only one dim can be 0, "
"in which case it will be inferred from the rest of dims");
DMLC_DECLARE_FIELD(keep_highest).set_default(false)
.describe("(Deprecated! Use ``shape`` instead.) Whether keep the highest dim unchanged."
"If set to true, then the first dim in target_shape is ignored,"
"and always fixed as input");
}
/* Equality over all four fields (used for parameter caching/hashing). */
bool operator==(const ReshapeParam &other) const {
return this->target_shape == other.target_shape &&
this->keep_highest == other.keep_highest &&
this->shape == other.shape &&
this->reverse == other.reverse;
}
};
/* Resolve a reshape spec `shape` against the source shape `dshape`.
 * Special values in `shape`:
 *   0  -> copy the corresponding source dim
 *  -1  -> infer this dim from the remaining size (at most once)
 *  -2  -> copy all remaining source dims
 *  -3  -> merge the next two source dims into one
 *  -4 d1 d2 -> split the next source dim into d1 x d2 (one may be -1)
 * With `reverse` the spec is applied right-to-left. Returns the
 * resolved output shape (unknown dims stay -1 if dshape is unknown). */
template<typename IType>
inline mxnet::TShape InferReshapeShape(const mxnet::Tuple<IType>& shape,
const mxnet::TShape& dshape, bool reverse) {
std::vector<IType> dshape_vec;
std::vector<IType> param_shape_vec(shape.begin(), shape.end());
for (int i = 0; i < dshape.ndim(); ++i) {
dshape_vec.push_back(dshape[i]);
}
std::vector<IType> tmp;
size_t src_idx = 0;   // next unconsumed source dim
int inf_idx = -1;     // position of the single -1 (inferred) dim, if any
if (reverse) {
/* Work on reversed copies so the special values consume source dims
 * from the right; undone before building the result. */
std::reverse(dshape_vec.begin(), dshape_vec.end());
std::reverse(param_shape_vec.begin(), param_shape_vec.end());
}
auto dshape_len = dshape_vec.size();
auto params_len = param_shape_vec.size();
for (size_t i = 0; i < params_len; ++i) {
IType proposed_dim = param_shape_vec[i];
if (proposed_dim == 0) {
// keep same
CHECK_LT(src_idx, dshape_len);
tmp.push_back(dshape_vec[src_idx++]);
} else if (proposed_dim == -1) {
// infer
CHECK_LT(inf_idx, 0) << "One and only one dim can be inferred";
inf_idx = i;
tmp.push_back(1);  // placeholder; fixed up after the loop
src_idx++;
} else if (proposed_dim == -2) {
// copy all remaining dims from source
while (src_idx < dshape_len) {
const int dn = dshape_vec[src_idx++];
tmp.push_back(dn);
}
} else if (proposed_dim == -3) {
// merge two dims from source
CHECK_LT(src_idx, dshape_len-1);
const int d1 = dshape_vec[src_idx++];
const int d2 = dshape_vec[src_idx++];
if (!mxnet::dim_size_is_known(d1) || !mxnet::dim_size_is_known(d2)) {
tmp.push_back(-1);  // product of an unknown dim is unknown
} else {
tmp.push_back(d1 * d2);
}
} else if (proposed_dim == -4) {
// split the source dim s into two dims
// read the left dim and then the right dim (either can be -1)
CHECK_LT(i + 2, params_len);
CHECK_LT(src_idx, dshape_len);
const int d0 = dshape_vec[src_idx++];
IType d1 = param_shape_vec[++i];
IType d2 = param_shape_vec[++i];
CHECK(d1 != -1 || d2 != -1) << "Split dims cannot both be -1.";
if (d1 == -1 && d0 >= 0) d1 = d0 / d2; // d0 must be known to do this
if (d2 == -1 && d0 >= 0) d2 = d0 / d1; // d0 must be known to do this
CHECK(d1 * d2 == static_cast<IType>(d0) || static_cast<IType>(d0) == IType(-1)) <<
"Split dims " << d1 << ", " << d2 << " do not divide original dim " << d0;
tmp.push_back(d1);
tmp.push_back(d2);
} else {
// greater than 0, new shape
tmp.push_back(proposed_dim);
src_idx++;
}
}
if (inf_idx >= 0) {
/* Replace the -1 placeholder with total_size / product(other dims);
 * only possible when the full source size is known. */
if (shape_is_known(dshape)) {
IType new_size = 1;
for (IType x : tmp) new_size *= x;
tmp[inf_idx] = dshape.Size() / new_size;
} else {
tmp[inf_idx] = -1;
}
}
if (reverse) {
std::reverse(param_shape_vec.begin(), param_shape_vec.end());
std::reverse(dshape_vec.begin(), dshape_vec.end());
std::reverse(tmp.begin(), tmp.end());
}
mxnet::TShape oshape(tmp.begin(), tmp.end());
return oshape;
}
/* Try to complete at most one unknown dimension of *in from a fully
 * known `out` shape (reshape inference run "in reverse").
 * Returns true when *in is (now) fully known, false otherwise.
 * Fix: the original unconditionally wrote (*in)[zero_axis] even when
 * no unknown dim was found (zero_axis == -1, e.g. when in->ndim() is
 * itself unknown), an out-of-bounds write; it could also divide by a
 * zero product for zero-size shapes. Both cases now return false. */
inline bool ReverseReshapeInferShape(mxnet::TShape *in, const mxnet::TShape& out) {
  if (shape_is_known(*in) && shape_is_known(out)) {
    return true;
  } else if (!shape_is_known(out)) {
    return false;
  } else {
    int zero_axis = -1;
    int known_dim_size_prod = 1;
    for (int i = 0; i < in->ndim(); i++) {
      if (!mxnet::dim_size_is_known(*in, i)) {
        if (zero_axis != -1)
          return false;  // more than 1 unknown dim found; cannot infer.
        else
          zero_axis = i;
      } else {
        known_dim_size_prod *= (*in)[i];
      }
    }
    if (zero_axis == -1 || known_dim_size_prod == 0) {
      // Nothing to infer (ndim unknown) or division would be by zero.
      return false;
    }
    (*in)[zero_axis] = out.Size() / known_dim_size_prod;
    return true;
  }
}
/* Shape-inference pass for the Reshape operator. Resolves the output
 * shape from either `shape` (via InferReshapeShape) or the deprecated
 * `target_shape`, then propagates size information back into the input
 * shape. Returns true when both shapes end up fully known. */
inline bool ReshapeShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
const ReshapeParam& param_ = nnvm::get<ReshapeParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]";
CHECK_EQ(out_attrs->size(), 1U);
mxnet::TShape &dshape = (*in_attrs)[0];
if (!mxnet::ndim_is_known(dshape)) return false;
mxnet::TShape oshape;
if (param_.shape.ndim() != 0) {
/* Modern path: interpret the special-value spec. */
oshape = InferReshapeShape(param_.shape, dshape, param_.reverse);
} else if (param_.target_shape.ndim() != -1) {
/* Deprecated path: target_shape with at most one 0 meaning "infer". */
LOG(INFO) << "Using target_shape will be deprecated.";
oshape = param_.target_shape;
int neg_count = 0;
index_t inf_idx = 0;
index_t start_idx = param_.keep_highest ? 1 : 0;
if (param_.keep_highest) {
oshape[0] = dshape[0];
}
for (int i = start_idx; i < oshape.ndim(); ++i) {
if (oshape[i] == 0) {
neg_count++;
inf_idx = i;
}
}
if (neg_count == 1) {
/* Temporarily set to 1 so oshape.Size() is the product of the
 * known dims, then divide it out. */
oshape[inf_idx] = 1;
oshape[inf_idx] = dshape.Size() / oshape.Size();
}
} else {
/* No spec given at all: only succeeds if the output shape is already
 * known and can be propagated backwards. */
return shape_is_known((*out_attrs)[0])
&& ReverseReshapeInferShape(&(*in_attrs)[0], (*out_attrs)[0]);
}
ReverseReshapeInferShape(&dshape, oshape);
#if 0
CHECK_EQ(oshape.Size(), dshape.Size())
<< "Target shape size is different to source. "
<< "Target: " << oshape
<< "\nSource: " << dshape;
#endif
SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
return ReverseReshapeInferShape(&(*in_attrs)[0], (*out_attrs)[0]);
}
/* Shape inference for Flatten: collapse all dims after the first into
 * one, producing (dshape[0], prod(dshape[1:])).
 * NOTE(review): target_dim is an int; very large tensors could
 * overflow it -- confirm against how callers bound shape sizes. */
inline bool FlattenShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]";
CHECK_EQ(out_attrs->size(), 1U);
const mxnet::TShape &dshape = (*in_attrs)[0];
if (!shape_is_known(dshape)) return false;
int target_dim = 1;
for (int i = 1; i < dshape.ndim(); ++i) {
target_dim *= dshape[i];
}
SHAPE_ASSIGN_CHECK(*out_attrs, 0, mshadow::Shape2(dshape[0], target_dim));
return true;
}
/* Operator parameter for Transpose: the axis permutation. An empty
 * `axes` (ndim 0, the default) means "reverse all axes". */
struct TransposeParam : public dmlc::Parameter<TransposeParam> {
mxnet::TShape axes;  // permutation of input axes; empty = full reversal
DMLC_DECLARE_PARAMETER(TransposeParam) {
DMLC_DECLARE_FIELD(axes).set_default(mxnet::TShape(0, -1))
.describe("Target axis order. By default the axes will be inverted.");
}
/* Equality on the permutation (used for parameter caching/hashing). */
bool operator==(const TransposeParam &other) const {
return this->axes == other.axes;
}
};
/*!
* \brief This function performs transpose operation on a 2D matrix by utilizing the L1 cache
* \param in input tensor
* \param out output tensor
* \param row shape of dim 0 of input
* \param col shape of dim 1 of input
*/
template<typename DType>
MSHADOW_XINLINE void Transpose2D(const DType *in, DType *out, index_t row, index_t col) {
  /* Cache-blocked 2-D transpose: out[a*row + b] = in[b*col + a] for a
   * row x col input. Work proceeds in 32x32 tiles so each tile stays
   * L1-resident (32*32 elements at a worst-case 8 bytes per element
   * fits a 32 KB L1 across threads), leaving unrolling to the compiler.
   * The two tile loops are parallelised (collapse(2) where supported;
   * MSVC's OpenMP lacks collapse); the two in-tile loops stay serial
   * to preserve locality. */
  const index_t tile = 32;
#ifdef _MSC_VER
  #pragma omp parallel for
#else
  #pragma omp parallel for collapse(2)
#endif  // _MSC_VER
  for (index_t bi = 0; bi < row; bi += tile) {
    for (index_t bj = 0; bj < col; bj += tile) {
      // transpose one tile, clipped at the matrix edges
      for (index_t a = bj; (a < bj + tile) && (a < col); ++a) {
        for (index_t b = bi; (b < bi + tile) && (b < row); ++b) {
          out[a * row + b] = in[b * col + a];
        }
      }
    }
  }
}
/* Perform transpose of `src` into `ret` according to the permutation
 * `axes`, dispatching on tensor rank (0..6). On GPU builds, axis
 * patterns that reduce to a batched 2-D transpose take a dedicated
 * fast path. Requires src and ret to share a dtype; zero-size inputs
 * are a no-op. */
template<typename xpu>
void TransposeImpl(RunContext ctx,
const TBlob& src,
const TBlob& ret,
const mxnet::TShape& axes) {
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(src.type_flag_, ret.type_flag_);
// zero-size tensor, no need to compute
if (src.shape_.Size() == 0U) return;
Stream<xpu> *s = ctx.get_stream<xpu>();
#ifdef __CUDACC__
// This transpose can be used only if there exist n and m such that:
// params = (0, ..., n-1, n+m, ..., params.size, n, ..., n+m-1)
// Example: (0, 2, 3, 1) or (0, 3, 1, 2), but not (0, 2, 1, 3).
if (isPseudo2DTranspose(axes)) {
MSHADOW_TYPE_SWITCH(ret.type_flag_, DType, {
transpose_pseudo2D<DType>(ret, src, axes, s);
});
return;
}
#endif
MSHADOW_TYPE_SWITCH(ret.type_flag_, DType, {
switch (axes.ndim()) {
case 0: {
/* Rank 0 (scalar): view as a 1-element vector and copy. */
Tensor<xpu, 1, DType> in = src.get_with_shape<xpu, 1, DType>(mshadow::Shape1(1), s);
Tensor<xpu, 1, DType> out = ret.get_with_shape<xpu, 1, DType>(mshadow::Shape1(1), s);
Copy(out, in, s);
break;
}
case 1: {
/* Rank 1: transpose is the identity; plain copy. */
Tensor<xpu, 1, DType> in = src.get<xpu, 1, DType>(s);
Tensor<xpu, 1, DType> out = ret.get<xpu, 1, DType>(s);
Copy(out, in, s);
break;
}
case 2: {
/* Rank 2: use the cache-blocked CPU kernel when actually swapping
 * axes on CPU; otherwise mshadow's expression (or a copy for the
 * identity permutation). */
mshadow::Tensor<xpu, 2, DType> in = src.FlatTo2D<xpu, DType>(s);
mshadow::Tensor<xpu, 2, DType> out = ret.FlatTo2D<xpu, DType>(s);
if (axes[0] == 1 && axes[1] == 0) {
if (ctx.get_ctx().dev_mask() == cpu::kDevMask) {
Transpose2D<DType>(in.dptr_, out.dptr_, in.shape_[0], in.shape_[1]);
} else {
out = in.T();
}
} else {
Copy(out, in, s);
}
break;
}
case 3: {
Tensor<xpu, 3, DType> in = src.get<xpu, 3, DType>(s);
Tensor<xpu, 3, DType> out = ret.get<xpu, 3, DType>(s);
out = transpose(in, axes.get<3>());
break;
}
case 4: {
Tensor<xpu, 4, DType> in = src.get<xpu, 4, DType>(s);
Tensor<xpu, 4, DType> out = ret.get<xpu, 4, DType>(s);
out = transpose(in, axes.get<4>());
break;
}
case 5: {
Tensor<xpu, 5, DType> in = src.get<xpu, 5, DType>(s);
Tensor<xpu, 5, DType> out = ret.get<xpu, 5, DType>(s);
out = transpose(in, axes.get<5>());
break;
}
case 6: {
Tensor<xpu, 6, DType> in = src.get<xpu, 6, DType>(s);
Tensor<xpu, 6, DType> out = ret.get<xpu, 6, DType>(s);
out = transpose(in, axes.get<6>());
break;
}
default:
LOG(FATAL) << "Transpose support at most 6 dimensions";
break;
}
});
}
// matrix transpose
template<typename xpu>
void Transpose(const nnvm::NodeAttrs& attrs,
               const OpContext& ctx,
               const std::vector<TBlob>& inputs,
               const std::vector<OpReqType>& req,
               const std::vector<TBlob>& outputs) {
  /* FCompute entry point: validate the write request, then forward to
   * TransposeImpl. An empty `axes` parameter means the default
   * transpose, i.e. the fully reversed axis order. */
  if (req[0] == kNullOp) {
    return;
  }
  const TransposeParam& param = nnvm::get<TransposeParam>(attrs.parsed);
  CHECK_EQ(req[0], kWriteTo) << "Transpose does not support kWriteInplace and kAddTo";
  if (param.axes.ndim() != 0) {
    TransposeImpl<xpu>(ctx.run_ctx, inputs[0], outputs[0], param.axes);
    return;
  }
  // Build the reversed-axis permutation (d -> ndim-1-d).
  const int nd = inputs[0].ndim();
  mxnet::TShape reversed(nd, -1);
  for (int d = 0; d < nd; ++d) {
    reversed[d] = nd - 1 - d;
  }
  TransposeImpl<xpu>(ctx.run_ctx, inputs[0], outputs[0], reversed);
}
/* Shape inference for Transpose: propagates shape information in both
 * directions -- `ret` is the output shape implied by the input, `get`
 * is the input shape implied by the output (inverse permutation).
 * Returns true when the output shape is fully known. */
inline bool TransposeShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
const TransposeParam& param = nnvm::get<TransposeParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
mxnet::TShape& shp = (*in_attrs)[0];
mxnet::TShape& out_shp = (*out_attrs)[0];
CHECK_LE(shp.ndim(), 6) << "Transpose support at most 6 dimensions";
if (shp.ndim() == -1 && out_shp.ndim() == -1)
return false; // none of the shapes is known
if (out_shp.ndim() >= 0 && shp.ndim() >= 0)
CHECK_EQ(out_shp.ndim(), shp.ndim());
mxnet::TShape get(std::max(shp.ndim(), out_shp.ndim()), -1);
mxnet::TShape ret(std::max(shp.ndim(), out_shp.ndim()), -1);
if (param.axes.ndim() == 0) {
/* Default permutation: reversed axes (its own inverse). */
for (int i = 0; i < shp.ndim(); ++i) {
ret[i] = shp[shp.ndim()-1-i];
}
for (int i = 0; i < out_shp.ndim(); ++i) {
get[shp.ndim()-1-i] = out_shp[i];
}
} else {
/* Explicit permutation: forward-apply for ret, inverse-apply for get. */
CHECK_EQ(std::max(shp.ndim(), out_shp.ndim()), param.axes.ndim());
for (int i = 0; i < shp.ndim(); ++i) {
CHECK(param.axes[i] < static_cast<int64_t>(shp.ndim()));
ret[i] = shp[param.axes[i]];
}
for (int i = 0; i < out_shp.ndim(); ++i) {
get[param.axes[i]] = out_shp[i];
}
}
SHAPE_ASSIGN_CHECK(*in_attrs, 0, get);
SHAPE_ASSIGN_CHECK(*out_attrs, 0, ret);
return shape_is_known(ret);
}
/* Operator parameter for expand_dims: the insertion position of the
 * new length-1 axis (negative values count from the end). */
struct ExpandDimParam : public dmlc::Parameter<ExpandDimParam> {
int axis;  // insertion index in [-ndim, ndim]
DMLC_DECLARE_PARAMETER(ExpandDimParam) {
DMLC_DECLARE_FIELD(axis)
.describe("Position where new axis is to be inserted. Suppose that "
"the input `NDArray`'s dimension is `ndim`, the range of "
"the inserted axis is `[-ndim, ndim]`");
}
/* Equality on the axis (used for parameter caching/hashing). */
bool operator==(const ExpandDimParam &other) const {
return this->axis == other.axis;
}
};
/* Shape inference for expand_dims: output = input with a length-1 axis
 * inserted at `axis`. Propagates in both directions, so a known output
 * shape can also fill in the input shape. Returns true when both are
 * fully known. */
inline bool ExpandDimShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
const ExpandDimParam& param = nnvm::get<ExpandDimParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
if (!mxnet::ndim_is_known(in_attrs->at(0)) && !mxnet::ndim_is_known(out_attrs->at(0))) {
return false;
}
mxnet::TShape& ishape = (*in_attrs)[0];
mxnet::TShape& oshape = (*out_attrs)[0];
int indim = ishape.ndim();
bool unknown_ishape = false;
if (-1 == indim) {
/* Input rank unknown: derive it from the output rank. */
indim = oshape.ndim() - 1;
unknown_ishape = true;
}
int axis = param.axis;
if (axis < 0) {
axis += indim + 1;  // normalise negative axis
}
CHECK(axis >= 0 && axis <= indim)
<< "axis must be in the range [" << -indim << ", " << indim << "] ("
<< param.axis << " provided)";
/* Forward direction: input dims with a 1 spliced in at `axis`. */
mxnet::TShape ret(indim + 1, -1);
for (int i = 0; i < axis; ++i) {
ret[i] = (unknown_ishape? -1 : ishape[i]);
}
ret[axis] = 1;
for (int i = axis+1; i < indim+1; ++i) {
ret[i] = (unknown_ishape? -1 : ishape[i-1]);
}
SHAPE_ASSIGN_CHECK(*out_attrs, 0, ret);
/* Backward direction: output dims with the inserted axis removed
 * (oshape references (*out_attrs)[0], already updated above). */
ret = mxnet::TShape(indim, -1);
for (int i = 0; i < axis; ++i) ret[i] = oshape[i];
for (int i = axis+1; i < indim+1; ++i) ret[i-1] = oshape[i];
SHAPE_ASSIGN_CHECK(*in_attrs, 0, ret);
return shape_is_known(in_attrs->at(0)) && shape_is_known(out_attrs->at(0));
}
// Currently MKLDNN only supports step = 1 or step has no value
inline bool SupportMKLDNNSlice(const SliceParam& param) {
  /* A slice is MKLDNN-compatible iff every specified step equals 1;
   * an absent step list (or unset entries) also counts as step 1. */
  if (param.step.ndim() == 0U) {
    return true;
  }
  for (int i = 0; i < param.step.ndim(); ++i) {
    const auto& step = param.step[i];
    if (step.has_value() && step.value() != 1) {
      return false;
    }
  }
  return true;
}
/* Storage-type inference for the Slice forward pass. Dense input stays
 * dense (MKLDNN path on CPU when the slice is MKLDNN-compatible); CSR
 * input keeps CSR only for trivial (step==1) slices; anything else
 * falls back to dense. Returns true when a dispatch was chosen. */
inline bool SliceForwardInferStorageType(const nnvm::NodeAttrs& attrs,
const int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int>* in_attrs,
std::vector<int>* out_attrs) {
CHECK_EQ(in_attrs->size(), 1);
CHECK_EQ(out_attrs->size(), 1);
const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
const auto& in_stype = in_attrs->at(0);
auto& out_stype = out_attrs->at(0);
bool dispatched = false;
const auto dispatch_ex = DispatchMode::kFComputeEx;
// If step = 1, no need to fallback; otherwise fallback to dense
bool trivial_step = false;
if (param.step.ndim() == 0U) {
trivial_step = true;
} else if (param.step.ndim() == 1U
&& (!param.step[0].has_value() || param.step[0].value() == 1)) {
trivial_step = true;
}
if (in_stype == kDefaultStorage) {
#if MXNET_USE_MKLDNN == 1
/* Prefer the MKLDNN (FComputeEx) kernel on CPU when enabled and the
 * slice has no non-unit steps. */
if (dev_mask == Context::kCPU && MKLDNNEnvSet()
&& SupportMKLDNNSlice(param)) {
dispatched = storage_type_assign(&out_stype, kDefaultStorage,
dispatch_mode, dispatch_ex);
}
#endif
if (!dispatched) {
dispatched = storage_type_assign(&out_stype, kDefaultStorage,
dispatch_mode, DispatchMode::kFCompute);
}
}
if (!dispatched && in_stype == kCSRStorage && trivial_step) {
dispatched = storage_type_assign(&out_stype, kCSRStorage,
dispatch_mode, dispatch_ex);
}
if (!dispatched) {
/* No specialised kernel applies: convert to dense and retry. */
dispatched = dispatch_fallback(out_attrs, dispatch_mode);
}
return dispatched;
}
// Kernel that rebases one CSR indptr entry: out[i] = in[i] - *base,
// so the sliced indptr starts at zero.
struct SliceCsrIndPtr {
  template<typename RType>
  MSHADOW_XINLINE static void Map(int i, RType* out, const RType* in, const RType* base) {
    KERNEL_ASSIGN(out[i], kWriteTo, in[i] - base[0]);
  }
};
/*
* a wrapper to launch SliceCsrIndPtr kernel.
* slice [src[begin] .. src[end]) and store in dst[0, end - begin)
*/
template<typename xpu, typename IType>
void SliceCsrIndPtrImpl(const int begin, const int end, RunContext ctx,
const IType* src, IType* dst) {
using namespace mshadow;
using namespace mxnet_op;
Stream<xpu> *s = ctx.get_stream<xpu>();
int indptr_len = end - begin + 1;
Kernel<SliceCsrIndPtr, xpu>::Launch(s, indptr_len, dst, src + begin, src + begin);
}
/*
 * Slice a CSR NDArray for first dimension.
 * Rebases indptr[begin..end] to start at zero, then copies the matching
 * ranges of the column-index and value arrays into the output.
 */
template<typename xpu>
void SliceDimOneCsrImpl(const mxnet::TShape &begin, const mxnet::TShape &end, const OpContext& ctx,
                        const NDArray &in, const NDArray &out) {
  using namespace mshadow;
  using namespace mxnet_op;
  using namespace csr;
  nnvm::dim_t begin_row = begin[0];
  nnvm::dim_t end_row = end[0];
  nnvm::dim_t indptr_len = end_row - begin_row + 1;
  out.CheckAndAllocAuxData(kIndPtr, Shape1(indptr_len));
  // assume idx indptr share the same type
  MSHADOW_IDX_TYPE_SWITCH(in.aux_type(kIndPtr), RType, {
    MSHADOW_IDX_TYPE_SWITCH(in.aux_type(kIdx), IType, {
      MSHADOW_TYPE_SWITCH(in.dtype(), DType, {
        RType* in_indptr = in.aux_data(kIndPtr).dptr<RType>();
        RType* out_indptr = out.aux_data(kIndPtr).dptr<RType>();
        SliceCsrIndPtrImpl<xpu, RType>(begin_row, end_row, ctx.run_ctx, in_indptr, out_indptr);
        Stream<xpu> *s = ctx.get_stream<xpu>();
        // nnz of the slice = last entry of the rebased output indptr
        // (copied to host so we can branch on it).
        RType nnz = 0;
        mshadow::Copy(Tensor<cpu, 1, RType>(&nnz, Shape1(1)),
                      Tensor<xpu, 1, RType>(out_indptr + indptr_len - 1, Shape1(1), s));
        // return csr zeros if nnz = 0
        if (nnz == 0) {
          out.set_aux_shape(kIdx, Shape1(0));
          return;
        }
        // copy indices and values
        out.CheckAndAllocAuxData(kIdx, Shape1(nnz));
        out.CheckAndAllocData(Shape1(nnz));
        IType* in_idx = in.aux_data(kIdx).dptr<IType>();
        IType* out_idx = out.aux_data(kIdx).dptr<IType>();
        DType* in_data = in.data().dptr<DType>();
        DType* out_data = out.data().dptr<DType>();
        // offset = position of the first nonzero of the sliced rows
        // inside the input idx/data arrays (in_indptr[begin_row]).
        RType offset = 0;
        mshadow::Copy(Tensor<cpu, 1, RType>(&offset, Shape1(1)),
                      Tensor<xpu, 1, RType>(in_indptr + begin_row, Shape1(1), s));
        mshadow::Copy(Tensor<xpu, 1, IType>(out_idx, Shape1(nnz), s),
                      Tensor<xpu, 1, IType>(in_idx + offset, Shape1(nnz), s), s);
        mshadow::Copy(Tensor<xpu, 1, DType>(out_data, Shape1(nnz), s),
                      Tensor<xpu, 1, DType>(in_data + offset, Shape1(nnz), s), s);
      });
    });
  });
}
/*!
 * \brief Kernel that slices one row of a CSRNDArray along axis one.
 * For row i it copies every entry whose column index lies in
 * [begin_col, end_col), rebasing the column index by begin_col.
 *
 * \param i           row index
 * \param out_idx     output csr column indices
 * \param out_data    output csr data
 * \param out_indptr  output csr row pointer (already computed)
 * \param in_idx      input csr column indices
 * \param in_data     input csr data
 * \param in_indptr   input csr row pointer
 * \param begin_col   first column kept (inclusive)
 * \param end_col     last column kept (exclusive)
 */
struct SliceDimTwoCsrAssign {
  template<typename IType, typename RType, typename DType>
  MSHADOW_XINLINE static void Map(int i,
                                  IType* out_idx, DType* out_data,
                                  const RType* out_indptr,
                                  const IType* in_idx, const DType* in_data,
                                  const RType* in_indptr,
                                  const int begin_col, const int end_col) {
    RType dst = out_indptr[i];
    const RType row_end = in_indptr[i + 1];
    for (RType j = in_indptr[i]; j < row_end; ++j) {
      const IType col = in_idx[j];
      // column indices are sorted within a row, so we can stop early
      if (col >= end_col) {
        break;
      }
      if (col >= begin_col) {
        out_idx[dst] = col - begin_col;
        out_data[dst] = in_data[j];
        ++dst;
      }
    }
  }
};
/*
 * Slice a CSR NDArray for two dimensions.
 * Forward declaration; device-specific definitions live in the .cc/.cu files.
 */
template<typename xpu>
void SliceDimTwoCsrImpl(const mxnet::TShape &begin, const mxnet::TShape &end, const OpContext& ctx,
                        const NDArray &in, const NDArray &out);
template<typename xpu>
void SliceCsrImpl(const SliceParam ¶m, const OpContext& ctx,
const NDArray &in, OpReqType req, const NDArray &out) {
if (req == kNullOp) return;
CHECK_NE(req, kAddTo) << "kAddTo for Slice on CSR input is not supported";
CHECK_NE(req, kWriteInplace) << "kWriteInplace for Slice on CSR input is not supported";
const mxnet::TShape ishape = in.shape();
const mxnet::TShape oshape = out.shape();
int N = ishape.ndim();
mxnet::TShape begin(N, -1), end(N, -1);
for (int i = 0; i < N; ++i) {
int s = 0;
if (i < param.begin.ndim() && param.begin[i]) {
s = *param.begin[i];
if (s < 0) s += ishape[i];
}
begin[i] = s;
end[i] = s + oshape[i];
}
switch (N) {
case 1: {
SliceDimOneCsrImpl<xpu>(begin, end, ctx, in, out);
break;
}
case 2: {
SliceDimTwoCsrImpl<xpu>(begin, end, ctx, in, out);
break;
}
default:
LOG(FATAL) << "CSR is only for 2-D shape";
break;
}
}
// FComputeEx entry point for Slice on non-default storage.
// Only CSR input is implemented; any other storage type is a fatal error.
template<typename xpu>
void SliceEx(const nnvm::NodeAttrs& attrs,
             const OpContext& ctx,
             const std::vector<NDArray>& inputs,
             const std::vector<OpReqType>& req,
             const std::vector<NDArray>& outputs) {
  CHECK_EQ(inputs.size(), 1);
  CHECK_EQ(outputs.size(), 1);
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  const auto stype = inputs[0].storage_type();
  if (stype != kCSRStorage) {
    LOG(FATAL) << "Slice not implemented for storage type" << stype;
  }
  SliceCsrImpl<xpu>(param, ctx, inputs[0], req[0], outputs[0]);
}
// Normalize the user-supplied begin/end/step tuples into concrete per-axis
// index ranges for a tensor of shape dshape. Negative indices wrap, missing
// entries default to the full axis, and out-of-range values are clamped.
// Returns true iff some axis produces an empty (zero-sized) slice.
template<int ndim>
inline bool GetIndexRange(const mxnet::TShape& dshape,
                          const mxnet::Tuple<dmlc::optional<index_t>>& param_begin,
                          const mxnet::Tuple<dmlc::optional<index_t>>& param_end,
                          const mxnet::Tuple<dmlc::optional<index_t>>& param_step,
                          common::StaticArray<index_t, ndim>* begin,
                          common::StaticArray<index_t, ndim>* end,
                          common::StaticArray<index_t, ndim>* step) {
  // Function returns false if output is zero-sized, true otherwise.
  bool zero_size_shape = false;
  CHECK_NE(dshape.ndim(), 0U);
  CHECK_LE(param_begin.ndim(), dshape.ndim())
    << "Slicing axis exceeds data dimensions";
  CHECK_LE(param_end.ndim(), dshape.ndim())
    << "Slicing axis exceeds data dimensions";
  CHECK_EQ(param_begin.ndim(), param_end.ndim())
    << "begin and end must have the same length";
  CHECK_EQ(ndim, dshape.ndim())
    << "Static array size=" << ndim
    << " is not equal to data shape ndim=" << dshape.ndim();
  if (param_step.ndim() > 0) {
    CHECK_EQ(param_step.ndim(), param_begin.ndim())
      << "step and begin must have the same length";
  }
  for (int i = 0; i < param_begin.ndim(); ++i) {
    // Missing step entries default to 1; step 0 is meaningless.
    index_t s = param_step.ndim() > 0 && param_step[i].has_value() ? param_step[i].value() : 1;
    CHECK_NE(s, 0) << "slice op step[" << i << "] cannot be 0";
    index_t b = 0, e = 0;
    const index_t len = dshape[i];
    if (len > 0) {
      // Defaults depend on step direction: forward slices span [0, len),
      // backward slices span [len-1, -1).
      b = param_begin[i].has_value() ? param_begin[i].value() : (s < 0 ? len - 1 : 0);
      e = param_end[i].has_value() ? param_end[i].value() : (s < 0 ? -1 : len);
      // Negative indices count from the end of the axis.
      if (b < 0) {
        b += len;
      }
      if (e < 0 && param_end[i].has_value()) {
        e += len;
      }
      // move the begin and end to correct position for calculating dim size
      b = (b < 0 && s > 0) ? 0 : b;
      b = (b > len - 1 && s < 0) ? len - 1 : b;
      // if the start value lead to empty tensor under step s, use -1 for indication
      b = (b < 0 || b > len - 1) ? -1 : b;
      e = e > -1 ? e : -1;
      e = e > len ? len : e;
    } else if (len == 0) {
      // Zero-length axis: only the empty slice is representable.
      b = 0;
      e = 0;
    }
    (*begin)[i] = b;
    (*end)[i] = e;
    (*step)[i] = s;
    // checking begin==end
    if (b == e) {
      zero_size_shape = true;
    }
  }
  // Axes beyond the given tuples are taken whole with step 1.
  for (int i = param_begin.ndim(); i < dshape.ndim(); ++i) {
    (*begin)[i] = 0;
    (*end)[i] = dshape[i];
    (*step)[i] = 1;
  }
  return zero_size_shape;
}
// Compute the length of output dimension i when dshape[i] is sliced with
// begin b, end e, step s (as produced by GetIndexRange). Unknown input
// dims propagate as -1; invalid/empty ranges yield 0.
inline void SetSliceOpOutputDimSize(const mxnet::TShape& dshape,
                                    const index_t i, const index_t b,
                                    const index_t e, const index_t s,
                                    mxnet::TShape* oshape) {
  if (!mxnet::dim_size_is_known(dshape, i)) {
    (*oshape)[i] = -1;
    return;
  }
  // b < 0 is GetIndexRange's marker for an empty slice; e == b is empty too.
  if (b < 0 || e == b) {
    (*oshape)[i] = 0;
  } else if (s > 0) {
    (*oshape)[i] = (e > b) ? (e - b - 1) / s + 1 : 0;
  } else {
    (*oshape)[i] = (e < b) ? (b - e - 1) / (-s) + 1 : 0;
  }
}
// Shape inference for the slice op: derives the output shape from the input
// shape and the begin/end/step params. Fails (returns false) until the input
// ndim is known.
inline bool SliceOpShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector* in_attrs,
                         mxnet::ShapeVector* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape& dshape = (*in_attrs)[0];
  if (!mxnet::ndim_is_known(dshape)) return false;
  CHECK_GT(dshape.ndim(), 0) << "slice only works for ndim > 0";
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  // Start from the input shape; only the sliced axes are overwritten below.
  mxnet::TShape oshape = dshape;
  MXNET_NDIM_SWITCH(dshape.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(dshape, param.begin, param.end, param.step, &begin, &end, &step);
    for (int i = 0; i < param.begin.ndim(); ++i) {
      const index_t b = begin[i], e = end[i], s = step[i];
      SetSliceOpOutputDimSize(dshape, i, b, e, s, &oshape);
    }
  })
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
  return shape_is_known(dshape) && shape_is_known(oshape);
}
// Forward slice kernel; specialized for cpu and gpu below.
template<int ndim, int req, typename xpu>
struct slice_forward;
// GPU forward slice kernel: one thread per output element.
template<int ndim, int req>
struct slice_forward<ndim, req, gpu> {
  // i is the linear index of one output element; it is decomposed into the
  // output's multi-dim coordinate, which maps through begin/step to the
  // source offset in data.
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* data,
                                  const mshadow::Shape<ndim> dshape,
                                  const mshadow::Shape<ndim> oshape,
                                  const common::StaticArray<index_t, ndim> begin,
                                  const common::StaticArray<index_t, ndim> step) {
    const index_t data_last_dim_size = dshape[ndim-1];
    const index_t out_last_dim_size = oshape[ndim-1];
    const index_t step_last_dim = step[ndim-1];
    const index_t begin_last_dim = begin[ndim-1];
    // j = coordinate along the last (fastest-varying) output axis.
    const index_t j = i % out_last_dim_size;
    index_t irow = 0;  // row id of flattend 2D data
    index_t stride = 1;
    index_t idx = i / out_last_dim_size;
#pragma unroll
    // Peel off one output coordinate per axis (innermost first) and
    // accumulate the corresponding input row offset.
    for (int k = ndim - 2; k >= 0; --k) {
      irow += stride * ((idx % oshape[k]) * step[k] + begin[k]);
      idx /= oshape[k];
      stride *= dshape[k];
    }
    KERNEL_ASSIGN(out[i], req,
                  data[irow * data_last_dim_size + j * step_last_dim + begin_last_dim]);
  }
};
// CPU forward slice kernel: one thread per output row (of the 2-D flattening),
// each thread writing the whole last dimension.
template<int ndim, int req>
struct slice_forward<ndim, req, cpu> {
  // i is the i-th row after flattening out into 2D tensor
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* data,
                                  const mshadow::Shape<ndim> dshape,
                                  const mshadow::Shape<ndim> oshape,
                                  const common::StaticArray<index_t, ndim> begin,
                                  const common::StaticArray<index_t, ndim> step) {
    const index_t data_last_dim_size = dshape[ndim-1];
    const index_t out_last_dim_size = oshape[ndim-1];
    const index_t step_last_dim = step[ndim-1];
    const index_t begin_last_dim = begin[ndim-1];
    index_t out_offset = i * out_last_dim_size;
    for (index_t j = 0; j < out_last_dim_size; ++j) {
      index_t irow = 0;  // row id of flattend 2D data
      index_t stride = 1;
      index_t idx = i;
#pragma unroll
      // Decompose the output row index into per-axis coordinates and map
      // each through begin/step to the input row offset.
      for (int k = ndim - 2; k >= 0; --k) {
        irow += stride * ((idx % oshape[k]) * step[k] + begin[k]);
        idx /= oshape[k];
        stride *= dshape[k];
      }
      KERNEL_ASSIGN(out[out_offset++], req,
                    data[irow * data_last_dim_size + j * step_last_dim + begin_last_dim]);
    }
  }
};
// Forward compute for the slice op on dense tensors: resolves the index
// ranges, then launches the device-specific slice_forward kernel.
template<typename xpu>
void SliceOpForward(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  if (req[0] == kNullOp) return;
  using namespace mshadow;
  Stream<xpu>* s = ctx.get_stream<xpu>();
  const TBlob& data = inputs[0];
  const TBlob& out = outputs[0];
  // Empty slice: nothing to write.
  if (out.Size() == 0) return;
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  MXNET_NDIM_SWITCH(data.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(data.shape_, param.begin, param.end, param.step, &begin, &end, &step);
    MSHADOW_TYPE_SWITCH(out.type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        // CPU kernel parallelizes over rows; GPU over individual elements.
        size_t num_threads = out.shape_.FlatTo2D()[0];
        if (std::is_same<xpu, gpu>::value) {
          num_threads *= out.shape_.get<ndim>()[ndim - 1];
        }
        mxnet_op::Kernel<slice_forward<ndim, Req, xpu>, xpu>::Launch(s, num_threads,
            out.dptr<DType>(), data.dptr<DType>(),
            data.shape_.get<ndim>(), out.shape_.get<ndim>(), begin, step);
      })
    })
  })
}
// Scatter kernel writing a value tensor into a slice of a larger tensor;
// specialized for cpu and gpu below. Also used as the slice backward pass.
template<int ndim, int req, typename xpu>
struct slice_assign;
// CPU slice-assign kernel: one thread per row of the flattened value tensor,
// each thread scattering the whole last dimension into out.
template<int ndim, int req>
struct slice_assign<ndim, req, cpu> {
  // i is the i-th row after flattening out into 2D tensor
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* val,
                                  const mshadow::Shape<ndim> oshape,
                                  const mshadow::Shape<ndim> vshape,
                                  const common::StaticArray<index_t, ndim> begin,
                                  const common::StaticArray<index_t, ndim> step) {
    const index_t data_last_dim_size = oshape[ndim-1];
    const index_t out_last_dim_size = vshape[ndim-1];
    const index_t step_last_dim = step[ndim-1];
    const index_t begin_last_dim = begin[ndim-1];
    index_t offset = i * out_last_dim_size;
    for (index_t j = 0; j < out_last_dim_size; ++j) {
      index_t irow = 0;  // row id of flattend 2D out
      index_t stride = 1;
      index_t idx = i;
#pragma unroll
      // Map the value-tensor coordinate through begin/step to the
      // destination row offset in out.
      for (int k = ndim - 2; k >= 0; --k) {
        irow += stride * ((idx % vshape[k]) * step[k] + begin[k]);
        idx /= vshape[k];
        stride *= oshape[k];
      }
      KERNEL_ASSIGN(out[irow * data_last_dim_size + j * step_last_dim + begin_last_dim],
                    req, val[offset++]);
    }
  }
};
// GPU slice-assign kernel: one thread per element of the value tensor.
template<int ndim, int req>
struct slice_assign<ndim, req, gpu> {
  // i is the linear index into the flattened value tensor; it is decomposed
  // into per-axis coordinates which map through begin/step to the
  // destination offset in out.
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* val,
                                  const mshadow::Shape<ndim> oshape,
                                  const mshadow::Shape<ndim> vshape,
                                  const common::StaticArray<index_t, ndim> begin,
                                  const common::StaticArray<index_t, ndim> step) {
    const index_t data_last_dim_size = oshape[ndim-1];
    const index_t out_last_dim_size = vshape[ndim-1];
    const index_t step_last_dim = step[ndim-1];
    const index_t begin_last_dim = begin[ndim-1];
    // j = coordinate along the last (fastest-varying) value axis.
    const index_t j = i % out_last_dim_size;
    index_t irow = 0;  // row id of flattend 2D out
    index_t stride = 1;
    index_t idx = i / out_last_dim_size;
#pragma unroll
    for (int k = ndim - 2; k >= 0; --k) {
      irow += stride * ((idx % vshape[k]) * step[k] + begin[k]);
      idx /= vshape[k];
      stride *= oshape[k];
    }
    KERNEL_ASSIGN(out[irow * data_last_dim_size + j * step_last_dim + begin_last_dim],
                  req, val[i]);
  }
};
// Backward compute for the slice op: zeroes the input gradient (for kWriteTo)
// and scatters the output gradient back into the sliced region via
// slice_assign.
template<typename xpu>
void SliceOpBackward(const nnvm::NodeAttrs& attrs,
                     const OpContext& ctx,
                     const std::vector<TBlob>& inputs,
                     const std::vector<OpReqType>& req,
                     const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  if (req[0] == kNullOp) return;
  using namespace mshadow;
  Stream<xpu>* s = ctx.get_stream<xpu>();
  const TBlob& ograd = inputs[0];
  const TBlob& igrad = outputs[0];
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  if (req[0] == kWriteTo) {
    // Positions outside the slice receive zero gradient.
    Fill(s, igrad, req[0], 0);
  } else if (req[0] == kWriteInplace) {
    LOG(FATAL) << "_slice_backward does not support kWriteInplace";
  }
  // Empty slice: gradient contribution is zero everywhere.
  if (ograd.Size() == 0) return;
  MXNET_NDIM_SWITCH(ograd.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    // Ranges are resolved against the *input* shape (igrad), matching forward.
    GetIndexRange(igrad.shape_, param.begin, param.end, param.step, &begin, &end, &step);
    MSHADOW_TYPE_SWITCH(ograd.type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        int num_threads = ograd.shape_.FlatTo2D()[0];
        if (std::is_same<xpu, gpu>::value) {
          num_threads *= ograd.shape_.get<ndim>()[ndim - 1];
        }
        mxnet_op::Kernel<slice_assign<ndim, Req, xpu>, xpu>::Launch(s, num_threads,
            igrad.dptr<DType>(), ograd.dptr<DType>(),
            igrad.shape_.get<ndim>(), ograd.shape_.get<ndim>(), begin, step);
      })
    })
  })
}
// Shape inference for slice-assign: the output matches the data input, and
// the value input (rhs) must match the sliced region's shape.
// Fix: b/e/s were declared `int`, silently narrowing `index_t` values from
// GetIndexRange; use index_t for consistency with SliceOpShape and to avoid
// truncation on large dimensions.
inline bool SliceAssignOpShape(const nnvm::NodeAttrs& attrs,
                               mxnet::ShapeVector *in_attrs,
                               mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(in_attrs->size(), 2U);
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape& dshape = (*in_attrs)[0];
  if (!mxnet::ndim_is_known(dshape)) return false;
  mxnet::TShape vshape = dshape;  // vshape is the value shape on the right hand side
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  MXNET_NDIM_SWITCH(dshape.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(dshape, param.begin, param.end, param.step, &begin, &end, &step);
    for (int i = 0; i < param.begin.ndim(); ++i) {
      const index_t b = begin[i], e = end[i], s = step[i];
      SetSliceOpOutputDimSize(dshape, i, b, e, s, &vshape);
    }
  })
  SHAPE_ASSIGN_CHECK(*in_attrs, 1, vshape);
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape);
  return true;
}
// Forward compute for slice-assign (data[slice] = val): copies data to the
// output (unless writing in place), then scatters val into the sliced region.
template<typename xpu>
void SliceAssignOpForward(const nnvm::NodeAttrs& attrs,
                          const OpContext& ctx,
                          const std::vector<TBlob>& inputs,
                          const std::vector<OpReqType>& req,
                          const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  CHECK_EQ(inputs.size(), 2U);  // data[index] = val, data and val are two inputs
  CHECK_EQ(outputs.size(), 1U);
  if (req[0] == kNullOp) return;
  Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& data = inputs[0];
  const TBlob& val = inputs[1];
  const TBlob& out = outputs[0];
  if (req[0] == kWriteTo) {
    // Out-of-slice positions must hold the original data values.
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      Tensor<xpu, 1, DType> in = inputs[0].FlatTo1D<xpu, DType>(s);
      Tensor<xpu, 1, DType> out = outputs[0].FlatTo1D<xpu, DType>(s);
      Copy(out, in, s);
    });
  } else if (req[0] != kWriteInplace) {
    LOG(FATAL) << "_slice_assign only supports kWriteTo and kWriteInplace";
  }
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  MXNET_NDIM_SWITCH(data.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    bool zero_size_shape = GetIndexRange(data.shape_, param.begin, param.end, param.step,
                                         &begin, &end, &step);
    if (zero_size_shape) {
      return;  // slice_assign of zero-sized subspace needs no operation.
    }
    MSHADOW_TYPE_SWITCH(out.type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        // CPU kernel parallelizes over value rows; GPU over value elements.
        int num_threads = val.shape_.FlatTo2D()[0];
        if (std::is_same<xpu, gpu>::value) {
          num_threads *= val.shape_.get<ndim>()[ndim - 1];
        }
        mxnet_op::Kernel<slice_assign<ndim, Req, xpu>, xpu>::Launch(s, num_threads,
            out.dptr<DType>(), val.dptr<DType>(),
            out.shape_.get<ndim>(), val.shape_.get<ndim>(), begin, step);
      })
    })
  })
}
// Parameters for the slice-assign-scalar op (data[slice] = scalar).
struct SliceAssignScalarParam : public dmlc::Parameter<SliceAssignScalarParam> {
  double scalar;                                 // value written into the slice
  mxnet::Tuple<dmlc::optional<index_t>> begin, end;  // per-axis slice bounds
  mxnet::Tuple<dmlc::optional<index_t>> step;        // per-axis stride (optional)
  DMLC_DECLARE_PARAMETER(SliceAssignScalarParam) {
    DMLC_DECLARE_FIELD(scalar)
    .set_default(0)
    .describe("The scalar value for assignment.");
    DMLC_DECLARE_FIELD(begin)
    .describe("starting indices for the slice operation, supports negative indices.");
    DMLC_DECLARE_FIELD(end)
    .describe("ending indices for the slice operation, supports negative indices.");
    DMLC_DECLARE_FIELD(step)
    .set_default(mxnet::Tuple<dmlc::optional<index_t>>())
    .describe("step for the slice operation, supports negative values.");
  }
};
// Shape inference for slice-assign-scalar: the output shape is identical to
// the (fully known) input shape.
inline bool SliceAssignScalarOpShape(const nnvm::NodeAttrs& attrs,
                                     mxnet::ShapeVector *in_attrs,
                                     mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape& data_shape = (*in_attrs)[0];
  if (!shape_is_known(data_shape)) {
    return false;
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, data_shape);
  return true;
}
// Kernel that writes a single scalar value into every element of a slice.
template<int ndim>
struct slice_assign_scalar {
  // i is the i-th row after flattening out into 2D tensor
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType val,
                                  const OpReqType req,
                                  const mshadow::Shape<ndim> oshape,
                                  const mshadow::Shape<ndim> vshape,
                                  const common::StaticArray<index_t, ndim> begin,
                                  const common::StaticArray<index_t, ndim> step) {
    const index_t data_last_dim_size = oshape[ndim-1];
    const index_t out_last_dim_size = vshape[ndim-1];
    const index_t step_last_dim = step[ndim-1];
    const index_t begin_last_dim = begin[ndim-1];
    for (index_t j = 0; j < out_last_dim_size; ++j) {
      index_t irow = 0;  // row id of flattend 2D out
      index_t stride = 1;
      index_t idx = i;
#pragma unroll
      // Map the slice coordinate through begin/step to the destination
      // row offset in out.
      for (int k = ndim - 2; k >= 0; --k) {
        irow += stride * ((idx % vshape[k]) * step[k] + begin[k]);
        idx /= vshape[k];
        stride *= oshape[k];
      }
      KERNEL_ASSIGN(out[irow * data_last_dim_size + j * step_last_dim + begin_last_dim], req, val);
    }
  }
};
// Forward compute for slice-assign-scalar (data[slice] = scalar): copies data
// to the output (unless writing in place), then fills the sliced region.
// Fixes: b/e/s were declared `int`, narrowing `index_t` values from
// GetIndexRange (truncation risk on large dims); the loop counter was
// `index_t` while `param.begin.ndim()` is int — both now match the sibling
// shape functions (int loop index, index_t bounds).
template<typename xpu>
void SliceAssignScalarOpForward(const nnvm::NodeAttrs& attrs,
                                const OpContext& ctx,
                                const std::vector<TBlob>& inputs,
                                const std::vector<OpReqType>& req,
                                const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  using namespace mshadow;
  Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& data = inputs[0];
  const TBlob& out = outputs[0];
  if (req[0] == kWriteTo) {
    // Out-of-slice positions must hold the original data values.
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      Tensor<xpu, 1, DType> in = inputs[0].FlatTo1D<xpu, DType>(s);
      Tensor<xpu, 1, DType> out = outputs[0].FlatTo1D<xpu, DType>(s);
      Copy(out, in, s);
    });
  } else if (req[0] != kWriteInplace) {
    LOG(FATAL) << "_crop_assign_scalar only supports kWriteTo and kWriteInplace";
  }
  // vshape holds the shape of the sliced region being filled.
  mxnet::TShape vshape = data.shape_;
  const SliceAssignScalarParam& param = nnvm::get<SliceAssignScalarParam>(attrs.parsed);
  MXNET_NDIM_SWITCH(data.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    bool zero_size_shape = GetIndexRange(data.shape_, param.begin, param.end, param.step,
                                         &begin, &end, &step);
    if (zero_size_shape) {
      return;  // slice_assign of zero-sized subspace needs no operation.
    }
    for (int i = 0; i < param.begin.ndim(); ++i) {
      const index_t b = begin[i], e = end[i], st = step[i];
      SetSliceOpOutputDimSize(data.shape_, i, b, e, st, &vshape);
    }
    MSHADOW_TYPE_SWITCH(out.type_flag_, DType, {
      mxnet_op::Kernel<slice_assign_scalar<ndim>, xpu>::Launch(s, vshape.FlatTo2D()[0],
          out.dptr<DType>(), static_cast<DType>(param.scalar), req[0],
          out.shape_.get<ndim>(), vshape.get<ndim>(), begin, step);
    })
  })
}
// Parameters for the slice_axis op: slice [begin, end) along a single axis.
struct SliceAxisParam : public dmlc::Parameter<SliceAxisParam> {
  int axis;                      // axis to slice; may be negative
  index_t begin;                 // start index; may be negative
  dmlc::optional<index_t> end;   // end index (exclusive); absent means axis end
  DMLC_DECLARE_PARAMETER(SliceAxisParam) {
    DMLC_DECLARE_FIELD(axis)
      .describe("Axis along which to be sliced, supports negative indexes.");
    DMLC_DECLARE_FIELD(begin)
      .describe("The beginning index along the axis to be sliced, "
                " supports negative indexes.");
    DMLC_DECLARE_FIELD(end)
      .describe("The ending index along the axis to be sliced, "
                " supports negative indexes.");
  }
};
// Resolve the slice_axis parameters against a concrete input shape:
// normalizes a negative axis, resolves negative/absent begin/end against the
// axis length, and validates the resulting [begin, end) range.
// Fix: corrected the misspelled "Recieved" in the user-facing error message.
inline void GetSliceAxisParams(const SliceAxisParam& param, const mxnet::TShape& ishape,
                               int* axis, index_t* begin, index_t* end) {
  *axis = param.axis;
  if (*axis < 0) {
    *axis += ishape.ndim();
  }
  CHECK(*axis < ishape.ndim() && *axis >= 0) <<
    "Transformed axis must be smaller than the source ndim and larger than zero! Received axis=" <<
    param.axis << ", src_ndim=" << ishape.ndim() << ", transformed axis=" << *axis;
  index_t axis_size = static_cast<index_t>(ishape[*axis]);
  *begin = param.begin;
  *end = -1;
  if (*begin < 0) {
    *begin += axis_size;
  }
  if (axis_size > 0) {
    // Absent end means "to the end of the axis"; negative end wraps around.
    if (!static_cast<bool>(param.end)) {
      *end = axis_size;
    } else {
      *end = param.end.value();
      if (*end < 0) {
        *end += axis_size;
      }
    }
    CHECK(*end <= axis_size) << "Invalid end for end=" << *end << " as axis_size is " << axis_size;
    CHECK((*begin < *end))
      << "Invalid begin, end, get begin=" << param.begin << ", end=" << param.end;
  } else {
    // Zero-length axis: only the empty slice is valid.
    *begin = 0;
    *end = 0;
  }
  CHECK(*end >= 0)
    << "Invalid begin, end, get begin=" << param.begin << ", end=" << param.end;
  CHECK(*begin >= 0) << "Invalid begin for begin=" << param.begin;
}
// Shape inference for slice_axis: the output equals the input shape except
// that the sliced axis has length end - begin.
inline bool SliceAxisShape(const nnvm::NodeAttrs& attrs,
                           mxnet::ShapeVector *in_attrs,
                           mxnet::ShapeVector *out_attrs) {
  const SliceAxisParam& param = nnvm::get<SliceAxisParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  mxnet::TShape& ishape = (*in_attrs)[0];
  if (!mxnet::ndim_is_known(ishape)) return false;
  int axis;
  index_t begin, end;
  GetSliceAxisParams(param, ishape, &axis, &begin, &end);
  if (!mxnet::dim_size_is_known(ishape, axis)) {
    // The sliced axis is unknown; propagate and report shape as incomplete.
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, ishape);
    return false;
  }
  mxnet::TShape result(ishape.ndim(), -1);
  for (int d = 0; d < ishape.ndim(); ++d) {
    result[d] = (d == axis) ? static_cast<index_t>(end - begin) : ishape[d];
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, result);
  return shape_is_known(result);
}
// Forward compute for slice_axis: flattens around the sliced axis and uses
// mshadow's slice expression.
template<typename xpu>
void SliceAxis(const nnvm::NodeAttrs& attrs,
               const OpContext& ctx,
               const std::vector<TBlob>& inputs,
               const std::vector<OpReqType>& req,
               const std::vector<TBlob>& outputs) {
  using namespace mshadow::expr;
  const SliceAxisParam& param = nnvm::get<SliceAxisParam>(attrs.parsed);
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  int axis;
  index_t begin, end;
  GetSliceAxisParams(param, inputs[0].shape_, &axis, &begin, &end);
  int ndim = outputs[0].ndim();
  if (axis + 1 == ndim) {
    // Last axis: a 2-D flattening puts the sliced axis in dim 1.
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      mshadow::Tensor<xpu, 2, DType> in =
          inputs[0].FlatTo2D<xpu, DType>(s);
      mshadow::Tensor<xpu, 2, DType> out =
          outputs[0].FlatTo2D<xpu, DType>(s);
      ASSIGN_DISPATCH(out, req[0], slice<1>(in, begin, end));
    });
  } else {
    // Interior axis: a 3-D flattening centers the sliced axis in dim 1.
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      mshadow::Tensor<xpu, 3, DType> in =
          inputs[0].FlatTo3D<xpu, DType>(axis, s);
      mshadow::Tensor<xpu, 3, DType> out =
          outputs[0].FlatTo3D<xpu, DType>(axis, s);
      ASSIGN_DISPATCH(out, req[0], slice<1>(in, begin, end));
    });
  }
}
// Backward pass of slice_axis: scatters the output gradient back into the
// sliced region of the input gradient (zero-filling the rest for kWriteTo).
template<typename xpu>
void SliceAxisGrad_(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  if (outputs[0].shape_.Size() == 0) {
    return;
  }
  const SliceAxisParam& param = nnvm::get<SliceAxisParam>(attrs.parsed);
  using namespace mshadow::op;
  using namespace mshadow::expr;
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  int axis;
  index_t begin, end;
  // Ranges are resolved against the input-gradient shape (the data shape).
  GetSliceAxisParams(param, outputs[0].shape_, &axis, &begin, &end);
  int ndim = outputs[0].shape_.ndim();
  if (axis + 1 == ndim) {
    // Last axis: 2-D flattening, sliced axis in dim 1.
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      mshadow::Tensor<xpu, 2, DType> ograd =
          inputs[0].FlatTo2D<xpu, DType>(s);
      mshadow::Tensor<xpu, 2, DType> igrad =
          outputs[0].FlatTo2D<xpu, DType>(s);
      if (req[0] == kAddTo) {
        slice<1>(igrad, begin, end) += F<identity>(ograd);
      } else if (req[0] == kWriteTo) {
        igrad = 0.0f;
        slice<1>(igrad, begin, end) = F<identity>(ograd);
      } else {
        CHECK_EQ(req[0], kNullOp);
      }
    });
  } else {
    // Interior axis: 3-D flattening, sliced axis in dim 1.
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      mshadow::Tensor<xpu, 3, DType> ograd =
          inputs[0].FlatTo3D<xpu, DType>(axis, s);
      mshadow::Tensor<xpu, 3, DType> igrad =
          outputs[0].FlatTo3D<xpu, DType>(axis, s);
      if (req[0] == kAddTo) {
        slice<1>(igrad, begin, end) += F<identity>(ograd);
      } else if (req[0] == kWriteTo) {
        igrad = 0.0f;
        slice<1>(igrad, begin, end) = F<identity>(ograd);
      } else {
        CHECK_EQ(req[0], kNullOp);
      }
    });
  }
}
// Parameters for the slice_like op: slice the first input so that the
// selected axes match the second input's sizes.
struct SliceLikeParam : public dmlc::Parameter<SliceLikeParam> {
  mxnet::Tuple<int> axes;  // axes to slice; empty means all axes
  DMLC_DECLARE_PARAMETER(SliceLikeParam) {
    DMLC_DECLARE_FIELD(axes).set_default(mxnet::Tuple<int>())
    .describe("List of axes on which input data will be sliced according to the "
              "corresponding size of the second input. By default will slice on "
              "all axes. Negative axes are supported.");
  }
};
// Shape inference for slice_like: the output takes the second input's size on
// each selected axis (all axes when none are given) and the first input's
// size elsewhere; validates that the slice fits inside the first input.
inline bool SliceLikeShape(const nnvm::NodeAttrs& attrs,
                           mxnet::ShapeVector *in_attrs,
                           mxnet::ShapeVector *out_attrs) {
  const SliceLikeParam& param = nnvm::get<SliceLikeParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 2U);
  CHECK_EQ(out_attrs->size(), 1U);
  mxnet::TShape& ishape = (*in_attrs)[0];
  mxnet::TShape& from_shape = (*in_attrs)[1];
  if (param.axes.ndim() == 0) {
    // No axes given: slice on every axis, so the ndims must agree.
    CHECK_EQ(ishape.ndim(), from_shape.ndim())
      << "By default slice_axis performs slice on all axes, but ndim mismatch "
         "for inputs: " << ishape.ndim() << " vs. " << from_shape.ndim();
    for (int i = 0; i < ishape.ndim(); ++i) {
      CHECK_GE(ishape[i], from_shape[i])
        << "Slice axis " << i << " with size " << from_shape[i]
        << "exceeds limit of input with size " << ishape[i];
    }
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, from_shape);
  } else {
    // Start from the first input's shape; overwrite only the selected axes.
    mxnet::TShape shape(ishape);
    for (int i = 0; i < param.axes.ndim(); ++i) {
      int axis = param.axes[i];
      if (axis < 0) {
        axis += ishape.ndim();
      }
      CHECK_GE(axis, 0)
        << "Slice axis: " << param.axes[i] << " too small";
      CHECK_GT(ishape.ndim(), axis)
        << "Slice axis: " << axis << " exceeds first input: " << ishape.ndim();
      CHECK_GT(from_shape.ndim(), axis)
        << "Slice axis: " << axis << " exceeds second input: " << from_shape.ndim();
      shape[axis] = from_shape[axis];
      CHECK_GE(ishape[axis], from_shape[axis])
        << "Slice axis " << axis << " with size " << from_shape[axis]
        << "exceeds limit of input with size " << ishape[axis];
    }
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
  }
  return true;
}
// Translate slice_like's axes parameter into explicit begin/end/step tuples
// ([0, fshape[axis]) with step 1 on sliced axes, unset elsewhere) so the
// generic GetIndexRange machinery can be reused.
// Fix: the fshape bound check's message said "exceeds first input" — a
// copy-paste of the dshape check above it; it now says "second input",
// matching the equivalent check in SliceLikeShape.
inline void SliceLikeInferRanges(const mxnet::TShape& dshape,
                                 const mxnet::TShape& fshape,
                                 const mxnet::Tuple<int>& axes,
                                 mxnet::Tuple<dmlc::optional<index_t>>* param_begin,
                                 mxnet::Tuple<dmlc::optional<index_t>>* param_end,
                                 mxnet::Tuple<dmlc::optional<index_t>>* param_step) {
  std::vector<dmlc::optional<index_t>> pb(dshape.ndim());
  std::vector<dmlc::optional<index_t>> pe(dshape.ndim());
  std::vector<dmlc::optional<index_t>> ps(dshape.ndim());
  if (axes.ndim() == 0) {
    // Empty axes list: slice every axis to the second input's size.
    for (int i = 0; i < dshape.ndim(); ++i) {
      pb[i] = 0;
      pe[i] = fshape[i];
      ps[i] = 1;
    }
  } else {
    for (int i = 0; i < axes.ndim(); ++i) {
      int axis = axes[i];
      if (axis < 0) {
        axis += dshape.ndim();
      }
      CHECK_GE(axis, 0)
        << "Slice axis: " << axes[i] << " too small";
      CHECK_LT(axis, dshape.ndim())
        << "Slice axis: " << axis << " exceeds first input: " << dshape.ndim();
      CHECK_LT(axis, fshape.ndim())
        << "Slice axis: " << axis << " exceeds second input: " << fshape.ndim();
      pb[axis] = 0;
      pe[axis] = fshape[axis];
      ps[axis] = 1;
    }
  }
  *param_begin = mxnet::Tuple<dmlc::optional<index_t>>(pb.begin(), pb.end());
  *param_end = mxnet::Tuple<dmlc::optional<index_t>>(pe.begin(), pe.end());
  *param_step = mxnet::Tuple<dmlc::optional<index_t>>(ps.begin(), ps.end());
}
// Forward compute for slice_like: derives begin/end/step from the second
// input's shape, then runs the regular slice_forward kernel.
// Fix: the SliceLikeInferRanges call contained mojibake "¶m_*" — an
// HTML-entity corruption of "&param_*" — which does not compile.
template<typename xpu>
void SliceLikeForward(const nnvm::NodeAttrs& attrs,
                      const OpContext& ctx,
                      const std::vector<TBlob>& inputs,
                      const std::vector<OpReqType>& req,
                      const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 2U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  using namespace mshadow::expr;
  const SliceLikeParam& param = nnvm::get<SliceLikeParam>(attrs.parsed);
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& data = inputs[0];
  const TBlob& out = outputs[0];
  const mxnet::TShape& ishape = data.shape_;
  const mxnet::TShape& from_shape = inputs[1].shape_;
  mxnet::Tuple<dmlc::optional<index_t>> param_begin;
  mxnet::Tuple<dmlc::optional<index_t>> param_end;
  mxnet::Tuple<dmlc::optional<index_t>> param_step;
  SliceLikeInferRanges(ishape, from_shape, param.axes, &param_begin, &param_end, &param_step);
  MXNET_NDIM_SWITCH(data.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(data.shape_, param_begin, param_end, param_step, &begin, &end, &step);
    MSHADOW_TYPE_SWITCH(out.type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        // CPU kernel parallelizes over rows; GPU over individual elements.
        int num_threads = out.shape_.FlatTo2D()[0];
        if (std::is_same<xpu, gpu>::value) {
          num_threads *= out.shape_.get<ndim>()[ndim - 1];
        }
        mxnet_op::Kernel<slice_forward<ndim, Req, xpu>, xpu>::Launch(s,
            num_threads, out.dptr<DType>(), data.dptr<DType>(),
            data.shape_.get<ndim>(), out.shape_.get<ndim>(), begin, step);
      })
    })
  })
}
// Backward compute for slice_like: the second input receives zero gradient;
// the first input's gradient is the output gradient scattered back into the
// sliced region via slice_assign.
// Fix: the SliceLikeInferRanges call contained mojibake "¶m_*" — an
// HTML-entity corruption of "&param_*" — which does not compile.
template<typename xpu>
void SliceLikeBackward(const nnvm::NodeAttrs& attrs,
                       const OpContext& ctx,
                       const std::vector<TBlob>& inputs,
                       const std::vector<OpReqType>& req,
                       const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 2U);
  CHECK_EQ(req.size(), 2U);
  using namespace mshadow;
  Stream<xpu>* s = ctx.get_stream<xpu>();
  if (req[1] != kNullOp && req[1] != kAddTo) {
    Fill(s, outputs[1], req[1], 0);  // Second input not relevant to gradients.
  }
  if (req[0] == kNullOp) return;
  const TBlob& ograd = inputs[0];
  const TBlob& igrad = outputs[0];
  const SliceLikeParam& param = nnvm::get<SliceLikeParam>(attrs.parsed);
  if (req[0] == kWriteTo) {
    // Positions outside the slice receive zero gradient.
    Fill(s, igrad, req[0], 0);
  } else if (req[0] == kWriteInplace) {
    LOG(FATAL) << "_slice_like_backward does not support kWriteInplace";
  }
  const mxnet::TShape& ishape = ograd.shape_;
  const mxnet::TShape& from_shape = outputs[1].shape_;
  mxnet::Tuple<dmlc::optional<index_t>> param_begin;
  mxnet::Tuple<dmlc::optional<index_t>> param_end;
  mxnet::Tuple<dmlc::optional<index_t>> param_step;
  SliceLikeInferRanges(ishape, from_shape, param.axes, &param_begin, &param_end, &param_step);
  MXNET_NDIM_SWITCH(ograd.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(ograd.shape_, param_begin, param_end, param_step, &begin, &end, &step);
    MSHADOW_TYPE_SWITCH(ograd.type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        int num_threads = ograd.shape_.FlatTo2D()[0];
        if (std::is_same<xpu, gpu>::value) {
          num_threads *= ograd.shape_.get<ndim>()[ndim - 1];
        }
        mxnet_op::Kernel<slice_assign<ndim, Req, xpu>, xpu>::Launch(s, num_threads,
            igrad.dptr<DType>(), ograd.dptr<DType>(),
            igrad.shape_.get<ndim>(), ograd.shape_.get<ndim>(), begin, step);
      })
    })
  })
}
// Parameters of the clip operator: every element is clamped to [a_min, a_max].
struct ClipParam : public dmlc::Parameter<ClipParam> {
real_t a_min, a_max;  // lower / upper clamp bounds
DMLC_DECLARE_PARAMETER(ClipParam) {
DMLC_DECLARE_FIELD(a_min)
.describe("Minimum value");
DMLC_DECLARE_FIELD(a_max)
.describe("Maximum value");
}
};
/*! \brief Element-wise kernel that saturates each value into [a_min, a_max]. */
struct clip {
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* datas,
                                  const float a_min, const float a_max) {
    const DType value = datas[i];
    // Clamp: values above a_max saturate high, below a_min saturate low,
    // everything else passes through unchanged.
    out[i] = (value > a_max) ? static_cast<DType>(a_max)
           : (value < a_min) ? static_cast<DType>(a_min)
                             : value;
  }
};
/*!
 * \brief Gradient kernel of clip: passes the incoming gradient through where
 * the forward input lay inside [a_min, a_max], and emits zero where the
 * forward output was saturated.
 */
struct clip_grad {
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* grad, const DType* datas,
                                  const float a_min, const float a_max) {
    const DType value = datas[i];
    const bool saturated = (value > a_max) || (value < a_min);
    out[i] = saturated ? static_cast<DType>(0) : grad[i];
  }
};
// Forward compute of the clip operator: launches the element-wise clip kernel
// over the whole output buffer. Input and output must share the same dtype.
template<typename xpu>
void Clip(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
const ClipParam& param = nnvm::get<ClipParam>(attrs.parsed);
CHECK_EQ(inputs[0].type_flag_, outputs[0].type_flag_);
Stream<xpu> *s = ctx.get_stream<xpu>();
// One kernel thread per output element.
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
mxnet_op::Kernel<mxnet::op::clip, xpu>::Launch(s, outputs[0].Size(), outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(),
param.a_min, param.a_max);
});
}
// Sparse-storage entry point for clip: validates that input/output agree on
// dtype and (non-default) storage type, then forwards to the dense Clip
// kernel via the generic UnaryOp dispatch helper.
template<typename xpu>
void ClipEx(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<NDArray>& inputs,
const std::vector<OpReqType>& req,
const std::vector<NDArray>& outputs) {
CHECK_EQ(inputs[0].dtype(), outputs[0].dtype());
CHECK_EQ(inputs[0].storage_type(), outputs[0].storage_type());
// Dense storage should go through Clip directly, not this path.
CHECK_NE(inputs[0].storage_type(), kDefaultStorage);
UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Clip<xpu>);
}
// Backward compute of clip: inputs[0] is the output gradient, inputs[1] the
// forward input data; the gradient is zeroed where the data was clipped.
template<typename xpu>
void ClipGrad_(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
using namespace mxnet_op;
const ClipParam& param = nnvm::get<ClipParam>(attrs.parsed);
CHECK_EQ(inputs[0].type_flag_, outputs[0].type_flag_);
Stream<xpu> *s = ctx.get_stream<xpu>();
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
Kernel<clip_grad, xpu>::Launch(s, outputs[0].Size(), outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(), inputs[1].dptr<DType>(), param.a_min, param.a_max);
});
}
/*!
* \brief The parameters of the repeat operator include
* the number of repeating time and axis (optional).
* The parameters will be later used to deduce the
* output ndarray shape in bool RepeatShape() function.
*/
struct RepeatParam : public dmlc::Parameter<RepeatParam> {
int repeats = 1;  // how many times each element is repeated
dmlc::optional<int> axis;  // axis to repeat along; unset => flatten first
DMLC_DECLARE_PARAMETER(RepeatParam) {
DMLC_DECLARE_FIELD(repeats)
.describe("The number of repetitions for each element.");
DMLC_DECLARE_FIELD(axis)
.set_default(dmlc::optional<int>())
.describe("The axis along which to repeat values."
" The negative numbers are interpreted counting from the backward."
" By default, use the flattened input array,"
" and return a flat output array.");
}
};
/*!
* \brief Helper function for getting user input params for the operator repeat.
* Sanity check the user input values.
*/
/*!
 * \brief Extract and sanity-check the repeat operator's user parameters.
 * \param param   parsed operator attributes
 * \param ishape  shape of the input tensor (used to bounds-check the axis)
 * \param repeats out: number of repetitions (checked non-negative)
 * \param axisOpt out: optional axis, validated against ishape if present
 */
inline void GetRepeatParams(const RepeatParam& param, const mxnet::TShape& ishape,
                            int* repeats, dmlc::optional<int>* axisOpt) {
  *repeats = param.repeats;
  CHECK_GE(*repeats, 0) << "repeats cannot be a negative number";
  *axisOpt = param.axis;
  if (!static_cast<bool>(*axisOpt)) return;  // no axis given: nothing to validate
  const int ndims = ishape.ndim();
  int ax = axisOpt->value();
  if (ax < 0) {
    ax += ndims;  // normalize a negative (from-the-end) axis
  }
  CHECK(ax >= 0 && ax < ndims) << "axis = " << axisOpt->value() << " out of bounds";
}
// Shape inference for the repeat operator.
// - repeats == 0: output is an empty 1-D, 0-size array.
// - axis given:   the size of that axis is multiplied by repeats.
// - no axis:      output is a flat 1-D array of size ishape.Size() * repeats.
inline bool RepeatOpShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
const RepeatParam& param = nnvm::get<RepeatParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
const mxnet::TShape& ishape = (*in_attrs)[0];
int repeats = 0;
dmlc::optional<int> axisOpt;
GetRepeatParams(param, ishape, &repeats, &axisOpt);
// If 0 repeats, return an empty 1-dim, 0-size array
if (0 == repeats) {
SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape(1, 0));
return true;
}
// If repeats > 0, multiply the size of the corresponding axis by repeats
if (static_cast<bool>(axisOpt)) {
int ndims = ishape.ndim();
int axis = axisOpt.value();
if (axis < 0) {
axis += ndims;  // normalize negative axis (already validated above)
}
mxnet::TShape shape(ishape.ndim(), -1);
for (int i = 0; i < ishape.ndim(); ++i) {
if (i == axis) {
shape[i] = repeats * ishape[i];
} else {
shape[i] = ishape[i];
}
}
SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
} else { // If axis is not input by user, return a flat 1D array of size = in.size*repeats
mxnet::TShape shape(1, ishape.Size() * repeats);
SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
}
return shape_is_known(out_attrs->at(0));
}
// Type inference for repeat: the dtype flows in whichever direction is
// already known (input -> output, otherwise output -> input).
inline bool RepeatOpType(const nnvm::NodeAttrs& attrs,
                         std::vector<int>* in_attrs,
                         std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  const int in_type = (*in_attrs)[0];
  if (in_type != -1) {
    TYPE_ASSIGN_CHECK(*out_attrs, 0, in_type);
  } else if ((*out_attrs)[0] != -1) {
    TYPE_ASSIGN_CHECK(*in_attrs, 0, (*out_attrs)[0]);
  }
  return true;
}
/*!
* \brief Reshape the input and output tensors for
* using broadcast_to to achieve the functionality
* of operator repeat.
* \return a pair of mxnet::TShape's, first is the reshaped
* input shape, second is the reshaped output shape.
*/
inline std::pair<mxnet::TShape, mxnet::TShape> ReshapeInputOutputForRepeatOp(
const mxnet::TShape& ishape,
const dmlc::optional<int>& axisOpt,
const int repeats) {
if (static_cast<bool>(axisOpt)) {
int axis = axisOpt.value();
int ndim = ishape.ndim();
if (axis < 0) {
axis += ndim;  // normalize negative axis
}
CHECK(axis >= 0 && axis < ishape.ndim()) << "Invalid input of axis";
// reshape the input tensor by adding a dim at the (axis+1)-th dim
mxnet::TShape rshape(ishape.ndim()+1, 1);
// the shape we want to broadcast to
mxnet::TShape bshape(rshape.ndim(), 1);
// Dims up to and including `axis` are copied unchanged to both shapes.
int i = 0;
while (i <= axis) {
rshape[i] = bshape[i] = ishape[i];
++i;
}
// The inserted dim is 1 in the input view and `repeats` in the broadcast
// target; broadcasting along it produces the repetition.
rshape[i] = 1;
bshape[i] = repeats;
// Remaining dims are shifted one slot to the right, unchanged.
while (i < ishape.ndim()) {
rshape[i+1] = ishape[i];
bshape[i+1] = ishape[i];
++i;
}
return std::make_pair(rshape, bshape);
} else {
// axis is not input by user
// reshape the tensor into shape (ishape.Size(), 1)
// then add one dim at axis = 1 and broadcast to
// shape (ishape.Size(), repeats)
mxnet::TShape rshape(2, 1);
rshape[0] = ishape.Size();
rshape[1] = 1;
mxnet::TShape bshape(2, 1);
bshape[0] = rshape[0];
bshape[1] = repeats;
return std::make_pair(rshape, bshape);
}
}
// Forward compute of repeat: implemented by reshaping the input (inserting a
// size-1 axis) and broadcasting it to the repeated shape via BroadcastCompute.
template<typename xpu>
void RepeatOpForward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
const TBlob& iTBlob = inputs[0];
const mxnet::TShape& ishape = iTBlob.shape_;
if (!shape_is_known(ishape)) return;
int repeats = 0;
dmlc::optional<int> axisOpt;
const RepeatParam& param = nnvm::get<RepeatParam>(attrs.parsed);
GetRepeatParams(param, ishape, &repeats, &axisOpt);
// Zero repeats produces an empty output; nothing to compute.
if (0 == repeats) return;
std::pair<mxnet::TShape, mxnet::TShape> rshapes = \
ReshapeInputOutputForRepeatOp(ishape, axisOpt, repeats);
// reshaped input tblob
TBlob iblob(inputs[0].dptr_, rshapes.first, inputs[0].dev_mask(),
inputs[0].type_flag_, inputs[0].dev_id());
std::vector<TBlob> newInputs = {iblob};
// reshaped output tblob
TBlob oblob(outputs[0].dptr_, rshapes.second, outputs[0].dev_mask(),
outputs[0].type_flag_, outputs[0].dev_id());
std::vector<TBlob> newOutputs = {oblob};
BroadcastCompute<xpu>(attrs, ctx, newInputs, req, newOutputs);
}
/*!
* \brief Compute the gradient of the loss function
* with respect to the input of the operator.
* Backpropagation is employed to implement the
* chain rule.
* \param inputs the gradient of the loss function
* with respect to the outputs of the operator
* \param outputs the gradient of the loss function
* with respect to the inputs of the operator
*/
// Backward compute of repeat: the forward was a broadcast, so the gradient is
// the corresponding sum-reduction over the inserted (repeated) axis.
template<typename xpu>
void RepeatOpBackward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
const mxnet::TShape& oshape = outputs[0].shape_;
if (!shape_is_known(oshape)) return;
int repeats = 0;
dmlc::optional<int> axisOpt;
const RepeatParam& param = nnvm::get<RepeatParam>(attrs.parsed);
// Shapes are derived from the (un-repeated) input-grad shape.
GetRepeatParams(param, oshape, &repeats, &axisOpt);
if (0 == repeats) return;
std::pair<mxnet::TShape, mxnet::TShape> rshapes =
ReshapeInputOutputForRepeatOp(oshape, axisOpt, repeats);
// reshaped output grad tblob
TBlob oblob(outputs[0].dptr_, rshapes.first, outputs[0].dev_mask(),
outputs[0].type_flag_, outputs[0].dev_id());
std::vector<TBlob> newOutputs = {oblob};
// reshaped input grad tblob
TBlob iblob(inputs[0].dptr_, rshapes.second, inputs[0].dev_mask(),
inputs[0].type_flag_, inputs[0].dev_id());
std::vector<TBlob> newInputs = {iblob};
// Sum-reduce the broadcast axes back down to rshapes.first.
ReduceAxesComputeImpl<xpu, mshadow::red::sum, false, false>(
ctx, newInputs, req, newOutputs, rshapes.first);
}
// Parameters of the tile operator: per-axis repetition counts.
struct TileParam : public dmlc::Parameter<TileParam> {
mxnet::Tuple<int> reps;  // repetition count for each axis
DMLC_DECLARE_PARAMETER(TileParam) {
DMLC_DECLARE_FIELD(reps)
.describe("The number of times for repeating the tensor a. Each dim size of reps"
" must be a positive integer."
" If reps has length d, the result will have dimension of max(d, a.ndim);"
" If a.ndim < d, a is promoted to be d-dimensional by prepending new axes."
" If a.ndim > d, reps is promoted to a.ndim by pre-pending 1's to it.");
}
};
// Shape inference for tile: the output has ndim = max(ishape.ndim, reps.ndim);
// the two are right-aligned and the overlapping dims are multiplied.
inline bool TileOpShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
const TileParam& param = nnvm::get<TileParam>(attrs.parsed);
const mxnet::TShape& ishape = (*in_attrs)[0];
if (!shape_is_known(ishape)) {
return false;
}
const mxnet::Tuple<int>& reps = param.reps;
// If reps is empty, return an identical input array
if (reps.ndim() == 0) {
SHAPE_ASSIGN_CHECK(*out_attrs, 0, ishape);
return true;
}
mxnet::TShape oshape(std::max(ishape.ndim(), reps.ndim()), -1);
// Walk both shapes from the trailing dim; the shorter one runs out first.
int i1 = ishape.ndim() - 1;
int i2 = reps.ndim() - 1;
for (int i = oshape.ndim() - 1; i >= 0; --i) {
if (i1 >= 0 && i2 >= 0) {
oshape[i] = ishape[i1--] * reps[i2--];
} else if (i1 >= 0) {
oshape[i] = ishape[i1--];
} else if (i2 >= 0) {
oshape[i] = reps[i2--];
}
}
// If reps contains 0s, oshape is a zero-size shape.
// Need to distinguish between np_shape mode and legacy mode.
if (!Imperative::Get()->is_np_shape()) {
common::ConvertToNumpyShape(&oshape);
}
SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
return shape_is_known(oshape);
}
// Type inference for tile: propagate the dtype in whichever direction is
// already known (input -> output, otherwise output -> input).
inline bool TileOpType(const nnvm::NodeAttrs& attrs,
                       std::vector<int>* in_attrs,
                       std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  const int in_type = (*in_attrs)[0];
  if (in_type != -1) {
    TYPE_ASSIGN_CHECK(*out_attrs, 0, in_type);
  } else if ((*out_attrs)[0] != -1) {
    TYPE_ASSIGN_CHECK(*in_attrs, 0, (*out_attrs)[0]);
  }
  return true;
}
/*!
* \brief Reshape the input and output tensors for
* using broadcast_to to achieve the functionality
* of operator tile.
* \return a pair of mxnet::TShape's, first is the reshaped
* input shape, second is the reshaped output shape.
*/
inline std::pair<mxnet::TShape, mxnet::TShape> ReshapeInputOutputForTileOp(
const mxnet::TShape& ishape,
const mxnet::Tuple<int>& reps) {
// Empty reps: tile is the identity, no reshape needed.
if (reps.ndim() == 0) {
return std::make_pair(ishape, ishape);
}
// The shape we want to broadcast to
mxnet::TShape bshape(std::max(ishape.ndim(), reps.ndim()) * 2, 1);
// The shape of the input tensor after adding new axes before each dim
mxnet::TShape rshape(bshape.ndim(), 1);
// Fill from the trailing dim: even slots carry the rep counts (1 in the
// input view), odd slots carry the original dims. Missing dims/reps are 1.
int i1 = ishape.ndim() - 1;
int i2 = reps.ndim() - 1;
for (int i = bshape.ndim() - 1; i >= 0; --i) {
if (0 == (i & 1)) {
bshape[i] = (i2 >= 0? reps[i2--] : 1);
rshape[i] = 1;
} else {
rshape[i] = bshape[i] = (i1 >= 0? ishape[i1--] : 1);
}
}
return std::make_pair(rshape, bshape);
}
/*!
* \brief Implementation of tiling the input tensor a based
* on the user-input shape, reps.
* If a.ndim < reps.ndim, new axes are pre-pended to a. For example,
* the input tensor has shape (3,), and the reps is (2, 4); the input
* tensor would be reshaped to (1, 3).
* If a.ndim > reps.ndim, pre-pending 1's to reps. For example,
* the input tensor has shape (2, 3, 4, 5), and reps is (2, 2);
* the reps would be changed to (1, 1, 2, 2).
* Suppose we have a.ndim = reps.ndim now. To achieve tiling,
* we utilize the operator broadcast_to. For example, for a tensor
* of shape (2, 3, 4, 5) and reps (2, 8, 9, 3), we first reshape
* the tensor to the shape (1, 2, 1, 3, 1, 4, 1, 5) by adding
* one axis before each dimension. Then, we want to broadcast
* the new tensor to shape (2, 2, 8, 3, 9, 4, 3, 5). The final
* output tensor would have shape (2*2, 8*3, 9*4, 3*5).
*/
// Forward compute of tile: reshape the input with a size-1 axis before every
// dim, then broadcast to the tiled shape (see the explanation comment above).
template<typename xpu>
void TileOpForward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
if (inputs[0].Size() == 0) return;
const mxnet::TShape& ishape = inputs[0].shape_;
const mxnet::Tuple<int>& reps = nnvm::get<TileParam>(attrs.parsed).reps;
// If any one of the number in reps is zero, return immediately
for (int i = 0; i < reps.ndim(); ++i) {
if (0 == reps[i]) return;
}
std::pair<mxnet::TShape, mxnet::TShape> rshapes = ReshapeInputOutputForTileOp(ishape, reps);
// reshaped input tblob
TBlob iblob(inputs[0].dptr_, rshapes.first, inputs[0].dev_mask(),
inputs[0].type_flag_, inputs[0].dev_id());
std::vector<TBlob> newInputs = {iblob};
// reshaped output tblob
TBlob oblob(outputs[0].dptr_, rshapes.second, outputs[0].dev_mask(),
outputs[0].type_flag_, outputs[0].dev_id());
std::vector<TBlob> newOutputs = {oblob};
BroadcastCompute<xpu>(attrs, ctx, newInputs, req, newOutputs);
}
/*!
* \brief Compute the gradient of the loss function
* with respect to the input of the operator.
* Backpropagation is employed to implement the
* chain rule.
* \param inputs the gradient of the loss function
* with respect to the outputs of the operator
* \param outputs the gradient of the loss function
* with respect to the inputs of the operator
*/
// Backward compute of tile: the forward was a broadcast, so the gradient is a
// sum-reduction of the output gradient back to the un-tiled shape.
template<typename xpu>
void TileOpBackward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
if (inputs[0].Size() == 0) return;
// Shapes are derived from the (un-tiled) input-grad shape.
const mxnet::TShape& oshape = outputs[0].shape_;
const mxnet::Tuple<int>& reps = nnvm::get<TileParam>(attrs.parsed).reps;
// If any one of the number in reps is zero, return immediately
for (int i = 0; i < reps.ndim(); ++i) {
if (0 == reps[i]) return;
}
std::pair<mxnet::TShape, mxnet::TShape> rshapes = ReshapeInputOutputForTileOp(oshape, reps);
// reshaped output grad tblob
TBlob oblob(outputs[0].dptr_, rshapes.first, outputs[0].dev_mask(),
outputs[0].type_flag_, outputs[0].dev_id());
std::vector<TBlob> newOutputs = {oblob};
// reshaped input grad tblob
TBlob iblob(inputs[0].dptr_, rshapes.second, inputs[0].dev_mask(),
inputs[0].type_flag_, inputs[0].dev_id());
std::vector<TBlob> newInputs = {iblob};
// Sum-reduce the broadcast (repetition) axes back down to rshapes.first.
ReduceAxesComputeImpl<xpu, mshadow::red::sum, false, false>(
ctx, newInputs, req, newOutputs, rshapes.first);
}
// Parameters of the reverse operator: the set of axes to flip.
struct ReverseParam : public dmlc::Parameter<ReverseParam> {
mxnet::Tuple<int> axis;  // axes along which elements are reversed
DMLC_DECLARE_PARAMETER(ReverseParam) {
DMLC_DECLARE_FIELD(axis)
.describe("The axis which to reverse elements.");
}
};
#define REVERSE_MAX_DIM 10U
/*! \brief Kernel that scatters each input element to its mirrored position
 * along every reversed axis. */
struct reverse {
  /*!
   * \brief Map a linear index into the input tensor to the linear index of
   * its mirrored position after reversing `nreversedim` axes.
   * \param idx        linear index into the input tensor
   * \param nreversedim number of reversed axes
   * \param stride_    per reversed axis: size of that axis
   * \param trailing_  per reversed axis: product of the trailing dimensions
   */
  MSHADOW_XINLINE static index_t ReverseIndex(index_t idx,
                                              index_t nreversedim,
                                              const index_t * stride_,
                                              const index_t * trailing_) {
    index_t outputIndex = idx;
    for (index_t i = 0; i < nreversedim; ++i) {
      const index_t low = outputIndex % trailing_[i];
      index_t high = outputIndex / trailing_[i];
      const index_t x = high % stride_[i];
      high /= stride_[i];
      // Mirror the coordinate along this axis: x -> stride_[i] - 1 - x.
      outputIndex = (high * stride_[i] + stride_[i] - 1 - x) * trailing_[i] + low;
    }
    return outputIndex;
  }
#ifdef __CUDACC__
  template<typename DType>
  __device__ static void Map(index_t index, index_t nreversedim, const DType *src, DType *dst,
                             const index_t * stride_,
                             const index_t * trailing_) {
    // Stage the per-axis metadata in shared memory for repeated access.
    __shared__ index_t stride_share[REVERSE_MAX_DIM];
    __shared__ index_t trailing_share[REVERSE_MAX_DIM];
    // Copy only the entries that exist: the device workspace holds exactly
    // nreversedim strides/trailings, so reading up to REVERSE_MAX_DIM (as the
    // previous bound did) read past the end of the buffer.
    if (threadIdx.x < nreversedim) {
      stride_share[threadIdx.x] = stride_[threadIdx.x];
      trailing_share[threadIdx.x] = trailing_[threadIdx.x];
    }
    __syncthreads();
    index_t new_idx = ReverseIndex(index, nreversedim, stride_share, trailing_share);
    dst[new_idx] = src[index];
  }
#else
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t index, index_t nreversedim, const DType *src, DType *dst,
                                  const index_t * stride_,
                                  const index_t * trailing_) {
    index_t new_idx = ReverseIndex(index, nreversedim, stride_, trailing_);
    dst[new_idx] = src[index];
  }
#endif
};
// Forward compute of reverse: precompute, per reversed axis, the axis size
// (stride_) and the product of trailing dims (trailing_); the kernel then
// mirrors each element's coordinate along those axes.
template<typename xpu>
void ReverseOpForward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
using namespace mxnet_op;
const ReverseParam& param = nnvm::get<ReverseParam>(attrs.parsed);
CHECK_EQ(inputs[0].type_flag_, outputs[0].type_flag_);
CHECK_LT(param.axis.ndim(), REVERSE_MAX_DIM);
Stream<xpu> *s = ctx.get_stream<xpu>();
const mxnet::TShape& ishape = inputs[0].shape_;
std::vector<index_t> stride_(param.axis.ndim());
std::vector<index_t> trailing_(param.axis.ndim());
index_t reverse_index = 0;
for (int axis : param.axis) {
CHECK_LT(axis, ishape.ndim());
stride_[reverse_index] = ishape[axis];
trailing_[reverse_index] = 1;
for (int i2 = axis + 1; i2 < ishape.ndim(); ++i2) {
trailing_[reverse_index] *= ishape[i2];
}
reverse_index++;
}
#ifdef __CUDACC__
// On GPU the metadata must live in device memory: copy both arrays into a
// requested workspace buffer.
mshadow::Tensor<xpu, 1, uint8_t> workspace =
ctx.requested[0].get_space_typed<xpu, 1, uint8_t>(
mshadow::Shape1(reverse_index * sizeof(index_t) * 2), s);
auto stride_workspace = workspace.dptr_;
auto trailing_workspace = workspace.dptr_ + reverse_index * sizeof(index_t);
// NOTE(review): these async copies read from pageable std::vector storage
// that goes out of scope at function exit — presumably safe because
// cudaMemcpyAsync from pageable memory is not fully asynchronous, but worth
// confirming / pinning if this ever shows data corruption.
cudaMemcpyAsync(stride_workspace, thrust::raw_pointer_cast(stride_.data()),
stride_.size() * sizeof(index_t),
cudaMemcpyHostToDevice, mshadow::Stream<gpu>::GetStream(s));
cudaMemcpyAsync(trailing_workspace, thrust::raw_pointer_cast(trailing_.data()),
trailing_.size() * sizeof(index_t),
cudaMemcpyHostToDevice, mshadow::Stream<gpu>::GetStream(s));
#endif
#ifdef __CUDACC__
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
Kernel<reverse, xpu>::Launch(s, inputs[0].Size(), reverse_index,
inputs[0].dptr<DType>(), outputs[0].dptr<DType>(),
reinterpret_cast<index_t*>(stride_workspace), reinterpret_cast<index_t*>(trailing_workspace));
});
#else
// On CPU the kernel can read the host vectors directly.
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
Kernel<reverse, xpu>::Launch(s, inputs[0].Size(), reverse_index,
inputs[0].dptr<DType>(), outputs[0].dptr<DType>(),
stride_.data(), trailing_.data());
});
#endif
}
// Parameters of the stack operator.
struct StackParam : public dmlc::Parameter<StackParam> {
int axis;  // position of the new axis in the output
int num_args;  // number of arrays to stack
DMLC_DECLARE_PARAMETER(StackParam) {
DMLC_DECLARE_FIELD(axis)
.set_default(0)
.describe("The axis in the result array along which the input arrays are stacked.");
DMLC_DECLARE_FIELD(num_args).set_lower_bound(1)
.describe("Number of inputs to be stacked.");
}
};
/*!
 * \brief Shape inference for stack: all inputs must share one common shape;
 * the output is that shape with a new axis of length num_args inserted at
 * `axis`.
 */
inline bool StackOpShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector *in_attrs,
                         mxnet::ShapeVector *out_attrs) {
  const StackParam& param = dmlc::get<StackParam>(attrs.parsed);
  // Unify all input shapes into a single common shape.
  mxnet::TShape dshape;
  for (const mxnet::TShape& i : (*in_attrs)) {
    shape_assign(&dshape, i);
  }
  if (!shape_is_known(dshape)) return false;
  mxnet::TShape oshape(dshape.ndim() + 1, -1);
  int axis = CheckAxis(param.axis, oshape.ndim());
  for (int i = 0; i < axis; ++i) {
    oshape[i] = dshape[i];
  }
  oshape[axis] = param.num_args;
  // Use int here (was index_t) to match the sibling loops and avoid a
  // signed/unsigned mismatch against ndim().
  for (int i = axis + 1; i < oshape.ndim(); ++i) {
    oshape[i] = dshape[i - 1];
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
  return shape_is_known(oshape);
}
// Forward compute of stack: views every tensor as 3-D
// (leading, middle, trailing) around the stack axis, then concatenates the
// inputs (middle = 1 each) into the output (middle = num_args).
template<typename xpu>
void StackOpForward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
using namespace mshadow::expr;
const StackParam& param = dmlc::get<StackParam>(attrs.parsed);
int axis = CheckAxis(param.axis, outputs[0].ndim());
Stream<xpu> *s = ctx.get_stream<xpu>();
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
std::vector<Tensor<xpu, 3, DType> > data(inputs.size());
Tensor<xpu, 3, DType> out;
// leading/trailing = products of the dims before/after the stack axis.
size_t leading = 1, trailing = 1;
for (int i = 0; i < axis; ++i) {
leading *= outputs[0].shape_[i];
}
for (int i = axis + 1; i < outputs[0].ndim(); ++i) {
trailing *= outputs[0].shape_[i];
}
size_t mid = outputs[0].shape_[axis];
Shape<3> oshape = Shape3(leading, mid, trailing);
out = outputs[0].get_with_shape<xpu, 3, DType>(oshape, s);
for (size_t i = 0; i < inputs.size(); ++i) {
Shape<3> dshape = Shape3(leading, 1, trailing);
data[i] = inputs[i].get_with_shape<xpu, 3, DType>(dshape, s);
}
// Concatenate along the middle axis (index 1).
Concatenate(data, &out, 1, req[0]);
})
}
// Backward compute of stack: the inverse of the forward concatenation — the
// output gradient is split along the stack axis into per-input gradients.
template<typename xpu>
void StackOpBackward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
using namespace mshadow::expr;
const StackParam& param = dmlc::get<StackParam>(attrs.parsed);
int axis = CheckAxis(param.axis, inputs[0].ndim());
Stream<xpu> *s = ctx.get_stream<xpu>();
MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
std::vector<Tensor<xpu, 3, DType> > grad_in(outputs.size());
Tensor<xpu, 3, DType> grad;
// leading/trailing = products of the dims before/after the stack axis.
size_t leading = 1, trailing = 1;
for (int i = 0; i < axis; ++i) {
leading *= inputs[0].shape_[i];
}
for (int i = axis + 1; i < inputs[0].ndim(); ++i) {
trailing *= inputs[0].shape_[i];
}
size_t mid = inputs[0].shape_[axis];
Shape<3> oshape = Shape3(leading, mid, trailing);
grad = inputs[0].get_with_shape<xpu, 3, DType>(oshape, s);
for (size_t i = 0; i < outputs.size(); ++i) {
Shape<3> dshape = Shape3(leading, 1, trailing);
grad_in[i] = outputs[i].get_with_shape<xpu, 3, DType>(dshape, s);
}
// Split along the middle axis (index 1), honoring each output's req.
Split(grad, &grad_in, 1, req);
})
}
// Parameters of the squeeze operator.
struct SqueezeParam : public dmlc::Parameter<SqueezeParam> {
dmlc::optional<mxnet::Tuple<int>> axis;  // axes to remove; unset => all size-1 axes
DMLC_DECLARE_PARAMETER(SqueezeParam) {
DMLC_DECLARE_FIELD(axis)
.set_default(dmlc::optional<mxnet::Tuple<int>>())
.describe("Selects a subset of the single-dimensional entries in the shape."
" If an axis is selected with shape entry greater than one, an error is raised.");
}
};
// Given a shape whose squeezed entries have been marked with -1,
// move all the -1 entries to the end of the shape array
// while keeping the relative order of the remaining values.
// Returns the new shape size after moving all -1 entries to the end.
inline size_t SqueezeShapeHelper(mxnet::TShape* shape) {
  CHECK(shape != nullptr);
  // Stable compaction: `kept` is the next slot for a surviving entry, so
  // every non -1 value is swapped down while -1 markers drift to the tail.
  int kept = 0;
  for (int i = 0; i < shape->ndim(); ++i) {
    if ((*shape)[i] != -1) {
      std::swap((*shape)[i], (*shape)[kept]);
      ++kept;
    }
  }
  return static_cast<size_t>(kept);
}
// Shape inference for squeeze: marks each removed axis with -1 in a copy of
// the input shape, then compacts the copy with SqueezeShapeHelper.
inline bool SqueezeShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
const SqueezeParam& param = nnvm::get<SqueezeParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]";
CHECK_EQ(out_attrs->size(), 1U);
const mxnet::TShape& dshape = in_attrs->at(0);
const int dndim = dshape.ndim();
if (!shape_is_known(dshape)) return false;
mxnet::TShape oshape = dshape;
if (param.axis.has_value()) {
// preprocess axis
mxnet::Tuple<int> axes = param.axis.value();
for (int i = 0; i < axes.ndim(); ++i) {
if (axes[i] < 0) {
axes[i] += dndim;  // normalize a negative axis
CHECK_GE(axes[i], 0)
<< "axis " << axes[i] - dndim << " is out of bounds for array of dimension " << dndim;
}
CHECK_LT(axes[i], dndim)
<< "axis " << axes[i] << " is out of bounds for array of dimension " << dndim;
CHECK_EQ(dshape[axes[i]], 1)
<< "cannot select an axis to squeeze out which has size="
<< dshape[axes[i]] << " not equal to one";
// An axis already marked -1 means it appeared twice in `axes`.
CHECK_NE(oshape[axes[i]], -1) << "duplicate value in axis";
oshape[axes[i]] = -1;
}
} else {
// No explicit axes: squeeze every size-1 dimension.
for (int i = 0; i < oshape.ndim(); ++i) {
if (oshape[i] == 1) oshape[i] = -1;
}
}
size_t oshape_size = SqueezeShapeHelper(&oshape);
if (oshape_size == 0) { // corner case when dshape is (1, 1, 1, 1)
oshape[0] = 1;
oshape_size = 1;
}
SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape(oshape.data(), oshape.data()+oshape_size));
return true;
}
// Parameters shared by the depth_to_space / space_to_depth operators.
struct DepthToSpaceParam : public dmlc::Parameter<DepthToSpaceParam> {
  int block_size;  // side length of the square blocks moved between depth and space
  DMLC_DECLARE_PARAMETER(DepthToSpaceParam) {
    DMLC_DECLARE_FIELD(block_size)
    // Fixed typo in the user-facing description: "[block_size. block_size]".
    .describe("Blocks of [block_size, block_size] are moved");
  }
};
/*!
 * \brief Shape inference for depth_to_space on NCHW tensors:
 * (N, C, H, W) -> (N, C / block^2, H * block, W * block).
 */
inline bool DepthToSpaceOpShape(const nnvm::NodeAttrs& attrs,
                                mxnet::ShapeVector* in_attrs,
                                mxnet::ShapeVector* out_attrs) {
  const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  CHECK_EQ(in_attrs->at(0).ndim(), 4) << "Operation Depth To Space requires exactly 4D tensor";
  mxnet::TShape expected_out(4, -1);
  mxnet::TShape& in_shape = in_attrs->at(0);
  int block = param.block_size;
  // CHECK_GT (was CHECK_NE against 0) so a negative block_size is rejected
  // too, matching the error message.
  CHECK_GT(block, 0) << "block_size must be a positive integer value";
  CHECK_NE(in_shape[1], 0) << "Depth dimension:1 cannot be 0";
  CHECK_EQ(in_shape[1] % (block * block), 0)
    << "Cannot perform Depth To Space operation on the specified tensor."
       " Dimension:1(depth dimension) should be a multiple of 'block^2'";
  CHECK_NE(in_shape[0], 0)
    << "Operation requires a 4D tensor. Size of dimension:0 cannot be 0";
  CHECK_NE(in_shape[2], 0)
    << "Operation requires a 4D tensor. Size of dimension:2 cannot be 0";
  CHECK_NE(in_shape[3], 0)
    << "Operation requires a 4D tensor. Size of dimension:3 cannot be 0";
  // Depth shrinks by block^2; both spatial dims grow by block.
  expected_out[0] = in_shape[0];
  expected_out[1] = in_shape[1] / (block * block);
  int i = 2;
  while (i < expected_out.ndim()) {
    expected_out[i] = in_shape[i] * block;
    ++i;
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, expected_out);
  return shape_is_known(expected_out);
}
// Type inference for depth_to_space: input and output share one dtype, so
// propagate it in both directions and succeed once it is resolved.
inline bool DepthToSpaceOpType(const nnvm::NodeAttrs& attrs,
                               std::vector<int>* in_attrs,
                               std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0));
  TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
  // Known type means inference succeeded.
  return out_attrs->at(0) != -1;
}
/*!
* \brief This function updates the value of input index from where the data element
* needs to be fetched and written out to the ith location in output tensor
* \param index_position index within offset array to get offset of given dimension
* \param dim_size size of current dimension
* \param idx output tensor index
* \param inp_index index within input tensor from where value is retrieved
* \param offset_arr array containing the linear offset of input tensor
*/
MSHADOW_XINLINE void update_index(index_t index_position, index_t dim_size, index_t *idx,
                                  index_t *inp_index, const index_t* offset_arr) {
  // Peel the coordinate of the current dimension off *idx and accumulate its
  // contribution (coordinate * linear offset) into the input index.
  const index_t remaining = *idx / dim_size;
  const index_t coord = *idx - remaining * dim_size;  // == *idx % dim_size
  *inp_index += coord * offset_arr[index_position];
  *idx = remaining;
}
/*!
* \brief This function performs the tensor transpose (0, 1, 2, 3, 4, 5) ->
* (0, 3, 4, 1, 5, 2) by computing linear index within input tensor to be mapped
* to the ith index of output tensor
* \param i tensor index
* \param out_data output tensor
* \param in_data input tensor
* \param block size of chunks to be moved out of depth dimension
* \param size array containing the size of each dimension of input tensor
* \param offset_arr array containing the linear offset of input tensor
*/
template<int req>
struct depth_to_space_forward {
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType* out_data, const DType* in_data,
const int block, const index_t* size, const index_t* offset_arr) {
// Decompose the output index i dimension by dimension (innermost first)
// and accumulate the matching input offsets; the offset_arr positions
// encode the (0, 3, 4, 1, 5, 2) transpose described above.
index_t inp_index = 0, idx = i, dim_size;
dim_size = block;
update_index(2, dim_size, &idx, &inp_index, offset_arr);
dim_size = size[3];
update_index(5, dim_size, &idx, &inp_index, offset_arr);
dim_size = block;
update_index(1, dim_size, &idx, &inp_index, offset_arr);
dim_size = size[2];
update_index(4, dim_size, &idx, &inp_index, offset_arr);
dim_size = size[1] / (block * block);
update_index(3, dim_size, &idx, &inp_index, offset_arr);
dim_size = size[0];
update_index(0, dim_size, &idx, &inp_index, offset_arr);
KERNEL_ASSIGN(out_data[i], req, in_data[inp_index]);
}
};
/*!
* \brief This function calculates the linear offset for each dimension of
* input tensor and stores them in an array, which is later used in
* performing depth_to_space operation
* \param i global thread id
* \param offset_arr array to be populated with offset values
* \param size array to be populated with size of each dimension of input tensor
* \param block size of chunks to be moved out of depth dimension
* \param size0 size of Dim 0 of input tensor
* \param size1 size of Dim 1 of input tensor
* \param size2 size of Dim 2 of input tensor
* \param size3 size of Dim 3 of input tensor
*/
template<int req>
struct compute_offset_for_depth_to_space {
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType* offset_arr, DType* size, const int block,
const index_t size0, const index_t size1, const index_t size2,
const index_t size3) {
// Record the four input dims, then build the linear offsets for the
// six-dimensional (N, C/block^2, block, block, H, W) view from the
// innermost dimension outwards.
size[0] = size0;
size[1] = size1;
size[2] = size2;
size[3] = size3;
offset_arr[5] = 1;
offset_arr[4] = offset_arr[5] * size[3];
offset_arr[3] = offset_arr[4] * size[2];
offset_arr[2] = offset_arr[3] * size[1] / (block * block);
offset_arr[1] = offset_arr[2] * block;
offset_arr[0] = offset_arr[1] * block;
}
};
// Forward compute of depth_to_space: first a single-thread kernel fills a
// small workspace with the input dims and linear offsets, then the main
// kernel permutes every output element from its source position.
template<typename xpu>
void DepthToSpaceOpForward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
const TBlob& in_data = inputs[0];
const TBlob& out_data = outputs[0];
const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
using namespace mxnet_op;
int block = param.block_size;
// Workspace layout: 6 offsets followed by 4 dim sizes (10 index_t total).
mshadow::Tensor<xpu, 1, char> workspace =
ctx.requested[0].get_space_typed<xpu, 1, char>(mshadow::Shape1(sizeof(index_t) * 10), s);
char* workspace_curr_ptr = workspace.dptr_;
index_t* offset_arr = reinterpret_cast<index_t*>(workspace_curr_ptr);
index_t* size = reinterpret_cast<index_t*>(workspace_curr_ptr + sizeof(index_t) * 6);
MSHADOW_TYPE_SWITCH(out_data.type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
Kernel<compute_offset_for_depth_to_space<req_type>, xpu>::Launch(
s, 1, offset_arr, size, block, in_data.shape_[0], in_data.shape_[1],
in_data.shape_[2], in_data.shape_[3]);
Kernel<depth_to_space_forward<req_type>, xpu>::Launch(
s, out_data.Size(), out_data.dptr<DType>(), in_data.dptr<DType>(),
block, size, offset_arr);
});
});
}
/*!
 * \brief Shape inference for space_to_depth on NCHW tensors:
 * (N, C, H, W) -> (N, C * block^2, H / block, W / block).
 */
inline bool SpaceToDepthOpShape(const nnvm::NodeAttrs& attrs,
                                mxnet::ShapeVector* in_attrs,
                                mxnet::ShapeVector* out_attrs) {
  const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  CHECK_EQ(in_attrs->at(0).ndim(), 4) << "Operation Space To Depth requires exactly 4D tensor";
  mxnet::TShape expected_out(in_attrs->at(0).ndim(), -1);
  mxnet::TShape& in_shape = in_attrs->at(0);
  int block = param.block_size;
  // CHECK_GT (was CHECK_NE against 0) so a negative block_size is rejected
  // too, matching the error message.
  CHECK_GT(block, 0) << "block_size must be a positive integer value";
  CHECK_NE(in_shape[0], 0)
    << "Operation requires a 4D tensor. Size of dimension:0 cannot be 0";
  CHECK_NE(in_shape[1], 0) << "Depth dimension:1 cannot be 0";
  CHECK_NE(in_shape[2], 0)
    << "Operation requires a 4D tensor. Size of dimension:2 cannot be 0";
  // Error messages fixed: they previously said "Depth To Space" although this
  // is the Space To Depth shape function.
  CHECK_EQ(in_shape[2] % block, 0)
    << "Cannot perform Space To Depth operation on the specified tensor."
       " Dimension:2(1st Space dimension) should be a multiple of 'block' ";
  CHECK_NE(in_shape[3], 0)
    << "Operation requires a 4D tensor. Size of dimension:3 cannot be 0";
  CHECK_EQ(in_shape[3] % block, 0)
    << "Cannot perform Space To Depth operation on the specified tensor."
       " Dimension:3(2nd space dimension) should be a multiple of 'block' ";
  // Depth grows by block^2; both spatial dims shrink by block.
  expected_out[0] = in_shape[0];
  expected_out[1] = in_shape[1] * block * block;
  int i = 2;
  while (i < expected_out.ndim()) {
    expected_out[i] = in_shape[i] / block;
    ++i;
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, expected_out);
  return shape_is_known(expected_out);
}
// Dtype inference for SpaceToDepth: the single output shares the single
// input's dtype; propagation runs in both directions so either side can seed
// the other.
inline bool SpaceToDepthOpType(const nnvm::NodeAttrs& attrs,
                               std::vector<int>* in_attrs,
                               std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0));
  TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
  // Inference succeeded once the output dtype is known (-1 means unknown).
  return out_attrs->at(0) != -1;
}
/*!
 * \brief This function performs the tensor transpose (0, 1, 2, 3, 4, 5) ->
 * (0, 3, 5, 1, 2, 4) by computing the linear index within the input tensor
 * mapped to the ith index of the output tensor
 * \param i tensor index
 * \param out_data output tensor
 * \param in_data input tensor
 * \param block size of chunks to be moved out of depth dimension
 * \param size array containing the size of each dimension of input tensor
 * \param offset_arr array containing the linear offset of input tensor
 */
template<int req>
struct space_to_depth_forward {
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out_data, const DType* in_data, const int block,
                                  const index_t* size, const index_t* offset_arr) {
    // idx is peeled dimension-by-dimension while inp_index accumulates the
    // corresponding input offset.
    index_t inp_index = 0, idx = i, dim_size;
    // NOTE(review): the order of these update_index calls must match the
    // stride layout written by compute_offset_for_space_to_depth — do not
    // reorder them.
    dim_size = size[3] / block;
    update_index(4, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[2] / block;
    update_index(2, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[1];
    update_index(1, dim_size, &idx, &inp_index, offset_arr);
    dim_size = block;
    update_index(5, dim_size, &idx, &inp_index, offset_arr);
    dim_size = block;
    update_index(3, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[0];
    update_index(0, dim_size, &idx, &inp_index, offset_arr);
    KERNEL_ASSIGN(out_data[i], req, in_data[inp_index]);
  }
};
/*!
 * \brief Computes the linear offset (stride) for each dimension of the
 * input tensor and stores them in an array, later used by the
 * space_to_depth_forward kernel. Launched with a single thread.
 * \param i global thread id (unused; kernel runs once)
 * \param offset_arr array to be populated with offset values
 * \param size array to be populated with size of each dimension of input tensor
 * \param block size of chunks to be moved out of depth dimension
 * \param size0 size of Dim 0 of input tensor
 * \param size1 size of Dim 1 of input tensor
 * \param size2 size of Dim 2 of input tensor
 * \param size3 size of Dim 3 of input tensor
 */
template<int req>
struct compute_offset_for_space_to_depth {
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* offset_arr, DType* size, const int block,
                                  const index_t size0, const index_t size1,
                                  const index_t size2, const index_t size3) {
    size[0] = size0;
    size[1] = size1;
    size[2] = size2;
    size[3] = size3;
    // Strides for the block-factored view of the input consumed by
    // space_to_depth_forward. Each entry builds on the previous one.
    // NOTE(review): ordering and the exact arithmetic (including the
    // integer divisions) must stay in sync with the update_index calls in
    // space_to_depth_forward — do not reorder or simplify.
    offset_arr[5] = 1;
    offset_arr[4] = offset_arr[5] * block;
    offset_arr[3] = offset_arr[4] * size[3] / block;
    offset_arr[2] = offset_arr[3] * block;
    offset_arr[1] = offset_arr[2] * size[2] / block;
    offset_arr[0] = offset_arr[1] * size[1];
  }
};
// Forward pass of the SpaceToDepth operator on device `xpu`: moves block-size
// chunks of the two spatial dimensions of a 4D NCHW tensor into the depth
// (channel) dimension — the inverse of DepthToSpaceOpForward.
// inputs[0]: source tensor; outputs[0]: destination tensor (same dtype).
template<typename xpu>
void SpaceToDepthOpForward(const nnvm::NodeAttrs& attrs,
                           const OpContext& ctx,
                           const std::vector<TBlob>& inputs,
                           const std::vector<OpReqType>& req,
                           const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& in_data = inputs[0];
  const TBlob& out_data = outputs[0];
  const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
  using namespace mxnet_op;
  int block = param.block_size;
  // Scratch buffer holding 10 index_t values: 6 linear offsets followed by
  // the 4 input dimension sizes, both consumed by the kernels below.
  mshadow::Tensor<xpu, 1, char> workspace =
      ctx.requested[0].get_space_typed<xpu, 1, char>(mshadow::Shape1(sizeof(index_t) * 10), s);
  char* workspace_curr_ptr = workspace.dptr_;
  index_t* offset_arr = reinterpret_cast<index_t*>(workspace_curr_ptr);
  index_t* size = reinterpret_cast<index_t*>(workspace_curr_ptr + sizeof(index_t) * 6);
  MSHADOW_TYPE_SWITCH(out_data.type_flag_, DType, {
    MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
      // Single-thread kernel: fill offset_arr/size from the input shape.
      Kernel<compute_offset_for_space_to_depth<req_type>, xpu>::Launch(
          s, 1, offset_arr, size, block, in_data.shape_[0], in_data.shape_[1],
          in_data.shape_[2], in_data.shape_[3]);
      // One thread per output element performs the permuted copy.
      Kernel<space_to_depth_forward<req_type>, xpu>::Launch(
          s, out_data.Size(), out_data.dptr<DType>(), in_data.dptr<DType>(),
          block, size, offset_arr);
    });
  });
}
// Named input index for the split operator (it has a single data input).
namespace split_enum {
enum SplitOpInputs {kData};
}  // namespace split_enum
// Parameters of the split operator: split along `axis`, either at the
// explicit boundary positions in `indices` (when sections == 0) or into
// `sections` equal parts.
struct SplitParam : public dmlc::Parameter<SplitParam> {
  mxnet::TShape indices;  // boundary positions along `axis`; used when sections == 0
  int axis;               // axis to split along; may be negative (counts from the back)
  bool squeeze_axis;      // drop the split axis when every section has length 1
  int sections;           // number of equal sections; 0 means "split at `indices`"
  DMLC_DECLARE_PARAMETER(SplitParam) {
    DMLC_DECLARE_FIELD(indices)
    .describe("Indices of splits. The elements should denote the boundaries of at which split"
              " is performed along the `axis`.");
    DMLC_DECLARE_FIELD(axis).set_default(1)
    .describe("Axis along which to split.");
    DMLC_DECLARE_FIELD(squeeze_axis).set_default(0)
    .describe("If true, Removes the axis with length 1 from the shapes of the output arrays."
              " **Note** that setting `squeeze_axis` to ``true`` removes axis with length 1"
              " only along the `axis` which it is split."
              " Also `squeeze_axis` can be set to ``true``"
              " only if ``input.shape[axis] == num_outputs``.");
    DMLC_DECLARE_FIELD(sections).set_default(0)
    .describe("Number of sections if equally splitted. Default to 0 which means split by indices.");
  }
};  // struct SplitParam
// Build the boundary positions [0, s, 2s, ..., sections*s] for an equal
// split of dimension `axis`, where s = ishape[axis] / sections (integer
// division; any remainder stays in the last section's end boundary, which
// callers take from the axis length itself).
inline mxnet::TShape GetSplitIndices(const mxnet::TShape& ishape, int axis, int sections) {
  mxnet::TShape boundaries(sections + 1, -1);
  const int64_t step = ishape[axis] / sections;
  for (int k = 0; k <= sections; ++k) {
    boundaries[k] = step * k;
  }
  return boundaries;
}
// Dtype inference for split: every output inherits the input's dtype, which
// must already be known.
inline bool SplitOpType(const nnvm::NodeAttrs& attrs,
                        std::vector<int>* in_attrs,
                        std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  const int dtype = (*in_attrs)[0];
  CHECK_NE(dtype, -1) << "First input must have specified type";
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  // One output per section, or one per boundary entry when splitting by
  // explicit indices.
  const size_t num_outputs =
      (param.sections > 0) ? param.sections : param.indices.ndim();
  out_attrs->assign(num_outputs, dtype);
  return true;
}
// Core shape inference for split. Computes each output's shape from the
// input shape and the split boundaries, then reconstructs the input shape
// from the outputs so shape information also flows backward.
// `real_axis` must already be normalized to a non-negative axis.
inline bool SplitOpShapeImpl(const nnvm::NodeAttrs& attrs,
                             mxnet::ShapeVector* in_attrs,
                             mxnet::ShapeVector* out_attrs,
                             const int real_axis) {
  using namespace mshadow;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  mxnet::TShape dshape = in_attrs->at(split_enum::kData);
  mxnet::TShape ishape = in_attrs->at(split_enum::kData);
  // Boundaries: computed for equal sections, otherwise taken from `indices`.
  const mxnet::TShape indices =
      (param.sections > 0) ? GetSplitIndices(ishape, real_axis, param.sections) : param.indices;
  int num_outputs = (param.sections > 0) ? indices.ndim() - 1 : indices.ndim();
  // Pre-compute squeezed output shape for future usage
  mxnet::TShape squeezed_dshape = dshape;
  // Shift the dims after the split axis left by one, then drop the last.
  for (int d = real_axis; d < squeezed_dshape.ndim() - 1; ++d) {
    squeezed_dshape[d] = squeezed_dshape[d+1];
  }
  squeezed_dshape = mxnet::TShape(&squeezed_dshape[0], &squeezed_dshape[squeezed_dshape.ndim()-1]);
  // Assign shape to every output
  for (int i = 0; i < num_outputs; ++i) {
    // Output i owns the half-open slice [start, end) of the split axis; the
    // last output ends at the axis length.
    int start = indices[i];
    int end = (i < num_outputs - 1) ? indices[i + 1] : ishape[real_axis];
    if (ishape[real_axis] == 0U) {
      // Zero-length axis: every section is empty.
      end = start;
    } else {
      CHECK(start <= end)
          << "start " << start << " is not less than end " << end << "for subarray " << i;
      CHECK(end <= ishape[real_axis])
          << "end " << end << " is no less than the size of the axis " << ishape[real_axis];
    }
    dshape[real_axis] = (end - start);
    if (param.squeeze_axis) {
      // squeeze_axis requires every section to have length exactly 1.
      CHECK_EQ(end - start, 1U) << "expected axis size of 1 but got " << end - start;
      SHAPE_ASSIGN_CHECK(*out_attrs, i, squeezed_dshape);
    } else {
      SHAPE_ASSIGN_CHECK(*out_attrs, i, dshape);
    }
  }
  // Back-propagate: rebuild the input shape from the output shapes.
  mxnet::TShape back_calculate_dshape = ishape;
  back_calculate_dshape[real_axis] = 0;
  for (int d = 0; d < real_axis; ++d) {
    back_calculate_dshape[d] = (*out_attrs)[0][d];
  }
  if (param.squeeze_axis) {
    // Each output lost the axis, so its reconstructed length is the count.
    back_calculate_dshape[real_axis] = num_outputs;
  } else {
    // Axis length is the sum of the outputs' split-axis lengths.
    for (int i = 0; i < num_outputs; ++i) {
      back_calculate_dshape[real_axis] += (*out_attrs)[i][real_axis];
    }
  }
  for (int d = real_axis + 1; d < ishape.ndim(); ++d) {
    // With squeeze_axis the outputs have one fewer dim, shifted left by one.
    if (param.squeeze_axis) {
      back_calculate_dshape[d] = (*out_attrs)[0][d - 1];
    } else {
      back_calculate_dshape[d] = (*out_attrs)[0][d];
    }
  }
  SHAPE_ASSIGN_CHECK(*in_attrs, split_enum::kData, back_calculate_dshape);
  return true;
}
// Shape inference entry point for split: validates the split axis, normalizes
// a negative axis, and delegates to SplitOpShapeImpl.
inline bool SplitOpShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector* in_attrs,
                         mxnet::ShapeVector* out_attrs) {
  using namespace mshadow;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  mxnet::TShape dshape = in_attrs->at(split_enum::kData);
  if (!mxnet::ndim_is_known(dshape)) return false;
  if (param.axis >= 0) {
    CHECK_LT(param.axis, dshape.ndim());
  } else {
    // A negative axis counts from the back and is valid iff
    // axis + ndim >= 0. The previous check, CHECK_LT(axis + ndim, ndim),
    // was vacuously true for every negative axis and let out-of-range
    // values such as axis = -(ndim + 1) through, producing a negative
    // real_axis below.
    CHECK_GE(param.axis + dshape.ndim(), 0);
  }
  int real_axis = param.axis;
  if (real_axis < 0) {
    real_axis += dshape.ndim();
  }
  return SplitOpShapeImpl(attrs, in_attrs, out_attrs, real_axis);
}
struct SplitKernel {
  /*!
   * \brief Map function for forward split_v2 operator: copies input element i
   * into the output section that owns its position along the split axis.
   * \param i global thread id (linear index into in_data)
   * \param in_data ptr to input buffer
   * \param out_data ptr to ptr of outputs buffer
   * \param indices ptr to indices buffer (section start boundaries)
   * \param num_sections # of sections after split
   * \param axis_size size of axis to be splitted on
   * \param trailing_size step size within the data buffer of the axis to be splitted on
   */
  template<typename DType>
  static MSHADOW_XINLINE void Map(size_t i,
                                  const DType *in_data, DType** out_data, const size_t* indices,
                                  const size_t num_sections, const size_t axis_size,
                                  const size_t trailing_size) {
    // Position of element i along the split axis.
    const size_t axis_idx = (i / trailing_size) % axis_size;
    // target = last section whose start boundary is <= axis_idx.
    size_t target = 0;
    for (size_t s = 0; s < num_sections; ++s) {
      if (indices[s] > axis_idx) {
        break;
      }
      target = s;
    }
    DType* target_data = out_data[target];
    const size_t section_size = indices[target + 1] - indices[target];
    const size_t head_idx = i / (trailing_size * axis_size);
    const size_t mid_idx = axis_idx - indices[target];
    const size_t tail_idx = i % trailing_size;
    // Re-linearize (head, mid, tail) inside the target section.
    const size_t target_idx =
        head_idx * trailing_size * section_size + mid_idx * trailing_size + tail_idx;
    target_data[target_idx] = in_data[i];
  }
};
struct ConcatenateKernel {
  /*!
   * \brief Map function for backward split_v2 operator: gathers each input
   * gradient element from the output-gradient section that owns it (the
   * inverse of SplitKernel).
   * \param i global thread id (linear index into in_grad)
   * \param out_grad ptr to ptr of out grads buffer
   * \param in_grad ptr to input grad buffer
   * \param indices ptr to indices buffer (section start boundaries)
   * \param num_sections # of sections after split
   * \param axis_size size of axis to be splitted on
   * \param trailing_size step size within the data buffer of the axis to be splitted on
   */
  template<typename DType>
  static MSHADOW_XINLINE void Map(size_t i,
                                  DType** out_grad, DType* in_grad, const size_t* indices,
                                  const size_t num_sections, const size_t axis_size,
                                  const size_t trailing_size) {
    // Position of element i along the concatenation axis.
    size_t idx = i / trailing_size % axis_size;
    // src = last section whose start boundary is <= idx (loop body is empty;
    // the update happens in the increment expression).
    size_t src = 0;
    for (size_t section = 0;
         section < num_sections && indices[section] <= idx;
         src = section++) {}
    DType* src_grad = out_grad[src];
    const size_t mid_idx = idx - indices[src];
    const size_t head_idx = i / (trailing_size * axis_size);
    const size_t tail_idx = i % trailing_size;
    const size_t section_size = indices[src + 1] - indices[src];
    // Re-linearize (head, mid, tail) inside the source section.
    const size_t src_idx =
        head_idx * trailing_size * section_size + mid_idx * trailing_size + tail_idx;
    in_grad[i] = src_grad[src_idx];
  }
};
// Forward implementation of split: stages the section boundaries and output
// pointers in device workspace, then scatters every input element into its
// section with SplitKernel. `real_axis` must be non-negative.
//
// Cleanup vs. previous version: a `leading` stride (product of the dims
// before the split axis) was computed in a loop but never used by the kernel;
// the dead computation has been removed.
template<typename xpu>
inline void SplitOpForwardImpl(const nnvm::NodeAttrs& attrs,
                               const OpContext& ctx,
                               const std::vector<TBlob>& inputs,
                               const std::vector<OpReqType>& req,
                               const std::vector<TBlob>& outputs,
                               const int real_axis) {
  using namespace mshadow;
  using namespace mshadow::expr;
  using namespace mxnet_op;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& input_data = inputs[split_enum::kData];
  CHECK_LT(real_axis, input_data.ndim());
  // mid: length of the split axis; trailing: product of the dims after it.
  size_t mid = input_data.shape_[real_axis];
  size_t trailing = 1;
  for (int i = real_axis + 1; i < input_data.ndim(); ++i) {
    trailing *= input_data.shape_[i];
  }
  // Section start boundaries; when splitting by explicit indices, append the
  // axis length as the final (exclusive) boundary.
  size_t workspace_size = 0;
  const mxnet::TShape& ishape = input_data.shape_;
  const mxnet::TShape split_pts =
      (param.sections > 0) ? GetSplitIndices(ishape, real_axis, param.sections) : param.indices;
  std::vector<size_t> indices;
  for (const auto& section : split_pts) {
    indices.push_back(section);
  }
  if (param.sections == 0) {
    indices.push_back(ishape[real_axis]);
  }
  workspace_size += indices.size() * sizeof(size_t);
  MSHADOW_TYPE_SWITCH(input_data.type_flag_, DType, {
    // Raw output pointers, so the kernel can scatter into all outputs.
    std::vector<DType*> output_data;
    for (const TBlob& data : outputs) {
      output_data.push_back(data.dptr<DType>());
    }
    workspace_size += output_data.size() * sizeof(DType*);
    // Copy boundaries and pointers into device-accessible workspace.
    Tensor<xpu, 1, char> workspace =
        ctx.requested[0].get_space_typed<xpu, 1, char>(Shape1(workspace_size), s);
    Tensor<cpu, 1, size_t> indices_cpu_tensor(indices.data(), Shape1(indices.size()));
    Tensor<xpu, 1, size_t> indices_xpu_tensor(
        reinterpret_cast<size_t*>(workspace.dptr_), Shape1(indices.size()));
    Tensor<cpu, 1, DType*> ptrs_cpu_tensor(output_data.data(), Shape1(output_data.size()));
    Tensor<xpu, 1, DType*> ptrs_xpu_tensor(
        reinterpret_cast<DType**>(workspace.dptr_ + indices.size() * sizeof(size_t)),
        Shape1(output_data.size()));
    mshadow::Copy(indices_xpu_tensor, indices_cpu_tensor, s);
    mshadow::Copy(ptrs_xpu_tensor, ptrs_cpu_tensor, s);
    // One thread per input element copies it into its target section.
    Kernel<SplitKernel, xpu>::Launch(
        s, input_data.Size(), input_data.dptr<DType>(), ptrs_xpu_tensor.dptr_,
        indices_xpu_tensor.dptr_, indices.size() - 1, mid, trailing);
  });
}
// Forward entry point of split: checks arity, normalizes a possibly-negative
// axis against the input rank, and dispatches to SplitOpForwardImpl.
template<typename xpu>
inline void SplitOpForward(const nnvm::NodeAttrs& attrs,
                           const OpContext& ctx,
                           const std::vector<TBlob>& inputs,
                           const std::vector<OpReqType>& req,
                           const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mshadow::expr;
  using namespace mxnet_op;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), (param.sections > 0) ? param.sections : param.indices.ndim());
  const TBlob& input_data = inputs[split_enum::kData];
  const int axis =
      (param.axis < 0) ? param.axis + input_data.ndim() : param.axis;
  SplitOpForwardImpl<xpu>(attrs, ctx, inputs, req, outputs, axis);
}
// Backward implementation of split: stages the section boundaries and
// output-gradient pointers in device workspace, then gathers every input
// gradient element with ConcatenateKernel. `real_axis` must be non-negative.
//
// Cleanup vs. previous version: a `leading` stride (product of the dims
// before the split axis) was computed in a loop but never used by the kernel;
// the dead computation has been removed.
template<typename xpu>
inline void SplitOpBackwardImpl(const nnvm::NodeAttrs& attrs,
                                const OpContext& ctx,
                                const std::vector<TBlob>& inputs,
                                const std::vector<OpReqType>& req,
                                const std::vector<TBlob>& outputs,
                                const int real_axis) {
  using namespace mshadow;
  using namespace mshadow::expr;
  using namespace mxnet_op;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  Stream<xpu> *s = ctx.get_stream<xpu>();
  TBlob input_grad = outputs[split_enum::kData];
  CHECK_LT(real_axis, input_grad.ndim());
  // mid: length of the split axis; trailing: product of the dims after it.
  size_t mid = input_grad.shape_[real_axis];
  size_t trailing = 1;
  for (int i = real_axis + 1; i < input_grad.ndim(); ++i) {
    trailing *= input_grad.shape_[i];
  }
  // Section start boundaries; when splitting by explicit indices, append the
  // axis length as the final (exclusive) boundary.
  size_t workspace_size = 0;
  const mxnet::TShape& ishape = input_grad.shape_;
  const mxnet::TShape split_pts =
      (param.sections > 0) ? GetSplitIndices(ishape, real_axis, param.sections) : param.indices;
  std::vector<size_t> indices;
  for (const auto& section : split_pts) {
    indices.push_back(section);
  }
  if (param.sections == 0) {
    indices.push_back(ishape[real_axis]);
  }
  workspace_size += indices.size() * sizeof(size_t);
  MSHADOW_TYPE_SWITCH(input_grad.type_flag_, DType, {
    // Raw output-gradient pointers for the gather kernel.
    std::vector<DType*> out_grads;
    for (const TBlob& output_grad : inputs) {
      out_grads.push_back(output_grad.dptr<DType>());
    }
    workspace_size += out_grads.size() * sizeof(DType*);
    // Copy boundaries and pointers into device-accessible workspace.
    Tensor<xpu, 1, char> workspace =
        ctx.requested[0].get_space_typed<xpu, 1, char>(Shape1(workspace_size), s);
    Tensor<cpu, 1, size_t> indices_cpu_tensor(indices.data(), Shape1(indices.size()));
    Tensor<xpu, 1, size_t> indices_xpu_tensor(
        reinterpret_cast<size_t*>(workspace.dptr_), Shape1(indices.size()));
    Tensor<cpu, 1, DType*> ptrs_cpu_tensor(out_grads.data(), Shape1(inputs.size()));
    Tensor<xpu, 1, DType*> ptrs_xpu_tensor(
        reinterpret_cast<DType**>(workspace.dptr_ + indices.size() * sizeof(size_t)),
        Shape1(inputs.size()));
    mshadow::Copy(indices_xpu_tensor, indices_cpu_tensor, s);
    mshadow::Copy(ptrs_xpu_tensor, ptrs_cpu_tensor, s);
    // One thread per input-gradient element gathers from its source section.
    Kernel<ConcatenateKernel, xpu>::Launch(
        s, input_grad.Size(), ptrs_xpu_tensor.dptr_, input_grad.dptr<DType>(),
        indices_xpu_tensor.dptr_, indices.size() - 1, mid, trailing);
  });
}
// Backward entry point of split: checks arity, normalizes a possibly-negative
// axis against the input-gradient rank, and dispatches to
// SplitOpBackwardImpl. Fixes the error-message typo "mush" -> "must".
template<typename xpu>
inline void SplitOpBackward(const nnvm::NodeAttrs& attrs,
                            const OpContext& ctx,
                            const std::vector<TBlob>& inputs,
                            const std::vector<OpReqType>& req,
                            const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mshadow::expr;
  using namespace mxnet_op;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  CHECK_EQ(inputs.size(), (param.sections > 0) ? param.sections : param.indices.ndim())
      << "out grad vector size must match the output size";
  CHECK_EQ(outputs.size(), 1U);
  int real_axis = param.axis;
  if (real_axis < 0) {
    real_axis += outputs[split_enum::kData].ndim();
  }
  SplitOpBackwardImpl<xpu>(attrs, ctx, inputs, req, outputs, real_axis);
}
// Number of outputs produced by split: `sections` when splitting equally,
// otherwise one output per entry in `indices`.
inline uint32_t SplitNumOutputs(const NodeAttrs& attrs) {
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  if (param.sections > 0) {
    return param.sections;
  }
  return param.indices.ndim();
}
} // namespace op
} // namespace mxnet
namespace std {
// std::hash specializations so the operator parameter structs can key
// unordered containers. operator() is now const-qualified: unordered
// containers invoke the hasher through a (possibly const) stored member, so
// a non-const call operator fails to compile in const contexts.
template<>
struct hash<mxnet::op::TransposeParam> {
  size_t operator()(const mxnet::op::TransposeParam& val) const {
    size_t ret = 0;
    ret = dmlc::HashCombine(ret, val.axes);
    return ret;
  }
};
template<>
struct hash<mxnet::op::ReshapeParam> {
  size_t operator()(const mxnet::op::ReshapeParam& val) const {
    size_t ret = 0;
    // Fold every field that participates in parameter identity.
    ret = dmlc::HashCombine(ret, val.target_shape);
    ret = dmlc::HashCombine(ret, val.keep_highest);
    ret = dmlc::HashCombine(ret, val.shape);
    ret = dmlc::HashCombine(ret, val.reverse);
    return ret;
  }
};
template<>
struct hash<mxnet::op::ExpandDimParam> {
  size_t operator()(const mxnet::op::ExpandDimParam& val) const {
    size_t ret = 0;
    ret = dmlc::HashCombine(ret, val.axis);
    return ret;
  }
};
}  // namespace std
#endif // MXNET_OPERATOR_TENSOR_MATRIX_OP_INL_H_
|
saxpy.c | /*
* File: saxpy.c
* Author: Malcolm Davis
* Course: Computer Architecture II
* Created on Feb 24, 2018
* Simple SAXPY(Single-precision Alpha*X Plus Y) operation with OpenMP
*
 * Usage:
* ./argv[0] for default parameters and random vectors or;
* ./argv[0] <array size>
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>
#include <omp.h>
#define FLOAT_RAND_MAX 10000
#define VECTOR_SIZE 1000
void generateFloatVector(long size, float* vector);
void printFloatVector(long size, float* vector);
void SAXPY(long size, float alpha, float* X, float* Y);
/*
 * Main: retrieve command line options, build two random vectors, run SAXPY,
 * and report the elapsed wall-clock time. Set PRINT_VECTORS in the
 * environment to dump the vectors before and after.
 */
int main(int argc, char const *argv[])
{
    const int printVectors = getenv("PRINT_VECTORS") ? 1 : 0;
    double start_time, run_time;
    srand(time(NULL));
    /* Use the requested vector size when given and positive, else the
     * default (single atol call instead of the former duplicated parse). */
    long size = VECTOR_SIZE;
    if (argc > 1) {
        long requested = atol(argv[1]);
        if (requested > 0)
            size = requested;
    }
    float alpha = ((float)rand()/(float)(RAND_MAX)) * FLOAT_RAND_MAX;
    /* Allocate memory for the vectors; fail fast on allocation failure
     * instead of dereferencing NULL inside generateFloatVector. */
    float* X = (float*)malloc(sizeof(float)*size);
    float* Y = (float*)malloc(sizeof(float)*size);
    if (X == NULL || Y == NULL) {
        fprintf(stderr, "Failed to allocate two vectors of %ld floats\n", size);
        free(X);
        free(Y);
        return EXIT_FAILURE;
    }
    /* Generate random vectors */
    generateFloatVector(size, X);
    generateFloatVector(size, Y);
    /* Print the vectors */
    if(printVectors){
        printf("----Y=aX+Y----\na: %f\n", alpha);
        printf("X: ");
        printFloatVector(size, X);
        printf("Y: ");
        printFloatVector(size, Y);
    }
    /* Do the actual SAXPY and take the time */
    start_time = omp_get_wtime();
    SAXPY(size, alpha, X, Y);
    run_time = omp_get_wtime() - start_time;
    /* Print the result */
    if(printVectors){
        printf("Y: ");
        printFloatVector(size, Y);
    }
    printf("Size: %ld Seconds: %f \n", size, run_time);
    /* Free the allocated memory */
    free(Y);
    free(X);
    return 0;
}
/*
 * SAXPY: Y = alpha * X + Y, element-wise over vectors of length `size`.
 * Parallelized with OpenMP when compiled with -DPARALLEL.
 * @param size  number of elements in X and Y
 * @param alpha scale factor applied to X
 * @param X     input vector (read-only)
 * @param Y     input/output vector, updated in place
 */
void SAXPY(long size, float alpha, float* X, float* Y){
#ifdef PARALLEL
    /* The loop variable is declared in the for statement, so it is
     * implicitly private to each thread. */
    #pragma omp parallel for shared(size, alpha, X, Y)
#endif
    for(long idx = 0; idx < size; idx++){
        Y[idx] = alpha * X[idx] + Y[idx];
    }
}
/*
 * Fill `vector` with `size` random floats uniformly drawn from
 * [0, FLOAT_RAND_MAX].
 * @param size   number of elements to write
 * @param vector destination buffer (must hold at least `size` floats)
 */
void generateFloatVector(long size, float* vector){
    for(long idx = 0; idx < size; idx++){
        /* Unit-interval sample, then scaled to the target range. */
        const float unit = (float)rand() / (float)(RAND_MAX);
        vector[idx] = unit * FLOAT_RAND_MAX;
    }
}
/*
 * Print a vector to stdout in the form "[ v0  v1  ... ]" followed by a
 * newline.
 * @param size   number of elements in `vector`
 * @param vector values to print (read-only)
 */
void printFloatVector(long size, float* vector){
    printf("[");
    for(long i=0; i<size;i++){
        printf(" %f ", vector[i]);
    }
    printf("]\n");
} |
convolution_3x3_int8.h | // SenseNets is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2018 SenseNets Technology Ltd. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
/* Clamp a 32-bit value into the signed 16-bit range [-32768, 32767]. */
static inline short saturate2int16(int v)
{
    const int hi = 32767;
    const int lo = -32768;
    return (short)(v < lo ? lo : (v > hi ? hi : v));
}
// 3x3 stride-1 int8 convolution. For each output channel, accumulates the
// int32 dot product of every input channel's 3x3 window into the int32
// output map (no bias, no requantization here).
static void conv3x3s1_int8_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;
    const signed char *kernel = _kernel;
    // Parallelize over output channels; each channel is independent.
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out0 = top_blob.channel(p);
        out0.fill(0);
        // 9 weights per (output channel, input channel) pair.
        const signed char *kernel0 = (const signed char *)kernel + p * inch * 9;
        for (int q = 0; q < inch; q++)
        {
            int *outptr0 = out0;
            const signed char *img0 = bottom_blob.channel(q);
            // Three consecutive input rows feeding one output row.
            const signed char *r0 = img0;
            const signed char *r1 = img0 + w;
            const signed char *r2 = img0 + w * 2;
            for (int i = 0; i < outh; i++)
            {
                int remain = outw;
                for (; remain > 0; remain--)
                {
                    // 3x3 dot product in int32 (int8 * int8 cannot overflow int).
                    int sum0 = 0;
                    sum0 += (int)r0[0] * kernel0[0];
                    sum0 += (int)r0[1] * kernel0[1];
                    sum0 += (int)r0[2] * kernel0[2];
                    sum0 += (int)r1[0] * kernel0[3];
                    sum0 += (int)r1[1] * kernel0[4];
                    sum0 += (int)r1[2] * kernel0[5];
                    sum0 += (int)r2[0] * kernel0[6];
                    sum0 += (int)r2[1] * kernel0[7];
                    sum0 += (int)r2[2] * kernel0[8];
                    *outptr0 += sum0;
                    r0++;
                    r1++;
                    r2++;
                    outptr0++;
                }
                // Advance past the window tail to the next row start
                // (assumes w == outw + 2, i.e. valid 3x3 — confirm in caller).
                r0 += 2;
                r1 += 2;
                r2 += 2;
            }
            kernel0 += 9;
        }
    }
}
// 3x3 stride-2 int8 convolution. For each output channel, adds every input
// channel's 3x3 dot product into the running output and clamps it to the
// int16 range.
//
// Fix vs. previous version: the partial sums were accumulated in `short`
// variables, which can overflow (a 3-tap row sum already reaches
// 3 * 127 * 127 = 48387 > INT16_MAX), and the running output was saturated
// three separate times per pixel, making the result depend on the partial
// split. The dot product is now accumulated in `int` (matching
// conv3x3s1_int8_sse) with a single saturation per update.
static void conv3x3s2_int8_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;
    // Each output row consumes 2*outw input columns; tailstep jumps from the
    // end of the processed span to the start of the next input row pair.
    const int tailstep = w - 2 * outw + w;
    const signed char *kernel = _kernel;
    // Parallelize over output channels; each channel is independent.
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out0 = top_blob.channel(p);
        out0.fill(0);
        // 9 weights per (output channel, input channel) pair.
        const signed char *kernel0 = (const signed char *)kernel + p * inch * 9;
        for (int q = 0; q < inch; q++)
        {
            int *outptr0 = out0;
            const signed char *img0 = bottom_blob.channel(q);
            // Three consecutive input rows feeding one output row.
            const signed char *r0 = img0;
            const signed char *r1 = img0 + w;
            const signed char *r2 = img0 + w * 2;
            for (int i = 0; i < outh; i++)
            {
                int remain = outw;
                for (; remain > 0; remain--)
                {
                    // Full 3x3 dot product in int32 — no int16 overflow.
                    int sum0 = 0;
                    sum0 += (int)r0[0] * kernel0[0];
                    sum0 += (int)r0[1] * kernel0[1];
                    sum0 += (int)r0[2] * kernel0[2];
                    sum0 += (int)r1[0] * kernel0[3];
                    sum0 += (int)r1[1] * kernel0[4];
                    sum0 += (int)r1[2] * kernel0[5];
                    sum0 += (int)r2[0] * kernel0[6];
                    sum0 += (int)r2[1] * kernel0[7];
                    sum0 += (int)r2[2] * kernel0[8];
                    // Saturate once, after the whole window contribution.
                    *outptr0 = saturate2int16(*outptr0 + sum0);
                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    outptr0++;
                }
                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
            }
            kernel0 += 9;
        }
    }
}
|
DRB025-simdtruedep-var-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
This one has a race condition due to a true dependence.
But data races happen at instruction level, not thread level.
Data race pair: a[i+1]@68:5 vs. a[i]@68:12
*/
#include <stdio.h>
#include <stdlib.h>
int main(int argc, char* argv[])
{
  int i;
  int len=100;
  /* Optional command-line override of the array length (VLA below). */
  if (argc>1)
    len = atoi(argv[1]);
  int a[len], b[len];
  /* Initialization loop: each iteration writes only its own index, so the
   * parallel region itself is race-free. */
#pragma omp parallel for private(i)
  for (i=0;i<len;i++)
  {
    a[i]=i;
    b[i]=i+1;
  }
  /* Loop with a true (read-after-write) dependence: a[i+1] depends on a[i].
   * This is the intentional pattern of this DataRaceBench case — do NOT
   * parallelize, vectorize, or reorder it. */
  for (i=0;i<len-1;i++)
    a[i+1]=a[i]*b[i];
  for (i=0;i<len;i++) {
    printf("%d %d\n", a[i], b[i]);
  }
  return 0;
}
|
GraphBLAS.h | //------------------------------------------------------------------------------
// GraphBLAS.h: definitions for the GraphBLAS package
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS is a complete implementation of the GraphBLAS
// standard, which defines a set of sparse matrix operations on an extended
// algebra of semirings, using an almost unlimited variety of operators and
// types. When applied to sparse adjacency matrices, these algebraic
// operations are equivalent to computations on graphs. GraphBLAS provides a
// powerful and expressive framework creating graph algorithms based on the
// elegant mathematics of sparse matrix operations on a semiring.
// This GraphBLAS.h file contains GraphBLAS definitions for user applications
// to #include. A few functions and variables with the prefix GB_ need to be
// defined in this file and are thus technically visible to the user, but they
// must not be accessed in user code. They are here only so that the ANSI C11
// _Generic feature can be used in the user-accessible polymorphic functions,
// or to implement a fast GxB_Iterator using macros.
// This implementation conforms to the GraphBLAS API Specification and also
// includes functions and features that are extensions to the spec, which are
// given names of the form GxB_* for functions, built-in objects, and macros,
// so it is clear which are in the spec and which are extensions. Extensions
// with the name GxB_* are user-accessible in SuiteSparse:GraphBLAS but cannot
// be guaranteed to appear in all GraphBLAS implementations.
// Regarding "historical" functions and symbols: when a GxB_* function or
// symbol is added to the C API Specification, the new GrB_* name should be
// used instead. The old GxB_* name will be kept for historical reasons,
// documented here and in working order; it might no longer be mentioned in the
// user guide. Historical functions and symbols would only be removed in the
// rare case that they cause a serious conflict with future methods.
#ifndef GRAPHBLAS_H
#define GRAPHBLAS_H
//==============================================================================
// include files required by GraphBLAS
//==============================================================================
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <stdbool.h>
#include <stdint.h>
#include <inttypes.h>
#include <stddef.h>
#include <limits.h>
#include <math.h>
#include <stdarg.h>
//==============================================================================
// renaming for use in R2021a or later
//==============================================================================
#define GB_CAT2(x,y) x ## y
#define GB_EVAL2(x,y) GB_CAT2 (x,y)
#ifdef GBRENAME
// All symbols must be renamed for the @GrB interface when using
// R2021a and following, since those versions include an earlier
// version of SuiteSparse:GraphBLAS.
#define GB(x) GB_EVAL2 (GM_, x)
#define GRB(x) GB_EVAL2 (GrM_, x)
#define GXB(x) GB_EVAL2 (GxM_, x)
#define GrB GrM
#define GxB GxM
#include "GB_rename.h"
#else
// Use the standard GraphBLAS prefix.
#define GB(x) GB_EVAL2 (GB_, x)
#define GRB(x) GB_EVAL2 (GrB_, x)
#define GXB(x) GB_EVAL2 (GxB_, x)
#endif
//==============================================================================
// compiler variations
//==============================================================================
// Exporting/importing symbols for Microsoft Visual Studio
#if ( _MSC_VER && !(__INTEL_COMPILER || __INTEL_CLANG_COMPILER) )
#ifdef GB_LIBRARY
// compiling SuiteSparse:GraphBLAS itself, exporting symbols to user apps
#define GB_PUBLIC extern __declspec ( dllexport )
#else
// compiling the user application, importing symbols from SuiteSparse:GraphBLAS
#define GB_PUBLIC extern __declspec ( dllimport )
#endif
#else
// for other compilers
#define GB_PUBLIC extern
#endif
// GraphBLAS requires an ANSI C11 compiler for its polymorphic functions (using
// the _Generic keyword), but it can be used in an C90 compiler if those
// functions are disabled.
// With ANSI C11 and later, _Generic keyword and polymorphic functions can be
// used. Earlier versions of the language do not have this feature.
// GxB_STDC_VERSION: the language standard the compiler reports, or a stand-in
// value below C95 when __STDC_VERSION__ is not defined at all.
#ifdef __STDC_VERSION__
// ANSI C17: 201710L
// ANSI C11: 201112L
// ANSI C99: 199901L
// ANSI C95: 199409L
#define GxB_STDC_VERSION __STDC_VERSION__
#else
// assume ANSI C90 / C89
#define GxB_STDC_VERSION 199001L
#endif
//------------------------------------------------------------------------------
// definitions for complex types, and restrict keyword
//------------------------------------------------------------------------------
// Select the complex types (GxB_FC32_t / GxB_FC64_t), the complex
// constructors (GxB_CMPLXF / GxB_CMPLX), and the restrict keyword spelling
// (GB_restrict), for C++, MSVC, and ANSI C11 compilers respectively.
#undef GB_restrict
// See:
// https://www.drdobbs.com/complex-arithmetic-in-the-intersection-o/184401628#
#if defined ( __cplusplus )
extern "C++"
{
// C++ complex types
#include <cmath>
#include <complex>
#undef I
typedef std::complex<float> GxB_FC32_t ;
typedef std::complex<double> GxB_FC64_t ;
}
#define GxB_CMPLXF(r,i) GxB_FC32_t(r,i)
#define GxB_CMPLX(r,i) GxB_FC64_t(r,i)
#define GB_restrict
#elif ( _MSC_VER && !(__INTEL_COMPILER || __INTEL_CLANG_COMPILER) )
// Microsoft Windows complex types
#include <complex.h>
#undef I
typedef _Fcomplex GxB_FC32_t ;
typedef _Dcomplex GxB_FC64_t ;
#define GxB_CMPLXF(r,i) (_FCbuild (r,i))
#define GxB_CMPLX(r,i) ( _Cbuild (r,i))
#define GB_restrict __restrict
#else
// ANSI C11 complex types
#include <complex.h>
#undef I
typedef float complex GxB_FC32_t ;
typedef double complex GxB_FC64_t ;
#ifndef CMPLX
// gcc 6.2 on the Mac doesn't #define CMPLX
#define GxB_CMPLX(r,i) \
((GxB_FC64_t)((double)(r)) + (GxB_FC64_t)((double)(i) * _Complex_I))
#else
// use the ANSI C11 CMPLX macro
#define GxB_CMPLX(r,i) CMPLX (r,i)
#endif
#ifndef CMPLXF
// gcc 6.2 on the Mac doesn't #define CMPLXF
#define GxB_CMPLXF(r,i) \
((GxB_FC32_t)((float)(r)) + (GxB_FC32_t)((float)(i) * _Complex_I))
#else
// use the ANSI C11 CMPLXF macro
#define GxB_CMPLXF(r,i) CMPLXF (r,i)
#endif
// restrict keyword
#if defined ( __NVCC__ )
// NVIDIA nvcc
#define GB_restrict __restrict__
#elif GxB_STDC_VERSION >= 199901L
// ANSI C99 or later
#define GB_restrict restrict
#else
// ANSI C95 and earlier: no restrict keyword
#define GB_restrict
#endif
#endif
//==============================================================================
// version control
//==============================================================================
// There are two version numbers that user codes can check against with
// compile-time #if tests: the version of this GraphBLAS implementation,
// and the version of the GraphBLAS specification it conforms to. User code
// can use tests like this:
//
// #if GxB_SPEC_VERSION >= GxB_VERSION (2,0,3)
// ... use features in GraphBLAS specification 2.0.3 ...
// #else
// ... only use features in early specifications
// #endif
//
// #if GxB_IMPLEMENTATION > GxB_VERSION (1,4,0)
// ... use features from version 1.4.0 of a GraphBLAS package
// #endif
// X_GRAPHBLAS: names this particular implementation:
// X_GRAPHBLAS: names this particular implementation:
#define GxB_SUITESPARSE_GRAPHBLAS
// GxB_VERSION: a single integer for comparing spec and version levels.
// Packs (major, minor, sub) into one value; comparisons are only meaningful
// when minor and sub are each below 1000, given the *1000 packing.
#define GxB_VERSION(major,minor,sub) \
(((major)*1000ULL + (minor))*1000ULL + (sub))
// The version of this implementation, and the GraphBLAS API version:
#define GxB_IMPLEMENTATION_NAME "SuiteSparse:GraphBLAS"
#define GxB_IMPLEMENTATION_DATE "Mar 14, 2022"
#define GxB_IMPLEMENTATION_MAJOR 6
#define GxB_IMPLEMENTATION_MINOR 2
#define GxB_IMPLEMENTATION_SUB 5
#define GxB_SPEC_DATE "Nov 15, 2021"
#define GxB_SPEC_MAJOR 2
#define GxB_SPEC_MINOR 0
#define GxB_SPEC_SUB 0
// compile-time access to the C API Version number of this library.
#define GRB_VERSION GxB_SPEC_MAJOR
#define GRB_SUBVERSION GxB_SPEC_MINOR
#define GxB_IMPLEMENTATION \
GxB_VERSION (GxB_IMPLEMENTATION_MAJOR, \
GxB_IMPLEMENTATION_MINOR, \
GxB_IMPLEMENTATION_SUB)
// The 'about' string that describes this particular implementation of GraphBLAS:
#define GxB_IMPLEMENTATION_ABOUT \
"SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved." \
"\nhttp://suitesparse.com Dept of Computer Sci. & Eng, Texas A&M University.\n"
// The GraphBLAS license for this particular implementation of GraphBLAS:
#define GxB_IMPLEMENTATION_LICENSE \
"SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved." \
"\nLicensed under the Apache License, Version 2.0 (the \"License\"); you may\n"\
"not use SuiteSparse:GraphBLAS except in compliance with the License. You\n" \
"may obtain a copy of the License at\n\n" \
" http://www.apache.org/licenses/LICENSE-2.0\n\n" \
"Unless required by applicable law or agreed to in writing, software\n" \
"distributed under the License is distributed on an \"AS IS\" BASIS,\n" \
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n" \
"See the License for the specific language governing permissions and\n" \
"limitations under the License.\n"
//------------------------------------------------------------------------------
// GraphBLAS C API version
//------------------------------------------------------------------------------
#define GxB_SPEC_VERSION GxB_VERSION(GxB_SPEC_MAJOR,GxB_SPEC_MINOR,GxB_SPEC_SUB)
// The 'spec' string describes the GraphBLAS spec:
#define GxB_SPEC_ABOUT \
"GraphBLAS C API, by Aydin Buluc, Timothy Mattson, Scott McMillan,\n" \
"Jose' Moreira, Carl Yang, and Benjamin Brock. Based on 'GraphBLAS\n" \
"Mathematics by Jeremy Kepner. See also 'Graph Algorithms in the Language\n" \
"of Linear Algebra,' edited by J. Kepner and J. Gilbert, SIAM, 2011.\n"
//==============================================================================
// GrB_Index: the GraphBLAS integer
//==============================================================================
// GrB_Index: row or column index, or matrix dimension. This typedef is used
// for row and column indices, or matrix and vector dimensions.
typedef uint64_t GrB_Index ;
// GrB_INDEX_MAX is the largest permissible index value. The largest valid
// matrix or vector dimension is GrB_INDEX_MAX+1, or 2^60 in SuiteSparse:GrB.
// (the cast binds tighter than the subtraction, so this value is 2^60 - 1)
#define GrB_INDEX_MAX ((GrB_Index) (1ULL << 60) - 1)
// GxB_INDEX_MAX is historical; use GrB_INDEX_MAX+1 instead. It differs by one
// from GrB_INDEX_MAX, since it defined the largest valid matrix or vector
// dimension.
#define GxB_INDEX_MAX ((GrB_Index) (1ULL << 60))
//==============================================================================
// GraphBLAS error and informational codes
//==============================================================================
// All GraphBLAS functions return a code that indicates if it was successful
// or not. If more information is required, the GrB_error function can be
// called, which returns a string that provides more information on the last
// return value from GraphBLAS.
// The v1.3 C API did not specify the enum values, but they appear in v2.0.
// Changing them will require SuiteSparse:GraphBLAS to bump to v6.x.
// Error codes GrB_NOT_IMPLEMENTED and GrB_EMPTY_OBJECT are new to v2.0.
// GrB_Info: the status code returned by nearly all GraphBLAS methods.
// Zero is success, positive values are informational, negatives are errors.
typedef enum
{
GrB_SUCCESS = 0, // all is well
//--------------------------------------------------------------------------
// informational codes, not an error:
//--------------------------------------------------------------------------
GrB_NO_VALUE = 1, // A(i,j) requested but not there
GxB_EXHAUSTED = 2, // iterator is exhausted
//--------------------------------------------------------------------------
// errors:
//--------------------------------------------------------------------------
GrB_UNINITIALIZED_OBJECT = -1, // object has not been initialized
GrB_NULL_POINTER = -2, // input pointer is NULL
GrB_INVALID_VALUE = -3, // generic error; some value is bad
GrB_INVALID_INDEX = -4, // row or column index is out of bounds
GrB_DOMAIN_MISMATCH = -5, // object domains are not compatible
GrB_DIMENSION_MISMATCH = -6, // matrix dimensions do not match
GrB_OUTPUT_NOT_EMPTY = -7, // output matrix already has values
GrB_NOT_IMPLEMENTED = -8, // method not implemented
GrB_PANIC = -101, // unknown error
GrB_OUT_OF_MEMORY = -102, // out of memory
GrB_INSUFFICIENT_SPACE = -103, // output array not large enough
GrB_INVALID_OBJECT = -104, // object is corrupted
GrB_INDEX_OUT_OF_BOUNDS = -105, // row or col index out of bounds
GrB_EMPTY_OBJECT = -106 // an object does not contain a value
}
GrB_Info ;
//==============================================================================
// GrB_init / GrB_finalize
//==============================================================================
// GrB_init must be called before any other GraphBLAS operation.  GrB_finalize
// must be called as the last GraphBLAS operation.
// GrB_init defines the mode that GraphBLAS will use: blocking or
// non-blocking. With blocking mode, all operations finish before returning to
// the user application. With non-blocking mode, operations can be left
// pending, and are computed only when needed.
// The extension GxB_init does the work of GrB_init, but it also defines the
// memory management functions that SuiteSparse:GraphBLAS will use internally.
// GrB_Mode: the execution mode selected once, by GrB_init or GxB_init.
typedef enum
{
GrB_NONBLOCKING = 0, // methods may return with pending computations
GrB_BLOCKING = 1 // no computations are ever left pending
}
GrB_Mode ;
GB_PUBLIC
GrB_Info GrB_init // start up GraphBLAS
(
GrB_Mode mode // blocking or non-blocking mode
) ;
GB_PUBLIC
GrB_Info GxB_init // start up GraphBLAS and also define malloc, etc
(
GrB_Mode mode, // blocking or non-blocking mode
// pointers to memory management functions; the signatures match the
// ANSI C malloc, calloc, realloc, and free, respectively
void * (* user_malloc_function ) (size_t),
void * (* user_calloc_function ) (size_t, size_t),
void * (* user_realloc_function ) (void *, size_t),
void (* user_free_function ) (void *)
) ;
GB_PUBLIC
GrB_Info GrB_finalize (void) ; // finish GraphBLAS
//==============================================================================
// GrB_getVersion: GraphBLAS C API version
//==============================================================================
// GrB_getVersion provides a runtime access of the C API Version.
GB_PUBLIC
GrB_Info GrB_getVersion // runtime access to C API version number
(
unsigned int *version, // returns GRB_VERSION (the C API major version)
unsigned int *subversion // returns GRB_SUBVERSION (the C API minor version)
) ;
//==============================================================================
// GrB_Descriptor: the GraphBLAS descriptor
//==============================================================================
// The GrB_Descriptor is used to modify the behavior of GraphBLAS operations.
//
// GrB_OUTP: can be GxB_DEFAULT or GrB_REPLACE. If GrB_REPLACE, then C is
// cleared after taking part in the accum operation but before the mask.
// In other words, C<Mask> = accum (C,T) is split into Z = accum(C,T) ;
// C=0 ; C<Mask> = Z.
//
// GrB_MASK: can be GxB_DEFAULT, GrB_COMP, GrB_STRUCTURE, or set to both
// GrB_COMP and GrB_STRUCTURE. If GxB_DEFAULT, the mask is used
// normally, where Mask(i,j)=1 means C(i,j) can be modified by C<Mask>=Z,
// and Mask(i,j)=0 means it cannot be modified even if Z(i,j) has been
// computed and differs from C(i,j). If GrB_COMP, this is the same as
// taking the logical complement of the Mask. If GrB_STRUCTURE is set,
// the value of the mask is not considered, just its pattern. The
// GrB_COMP and GrB_STRUCTURE settings can be combined.
//
// GrB_INP0: can be GxB_DEFAULT or GrB_TRAN. If GxB_DEFAULT, the first input
// is used as-is. If GrB_TRAN, it is transposed. Only matrices are
// transposed this way. Vectors are never transposed via the
// GrB_Descriptor.
//
// GrB_INP1: the same as GrB_INP0 but for the second input
//
// GxB_NTHREADS: the maximum number of threads to use in the current method.
// If <= GxB_DEFAULT (which is zero), then the number of threads is
// determined automatically. This is the default value.
//
// GxB_CHUNK: an integer parameter that determines the number of threads to use
// for a small problem. If w is the work to be performed, and chunk is
// the value of this parameter, then the # of threads is limited to floor
// (w/chunk). The default chunk is currently 64K, but this may change in
// the future. If chunk is set to <= GxB_DEFAULT (that is, zero), the
// default is used.
//
// GxB_AxB_METHOD: this is a hint to SuiteSparse:GraphBLAS on which algorithm
// it should use to compute C=A*B, in GrB_mxm, GrB_mxv, and GrB_vxm.
// SuiteSparse:GraphBLAS has four different heuristics, and the default
// method (GxB_DEFAULT) selects between them automatically. The complete
// rule is in the User Guide. The brief discussion here assumes all
// matrices are stored by column. All methods compute the same result,
// except that floating-point roundoff may differ when working on
// floating-point data types.
//
// GxB_AxB_SAXPY: C(:,j)=A*B(:,j) is computed using a mix of Gustavson
// and Hash methods. Each task in the parallel computation makes its
// own decision between these two methods, via a heuristic.
//
// GxB_AxB_GUSTAVSON: This is the same as GxB_AxB_SAXPY, except that
// every task uses Gustavson's method, computing C(:,j)=A*B(:,j) via a
// gather/scatter workspace of size equal to the number of rows of A.
// Very good general-purpose method, but sometimes the workspace can
// be too large when many threads are used.
//
// GxB_AxB_HASH: This is the same as GxB_AxB_SAXPY, except that every
// task uses the Hash method. It is very good for hypersparse
// matrices and uses very little workspace, and so it scales well to
// many threads.
//
// GxB_AxB_DOT: computes C(i,j) = A(:,i)'*B(:,j), for each entry C(i,j).
// A very specialized method that works well only if the mask is
// present, very sparse, and not complemented, or when C is a dense
// vector or matrix, or when C is small.
//
// GxB_SORT: GrB_mxm and other methods may return a matrix in a 'jumbled'
// state, with indices out of order. The sort is left pending. Some
// methods can tolerate jumbled matrices on input, so this can be faster.
// However, in some cases, it can be faster for GrB_mxm to sort its output
// as it is computed. With GxB_SORT set to GxB_DEFAULT, the sort is left
// pending. With GxB_SORT set to a nonzero value, GrB_mxm typically sorts
// the resulting matrix C (but not always; this is just a hint). If
// GrB_init is called with GrB_BLOCKING mode, the sort will always be
// done, and this setting has no effect.
//
// GxB_COMPRESSION: compression method for GxB_Matrix_serialize and
// GxB_Vector_serialize. The default is LZ4.
//
// GxB_IMPORT: GxB_FAST_IMPORT (faster, for trusted input data) or
// GxB_SECURE_IMPORT (slower, for untrusted input data), for the
// GxB*_pack* methods.
// The following are enumerated values in both the GrB_Desc_Field and the
// GxB_Option_Field for global options. They are defined with the same integer
// value for both enums, so the user can use them for both.
// These integer codes are shared by the GrB_Desc_Field and GxB_Option_Field
// enums, so the same name can be used with either.
#define GxB_NTHREADS 5
#define GxB_CHUNK 7
// GPU control (DRAFT: in progress, do not use)
#define GxB_GPU_CONTROL 21
#define GxB_GPU_CHUNK 22
// GrB_Desc_Field: which setting of a GrB_Descriptor is being set or queried.
typedef enum
{
GrB_OUTP = 0, // descriptor for output of a method
GrB_MASK = 1, // descriptor for the mask input of a method
GrB_INP0 = 2, // descriptor for the first input of a method
GrB_INP1 = 3, // descriptor for the second input of a method
GxB_DESCRIPTOR_NTHREADS = GxB_NTHREADS, // max number of threads to use.
// If <= GxB_DEFAULT, then GraphBLAS selects the number
// of threads automatically.
GxB_DESCRIPTOR_CHUNK = GxB_CHUNK, // chunk size for small problems.
// If <= GxB_DEFAULT, then the default is used.
// GPU control (DRAFT: in progress, do not use)
GxB_DESCRIPTOR_GPU_CONTROL = GxB_GPU_CONTROL,
GxB_DESCRIPTOR_GPU_CHUNK = GxB_GPU_CHUNK,
GxB_AxB_METHOD = 1000, // descriptor for selecting C=A*B algorithm
GxB_SORT = 35, // control sort in GrB_mxm
GxB_COMPRESSION = 36, // select compression for serialize
GxB_IMPORT = 37, // secure vs fast import
}
GrB_Desc_Field ;
// GrB_Desc_Value: the value assigned to a GrB_Desc_Field of a descriptor.
typedef enum
{
// for all GrB_Descriptor fields:
GxB_DEFAULT = 0, // default behavior of the method
// for GrB_OUTP only:
GrB_REPLACE = 1, // clear the output before assigning new values to it
// for GrB_MASK only:
GrB_COMP = 2, // use the structural complement of the input
GrB_STRUCTURE = 4, // use only the pattern of the mask, not its values
// for GrB_INP0 and GrB_INP1 only:
GrB_TRAN = 3, // use the transpose of the input
// for GxB_GPU_CONTROL only (DRAFT: in progress, do not use)
GxB_GPU_ALWAYS = 2001,
GxB_GPU_NEVER = 2002,
// for GxB_AxB_METHOD only:
GxB_AxB_GUSTAVSON = 1001, // gather-scatter saxpy method
GxB_AxB_DOT = 1003, // dot product
GxB_AxB_HASH = 1004, // hash-based saxpy method
GxB_AxB_SAXPY = 1005, // saxpy method (any kind)
// for GxB_IMPORT only:
GxB_SECURE_IMPORT = 502 // GxB*_pack* methods do not trust their input data
}
GrB_Desc_Value ;
// default for GxB pack is to trust the input data
#define GxB_FAST_IMPORT GxB_DEFAULT
// GrB_Descriptor: an opaque handle; its contents are set/queried only via the
// methods below.
typedef struct GB_Descriptor_opaque *GrB_Descriptor ;
GB_PUBLIC
GrB_Info GrB_Descriptor_new // create a new descriptor
(
GrB_Descriptor *descriptor // handle of descriptor to create
) ;
GB_PUBLIC
GrB_Info GrB_Descriptor_set // set a parameter in a descriptor
(
GrB_Descriptor desc, // descriptor to modify
GrB_Desc_Field field, // parameter to change
GrB_Desc_Value val // value to change it to
) ;
GB_PUBLIC
GrB_Info GxB_Descriptor_get // get a parameter from a descriptor
(
GrB_Desc_Value *val, // value of the parameter
GrB_Descriptor desc, // descriptor to query; NULL means defaults
GrB_Desc_Field field // parameter to query
) ;
// GxB_Desc_set/get are variadic: the type of the trailing value argument
// depends on the field being set or queried.
GB_PUBLIC
GrB_Info GxB_Desc_set // set a parameter in a descriptor
(
GrB_Descriptor desc, // descriptor to modify
GrB_Desc_Field field, // parameter to change
... // value to change it to
) ;
GB_PUBLIC
GrB_Info GxB_Desc_get // get a parameter from a descriptor
(
GrB_Descriptor desc, // descriptor to query; NULL means defaults
GrB_Desc_Field field, // parameter to query
... // value of the parameter
) ;
GB_PUBLIC
GrB_Info GrB_Descriptor_free // free a descriptor
(
GrB_Descriptor *descriptor // handle of descriptor to free
) ;
// Predefined descriptors and their values (read-only; see the notes below on
// modifying or freeing them):
GB_PUBLIC
GrB_Descriptor // OUTP MASK MASK INP0 INP1
// structural complement
// =========== ============== ========== ======== ========
// GrB_NULL // - - - - -
GrB_DESC_T1 , // - - - - GrB_TRAN
GrB_DESC_T0 , // - - - GrB_TRAN -
GrB_DESC_T0T1 , // - - - GrB_TRAN GrB_TRAN
GrB_DESC_C , // - - GrB_COMP - -
GrB_DESC_CT1 , // - - GrB_COMP - GrB_TRAN
GrB_DESC_CT0 , // - - GrB_COMP GrB_TRAN -
GrB_DESC_CT0T1 , // - - GrB_COMP GrB_TRAN GrB_TRAN
GrB_DESC_S , // - GrB_STRUCTURE - - -
GrB_DESC_ST1 , // - GrB_STRUCTURE - - GrB_TRAN
GrB_DESC_ST0 , // - GrB_STRUCTURE - GrB_TRAN -
GrB_DESC_ST0T1 , // - GrB_STRUCTURE - GrB_TRAN GrB_TRAN
GrB_DESC_SC , // - GrB_STRUCTURE GrB_COMP - -
GrB_DESC_SCT1 , // - GrB_STRUCTURE GrB_COMP - GrB_TRAN
GrB_DESC_SCT0 , // - GrB_STRUCTURE GrB_COMP GrB_TRAN -
GrB_DESC_SCT0T1 , // - GrB_STRUCTURE GrB_COMP GrB_TRAN GrB_TRAN
GrB_DESC_R , // GrB_REPLACE - - - -
GrB_DESC_RT1 , // GrB_REPLACE - - - GrB_TRAN
GrB_DESC_RT0 , // GrB_REPLACE - - GrB_TRAN -
GrB_DESC_RT0T1 , // GrB_REPLACE - - GrB_TRAN GrB_TRAN
GrB_DESC_RC , // GrB_REPLACE - GrB_COMP - -
GrB_DESC_RCT1 , // GrB_REPLACE - GrB_COMP - GrB_TRAN
GrB_DESC_RCT0 , // GrB_REPLACE - GrB_COMP GrB_TRAN -
GrB_DESC_RCT0T1 , // GrB_REPLACE - GrB_COMP GrB_TRAN GrB_TRAN
GrB_DESC_RS , // GrB_REPLACE GrB_STRUCTURE - - -
GrB_DESC_RST1 , // GrB_REPLACE GrB_STRUCTURE - - GrB_TRAN
GrB_DESC_RST0 , // GrB_REPLACE GrB_STRUCTURE - GrB_TRAN -
GrB_DESC_RST0T1 , // GrB_REPLACE GrB_STRUCTURE - GrB_TRAN GrB_TRAN
GrB_DESC_RSC , // GrB_REPLACE GrB_STRUCTURE GrB_COMP - -
GrB_DESC_RSCT1 , // GrB_REPLACE GrB_STRUCTURE GrB_COMP - GrB_TRAN
GrB_DESC_RSCT0 , // GrB_REPLACE GrB_STRUCTURE GrB_COMP GrB_TRAN -
GrB_DESC_RSCT0T1 ; // GrB_REPLACE GrB_STRUCTURE GrB_COMP GrB_TRAN GrB_TRAN
// GrB_NULL is the default descriptor, with all settings at their defaults:
//
// OUTP: do not replace the output
// MASK: mask is valued and not complemented
// INP0: first input not transposed
// INP1: second input not transposed
// Predefined descriptors may not be modified or freed. Attempting to modify
// them results in an error (GrB_INVALID_VALUE). Attempts to free them are
// silently ignored.
//==============================================================================
// GrB_Type: data types
//==============================================================================
// GrB_Type: an opaque handle to a GraphBLAS scalar type.
typedef struct GB_Type_opaque *GrB_Type ;
// GraphBLAS predefined types and their counterparts in pure C:
GB_PUBLIC GrB_Type
GrB_BOOL , // in C: bool
GrB_INT8 , // in C: int8_t
GrB_INT16 , // in C: int16_t
GrB_INT32 , // in C: int32_t
GrB_INT64 , // in C: int64_t
GrB_UINT8 , // in C: uint8_t
GrB_UINT16 , // in C: uint16_t
GrB_UINT32 , // in C: uint32_t
GrB_UINT64 , // in C: uint64_t
GrB_FP32 , // in C: float
GrB_FP64 , // in C: double
GxB_FC32 , // in C: float complex
GxB_FC64 ; // in C: double complex
//------------------------------------------------------------------------------
// helper macros for polymorphic functions
//------------------------------------------------------------------------------
// GB_CAT pastes four tokens; GB_CONCAT expands its arguments first.
#define GB_CAT(w,x,y,z) w ## x ## y ## z
#define GB_CONCAT(w,x,y,z) GB_CAT (w, x, y, z)
// GB_CASES: the shared _Generic association list used by the polymorphic
// functions, dispatching on the (possibly const) pointer type of p to the
// type-specific prefix_func_TYPE method; void* falls through to the
// user-defined-type (_UDT) variant.
#if GxB_STDC_VERSION >= 201112L
#define GB_CASES(p,prefix,func) \
const bool p : GB_CONCAT ( prefix, _, func, _BOOL ), \
bool p : GB_CONCAT ( prefix, _, func, _BOOL ), \
const int8_t p : GB_CONCAT ( prefix, _, func, _INT8 ), \
int8_t p : GB_CONCAT ( prefix, _, func, _INT8 ), \
const int16_t p : GB_CONCAT ( prefix, _, func, _INT16 ), \
int16_t p : GB_CONCAT ( prefix, _, func, _INT16 ), \
const int32_t p : GB_CONCAT ( prefix, _, func, _INT32 ), \
int32_t p : GB_CONCAT ( prefix, _, func, _INT32 ), \
const int64_t p : GB_CONCAT ( prefix, _, func, _INT64 ), \
int64_t p : GB_CONCAT ( prefix, _, func, _INT64 ), \
const uint8_t p : GB_CONCAT ( prefix, _, func, _UINT8 ), \
uint8_t p : GB_CONCAT ( prefix, _, func, _UINT8 ), \
const uint16_t p : GB_CONCAT ( prefix, _, func, _UINT16 ), \
uint16_t p : GB_CONCAT ( prefix, _, func, _UINT16 ), \
const uint32_t p : GB_CONCAT ( prefix, _, func, _UINT32 ), \
uint32_t p : GB_CONCAT ( prefix, _, func, _UINT32 ), \
const uint64_t p : GB_CONCAT ( prefix, _, func, _UINT64 ), \
uint64_t p : GB_CONCAT ( prefix, _, func, _UINT64 ), \
const float p : GB_CONCAT ( prefix, _, func, _FP32 ), \
float p : GB_CONCAT ( prefix, _, func, _FP32 ), \
const double p : GB_CONCAT ( prefix, _, func, _FP64 ), \
double p : GB_CONCAT ( prefix, _, func, _FP64 ), \
const GxB_FC32_t p : GB_CONCAT ( GxB , _, func, _FC32 ), \
GxB_FC32_t p : GB_CONCAT ( GxB , _, func, _FC32 ), \
const GxB_FC64_t p : GB_CONCAT ( GxB , _, func, _FC64 ), \
GxB_FC64_t p : GB_CONCAT ( GxB , _, func, _FC64 ), \
const void * : GB_CONCAT ( prefix, _, func, _UDT ), \
void * : GB_CONCAT ( prefix, _, func, _UDT )
#endif
//------------------------------------------------------------------------------
// GrB_Type_new: create a new type
//------------------------------------------------------------------------------
// GrB_Type_new is implemented both as a macro and a function. Both are
// user-callable. The default is to use the macro, since this allows the name
// of the type to be saved as a string, for subsequent error reporting by
// GrB_error.
#undef GrB_Type_new
#undef GrM_Type_new
GB_PUBLIC
GrB_Info GRB (Type_new) // create a new GraphBLAS type
(
GrB_Type *type, // handle of user type to create
size_t sizeof_ctype // size = sizeof (ctype) of the C type
) ;
// user code should not directly use GB_STR or GB_XSTR
// GB_STR(x) stringizes x as-is; GB_XSTR(x) expands x first, then stringizes
#define GB_XSTR(x) GB_STR(x)
#define GB_STR(x) #x
// GrB_Type_new as a user-callable macro, which allows the name of the ctype
// to be added to the new type. The type_defn is unknown.
#define GrB_Type_new(utype, sizeof_ctype) \
GxB_Type_new(utype, sizeof_ctype, GB_STR(sizeof_ctype), NULL)
#define GrM_Type_new(utype, sizeof_ctype) \
GxB_Type_new(utype, sizeof_ctype, GB_STR(sizeof_ctype), NULL)
// GxB_Type_new creates a type with a name and definition that are known to
// GraphBLAS, as strings. The type_name is any valid string (max length of 128
// characters, including the required null-terminating character) that may
// appear as the name of a C type created by a C "typedef" statement. It must
// not contain any white-space characters. Example, creating a type of size
// 16*4+4 = 68 bytes, with a 4-by-4 dense float array and a 32-bit integer:
//
// typedef struct { float x [4][4] ; int color ; } myquaternion ;
// GrB_Type MyQtype ;
// GxB_Type_new (&MyQtype, sizeof (myquaternion), "myquaternion",
// "typedef struct { float x [4][4] ; int color ; } myquaternion ;") ;
//
// The type_name and type_defn are both null-terminated strings. Currently,
// type_defn is unused, but it will be required for best performance when a JIT
// is implemented in SuiteSparse:GraphBLAS (both on the CPU and GPU). User
// defined types created by GrB_Type_new will not work with a JIT.
//
// At most GxB_MAX_NAME_LEN characters are accessed in type_name; characters
// beyond that limit are silently ignored.
// maximum length (including the NUL terminator) of a GrB_Type name
#define GxB_MAX_NAME_LEN 128
GB_PUBLIC
GrB_Info GxB_Type_new // create a new named GraphBLAS type
(
GrB_Type *type, // handle of user type to create
size_t sizeof_ctype, // size = sizeof (ctype) of the C type
const char *type_name, // name of the type (max 128 characters)
const char *type_defn // typedef for the type (no max length)
) ;
// GB_Type_new is historical: use GxB_Type_new instead
GB_PUBLIC
GrB_Info GB_Type_new // not user-callable
(
GrB_Type *type, // handle of user type to create
size_t sizeof_ctype, // size of the user type
const char *type_name // name of the type, as "sizeof (ctype)"
) ;
GB_PUBLIC
GrB_Info GxB_Type_name // return the name of a GraphBLAS type
(
char *type_name, // name of the type (char array of size at least
// GxB_MAX_NAME_LEN, owned by the user application).
const GrB_Type type
) ;
GB_PUBLIC
GrB_Info GxB_Type_size // determine the size of the type
(
size_t *size, // the sizeof the type
const GrB_Type type // type to determine the sizeof
) ;
GB_PUBLIC
GrB_Info GxB_Type_from_name // return the built-in GrB_Type from a name
(
GrB_Type *type, // built-in type, or NULL if user-defined
const char *type_name // array of size at least GxB_MAX_NAME_LEN
) ;
GB_PUBLIC
GrB_Info GrB_Type_free // free a user-defined type
(
GrB_Type *type // handle of user-defined type to free
) ;
//==============================================================================
// GrB_UnaryOp: unary operators
//==============================================================================
// GrB_UnaryOp: a function z=f(x). The function f must have the signature:
// void f (void *z, const void *x) ;
// The pointers are void * but they are always of pointers to objects of type
// ztype and xtype, respectively. The function must typecast its arguments as
// needed from void* to ztype* and xtype*.
// GrB_UnaryOp: an opaque handle to a unary operator z = f(x).
typedef struct GB_UnaryOp_opaque *GrB_UnaryOp ;
//------------------------------------------------------------------------------
// built-in unary operators, z = f(x)
//------------------------------------------------------------------------------
GB_PUBLIC GrB_UnaryOp
// For these functions z=f(x), z and x have the same type.
// The suffix in the name is the type of x and z.
// z = x z = -x z = 1/x z = ! (x != 0)
// identity additive multiplicative logical
// inverse inverse negation
GrB_IDENTITY_BOOL, GrB_AINV_BOOL, GrB_MINV_BOOL, GxB_LNOT_BOOL,
GrB_IDENTITY_INT8, GrB_AINV_INT8, GrB_MINV_INT8, GxB_LNOT_INT8,
GrB_IDENTITY_INT16, GrB_AINV_INT16, GrB_MINV_INT16, GxB_LNOT_INT16,
GrB_IDENTITY_INT32, GrB_AINV_INT32, GrB_MINV_INT32, GxB_LNOT_INT32,
GrB_IDENTITY_INT64, GrB_AINV_INT64, GrB_MINV_INT64, GxB_LNOT_INT64,
GrB_IDENTITY_UINT8, GrB_AINV_UINT8, GrB_MINV_UINT8, GxB_LNOT_UINT8,
GrB_IDENTITY_UINT16, GrB_AINV_UINT16, GrB_MINV_UINT16, GxB_LNOT_UINT16,
GrB_IDENTITY_UINT32, GrB_AINV_UINT32, GrB_MINV_UINT32, GxB_LNOT_UINT32,
GrB_IDENTITY_UINT64, GrB_AINV_UINT64, GrB_MINV_UINT64, GxB_LNOT_UINT64,
GrB_IDENTITY_FP32, GrB_AINV_FP32, GrB_MINV_FP32, GxB_LNOT_FP32,
GrB_IDENTITY_FP64, GrB_AINV_FP64, GrB_MINV_FP64, GxB_LNOT_FP64,
// complex unary operators:
GxB_IDENTITY_FC32, GxB_AINV_FC32, GxB_MINV_FC32, // no LNOT
GxB_IDENTITY_FC64, GxB_AINV_FC64, GxB_MINV_FC64, // for complex
// z = 1 z = abs(x) z = bnot(x) z = signum
// one absolute value bitwise negation
GxB_ONE_BOOL, GrB_ABS_BOOL,
GxB_ONE_INT8, GrB_ABS_INT8, GrB_BNOT_INT8,
GxB_ONE_INT16, GrB_ABS_INT16, GrB_BNOT_INT16,
GxB_ONE_INT32, GrB_ABS_INT32, GrB_BNOT_INT32,
GxB_ONE_INT64, GrB_ABS_INT64, GrB_BNOT_INT64,
GxB_ONE_UINT8, GrB_ABS_UINT8, GrB_BNOT_UINT8,
GxB_ONE_UINT16, GrB_ABS_UINT16, GrB_BNOT_UINT16,
GxB_ONE_UINT32, GrB_ABS_UINT32, GrB_BNOT_UINT32,
GxB_ONE_UINT64, GrB_ABS_UINT64, GrB_BNOT_UINT64,
GxB_ONE_FP32, GrB_ABS_FP32,
GxB_ONE_FP64, GrB_ABS_FP64,
// complex unary operators:
GxB_ONE_FC32, // for complex types, z = abs(x)
GxB_ONE_FC64, // is real; listed below.
// Boolean negation, z = !x, where both z and x are boolean. There is no
// suffix since z and x are only boolean. This operator is identical to
// GxB_LNOT_BOOL; it just has a different name.
GrB_LNOT ;
// GxB_ABS is now in the v1.3 spec, the following names are historical:
// historical absolute-value operators; prefer GrB_ABS_* (v1.3 spec and later)
GB_PUBLIC GrB_UnaryOp
// z = abs(x)
GxB_ABS_BOOL,
GxB_ABS_INT8,
GxB_ABS_INT16,
GxB_ABS_INT32,
GxB_ABS_INT64,
GxB_ABS_UINT8,
GxB_ABS_UINT16,
GxB_ABS_UINT32,
GxB_ABS_UINT64,
GxB_ABS_FP32,
GxB_ABS_FP64 ;
//------------------------------------------------------------------------------
// Unary operators for floating-point types only
//------------------------------------------------------------------------------
// The following floating-point unary operators and their ANSI C11 equivalents,
// are only defined for floating-point (real and complex) types.
GB_PUBLIC GrB_UnaryOp
//--------------------------------------------------------------------------
// z = f(x) where z and x have the same type (all 4 floating-point types)
//--------------------------------------------------------------------------
// z = sqrt (x) z = log (x) z = exp (x) z = log2 (x)
GxB_SQRT_FP32, GxB_LOG_FP32, GxB_EXP_FP32, GxB_LOG2_FP32,
GxB_SQRT_FP64, GxB_LOG_FP64, GxB_EXP_FP64, GxB_LOG2_FP64,
GxB_SQRT_FC32, GxB_LOG_FC32, GxB_EXP_FC32, GxB_LOG2_FC32,
GxB_SQRT_FC64, GxB_LOG_FC64, GxB_EXP_FC64, GxB_LOG2_FC64,
// z = sin (x) z = cos (x) z = tan (x)
GxB_SIN_FP32, GxB_COS_FP32, GxB_TAN_FP32,
GxB_SIN_FP64, GxB_COS_FP64, GxB_TAN_FP64,
GxB_SIN_FC32, GxB_COS_FC32, GxB_TAN_FC32,
GxB_SIN_FC64, GxB_COS_FC64, GxB_TAN_FC64,
// z = acos (x) z = asin (x) z = atan (x)
GxB_ACOS_FP32, GxB_ASIN_FP32, GxB_ATAN_FP32,
GxB_ACOS_FP64, GxB_ASIN_FP64, GxB_ATAN_FP64,
GxB_ACOS_FC32, GxB_ASIN_FC32, GxB_ATAN_FC32,
GxB_ACOS_FC64, GxB_ASIN_FC64, GxB_ATAN_FC64,
// z = sinh (x) z = cosh (x) z = tanh (x)
GxB_SINH_FP32, GxB_COSH_FP32, GxB_TANH_FP32,
GxB_SINH_FP64, GxB_COSH_FP64, GxB_TANH_FP64,
GxB_SINH_FC32, GxB_COSH_FC32, GxB_TANH_FC32,
GxB_SINH_FC64, GxB_COSH_FC64, GxB_TANH_FC64,
// z = acosh (x) z = asinh (x) z = atanh (x) z = signum (x)
GxB_ACOSH_FP32, GxB_ASINH_FP32, GxB_ATANH_FP32, GxB_SIGNUM_FP32,
GxB_ACOSH_FP64, GxB_ASINH_FP64, GxB_ATANH_FP64, GxB_SIGNUM_FP64,
GxB_ACOSH_FC32, GxB_ASINH_FC32, GxB_ATANH_FC32, GxB_SIGNUM_FC32,
GxB_ACOSH_FC64, GxB_ASINH_FC64, GxB_ATANH_FC64, GxB_SIGNUM_FC64,
// z = ceil (x) z = floor (x) z = round (x) z = trunc (x)
GxB_CEIL_FP32, GxB_FLOOR_FP32, GxB_ROUND_FP32, GxB_TRUNC_FP32,
GxB_CEIL_FP64, GxB_FLOOR_FP64, GxB_ROUND_FP64, GxB_TRUNC_FP64,
GxB_CEIL_FC32, GxB_FLOOR_FC32, GxB_ROUND_FC32, GxB_TRUNC_FC32,
GxB_CEIL_FC64, GxB_FLOOR_FC64, GxB_ROUND_FC64, GxB_TRUNC_FC64,
// z = exp2 (x) z = expm1 (x) z = log10 (x) z = log1p (x)
GxB_EXP2_FP32, GxB_EXPM1_FP32, GxB_LOG10_FP32, GxB_LOG1P_FP32,
GxB_EXP2_FP64, GxB_EXPM1_FP64, GxB_LOG10_FP64, GxB_LOG1P_FP64,
GxB_EXP2_FC32, GxB_EXPM1_FC32, GxB_LOG10_FC32, GxB_LOG1P_FC32,
GxB_EXP2_FC64, GxB_EXPM1_FC64, GxB_LOG10_FC64, GxB_LOG1P_FC64,
//--------------------------------------------------------------------------
// z = f(x) where z and x are the same type (floating-point real only)
//--------------------------------------------------------------------------
// z = lgamma (x) z = tgamma (x) z = erf (x) z = erfc (x)
GxB_LGAMMA_FP32, GxB_TGAMMA_FP32, GxB_ERF_FP32, GxB_ERFC_FP32,
GxB_LGAMMA_FP64, GxB_TGAMMA_FP64, GxB_ERF_FP64, GxB_ERFC_FP64,
// frexpx and frexpe return the mantissa and exponent, respectively,
// from the ANSI C11 frexp function. The exponent is returned as a
// floating-point value, not an integer.
// z = frexpx (x) z = frexpe (x)
GxB_FREXPX_FP32, GxB_FREXPE_FP32,
GxB_FREXPX_FP64, GxB_FREXPE_FP64,
//--------------------------------------------------------------------------
// z = f(x) where z and x are the same type (complex only)
//--------------------------------------------------------------------------
// z = conj (x)
GxB_CONJ_FC32,
GxB_CONJ_FC64,
//--------------------------------------------------------------------------
// z = f(x) where z is real and x is complex:
//--------------------------------------------------------------------------
// z = creal (x) z = cimag (x) z = carg (x) z = abs (x)
GxB_CREAL_FC32, GxB_CIMAG_FC32, GxB_CARG_FC32, GxB_ABS_FC32,
GxB_CREAL_FC64, GxB_CIMAG_FC64, GxB_CARG_FC64, GxB_ABS_FC64,
//--------------------------------------------------------------------------
// z = f(x) where z is bool and x is any floating-point type
//--------------------------------------------------------------------------
// z = isinf (x)
GxB_ISINF_FP32,
GxB_ISINF_FP64,
GxB_ISINF_FC32, // isinf (creal (x)) || isinf (cimag (x))
GxB_ISINF_FC64, // isinf (creal (x)) || isinf (cimag (x))
// z = isnan (x)
GxB_ISNAN_FP32,
GxB_ISNAN_FP64,
GxB_ISNAN_FC32, // isnan (creal (x)) || isnan (cimag (x))
GxB_ISNAN_FC64, // isnan (creal (x)) || isnan (cimag (x))
// z = isfinite (x)
GxB_ISFINITE_FP32,
GxB_ISFINITE_FP64,
GxB_ISFINITE_FC32, // isfinite (creal (x)) && isfinite (cimag (x))
GxB_ISFINITE_FC64 ; // isfinite (creal (x)) && isfinite (cimag (x))
//------------------------------------------------------------------------------
// methods for unary operators
//------------------------------------------------------------------------------
typedef void (*GxB_unary_function) (void *, const void *) ;
// GrB_UnaryOp_new creates a user-defined unary op, with an automatic
// detection of the operator name.
#undef GrB_UnaryOp_new
#undef GrM_UnaryOp_new
GB_PUBLIC
GrB_Info GRB (UnaryOp_new) // create a new user-defined unary operator
(
GrB_UnaryOp *unaryop, // handle for the new unary operator
GxB_unary_function function, // pointer to the unary function
GrB_Type ztype, // type of output z
GrB_Type xtype // type of input x
) ;
#define GrB_UnaryOp_new(op,f,z,x) \
GxB_UnaryOp_new(op,f,z,x, GB_STR(f), NULL)
#define GrM_UnaryOp_new(op,f,z,x) \
GxM_UnaryOp_new(op,f,z,x, GB_STR(f), NULL)
// GxB_UnaryOp_new creates a named user-defined unary op.
GB_PUBLIC
GrB_Info GxB_UnaryOp_new // create a new user-defined unary operator
(
GrB_UnaryOp *unaryop, // handle for the new unary operator
GxB_unary_function function, // pointer to the unary function
GrB_Type ztype, // type of output z
GrB_Type xtype, // type of input x
const char *unop_name, // name of the user function
const char *unop_defn // definition of the user function
) ;
// GB_UnaryOp_new is historical: use GxB_UnaryOp_new instead
GB_PUBLIC
GrB_Info GB_UnaryOp_new // not user-callable
(
GrB_UnaryOp *unaryop, // handle for the new unary operator
GxB_unary_function function, // pointer to the unary function
GrB_Type ztype, // type of output z
GrB_Type xtype, // type of input x
const char *unop_name // name of the user function
) ;
// GxB_UnaryOp_ztype is historical. Use GxB_UnaryOp_ztype_name instead.
GB_PUBLIC
GrB_Info GxB_UnaryOp_ztype // return the type of z
(
GrB_Type *ztype, // return type of output z
GrB_UnaryOp unaryop // unary operator
) ;
GB_PUBLIC
GrB_Info GxB_UnaryOp_ztype_name // return the type_name of z
(
char *type_name, // user array of size GxB_MAX_NAME_LEN
const GrB_UnaryOp unaryop // unary operator
) ;
// GxB_UnaryOp_xtype is historical. Use GxB_UnaryOp_xtype_name instead.
GB_PUBLIC
GrB_Info GxB_UnaryOp_xtype // return the type of x
(
GrB_Type *xtype, // return type of input x
GrB_UnaryOp unaryop // unary operator
) ;
GB_PUBLIC
GrB_Info GxB_UnaryOp_xtype_name // return the type_name of x
(
char *type_name, // user array of size GxB_MAX_NAME_LEN
const GrB_UnaryOp unaryop // unary operator
) ;
GB_PUBLIC
GrB_Info GrB_UnaryOp_free // free a user-created unary operator
(
GrB_UnaryOp *unaryop // handle of unary operator to free
) ;
//==============================================================================
// GrB_BinaryOp: binary operators
//==============================================================================
// GrB_BinaryOp: a function z=f(x,y). The function f must have the signature:
// void f (void *z, const void *x, const void *y) ;
// The pointers are void * but they are always of pointers to objects of type
// ztype, xtype, and ytype, respectively. See Demo/usercomplex.c for examples.
typedef struct GB_BinaryOp_opaque *GrB_BinaryOp ;
//------------------------------------------------------------------------------
// built-in binary operators, z = f(x,y), where x,y,z all have the same type
//------------------------------------------------------------------------------
GB_PUBLIC GrB_BinaryOp
// operators for all 13 types (including complex):
// GxB_PAIR_T and GrB_ONEB_T are identical; the latter was added to the
// v2.0 C API Specification.
// z = x z = y z = 1 z = pow (x,y)
GrB_FIRST_BOOL, GrB_SECOND_BOOL, GrB_ONEB_BOOL, GxB_POW_BOOL,
GrB_FIRST_INT8, GrB_SECOND_INT8, GrB_ONEB_INT8, GxB_POW_INT8,
GrB_FIRST_INT16, GrB_SECOND_INT16, GrB_ONEB_INT16, GxB_POW_INT16,
GrB_FIRST_INT32, GrB_SECOND_INT32, GrB_ONEB_INT32, GxB_POW_INT32,
GrB_FIRST_INT64, GrB_SECOND_INT64, GrB_ONEB_INT64, GxB_POW_INT64,
GrB_FIRST_UINT8, GrB_SECOND_UINT8, GrB_ONEB_UINT8, GxB_POW_UINT8,
GrB_FIRST_UINT16, GrB_SECOND_UINT16, GrB_ONEB_UINT16, GxB_POW_UINT16,
GrB_FIRST_UINT32, GrB_SECOND_UINT32, GrB_ONEB_UINT32, GxB_POW_UINT32,
GrB_FIRST_UINT64, GrB_SECOND_UINT64, GrB_ONEB_UINT64, GxB_POW_UINT64,
GrB_FIRST_FP32, GrB_SECOND_FP32, GrB_ONEB_FP32, GxB_POW_FP32,
GrB_FIRST_FP64, GrB_SECOND_FP64, GrB_ONEB_FP64, GxB_POW_FP64,
// complex:
GxB_FIRST_FC32, GxB_SECOND_FC32, GxB_ONEB_FC32, GxB_POW_FC32,
GxB_FIRST_FC64, GxB_SECOND_FC64, GxB_ONEB_FC64, GxB_POW_FC64,
// z = x+y z = x-y z = x*y z = x/y
GrB_PLUS_BOOL, GrB_MINUS_BOOL, GrB_TIMES_BOOL, GrB_DIV_BOOL,
GrB_PLUS_INT8, GrB_MINUS_INT8, GrB_TIMES_INT8, GrB_DIV_INT8,
GrB_PLUS_INT16, GrB_MINUS_INT16, GrB_TIMES_INT16, GrB_DIV_INT16,
GrB_PLUS_INT32, GrB_MINUS_INT32, GrB_TIMES_INT32, GrB_DIV_INT32,
GrB_PLUS_INT64, GrB_MINUS_INT64, GrB_TIMES_INT64, GrB_DIV_INT64,
GrB_PLUS_UINT8, GrB_MINUS_UINT8, GrB_TIMES_UINT8, GrB_DIV_UINT8,
GrB_PLUS_UINT16, GrB_MINUS_UINT16, GrB_TIMES_UINT16, GrB_DIV_UINT16,
GrB_PLUS_UINT32, GrB_MINUS_UINT32, GrB_TIMES_UINT32, GrB_DIV_UINT32,
GrB_PLUS_UINT64, GrB_MINUS_UINT64, GrB_TIMES_UINT64, GrB_DIV_UINT64,
GrB_PLUS_FP32, GrB_MINUS_FP32, GrB_TIMES_FP32, GrB_DIV_FP32,
GrB_PLUS_FP64, GrB_MINUS_FP64, GrB_TIMES_FP64, GrB_DIV_FP64,
// complex:
GxB_PLUS_FC32, GxB_MINUS_FC32, GxB_TIMES_FC32, GxB_DIV_FC32,
GxB_PLUS_FC64, GxB_MINUS_FC64, GxB_TIMES_FC64, GxB_DIV_FC64,
// z = y-x z = y/x z = 1 z = any(x,y)
GxB_RMINUS_BOOL, GxB_RDIV_BOOL, GxB_PAIR_BOOL, GxB_ANY_BOOL,
GxB_RMINUS_INT8, GxB_RDIV_INT8, GxB_PAIR_INT8, GxB_ANY_INT8,
GxB_RMINUS_INT16, GxB_RDIV_INT16, GxB_PAIR_INT16, GxB_ANY_INT16,
GxB_RMINUS_INT32, GxB_RDIV_INT32, GxB_PAIR_INT32, GxB_ANY_INT32,
GxB_RMINUS_INT64, GxB_RDIV_INT64, GxB_PAIR_INT64, GxB_ANY_INT64,
GxB_RMINUS_UINT8, GxB_RDIV_UINT8, GxB_PAIR_UINT8, GxB_ANY_UINT8,
GxB_RMINUS_UINT16, GxB_RDIV_UINT16, GxB_PAIR_UINT16, GxB_ANY_UINT16,
GxB_RMINUS_UINT32, GxB_RDIV_UINT32, GxB_PAIR_UINT32, GxB_ANY_UINT32,
GxB_RMINUS_UINT64, GxB_RDIV_UINT64, GxB_PAIR_UINT64, GxB_ANY_UINT64,
GxB_RMINUS_FP32, GxB_RDIV_FP32, GxB_PAIR_FP32, GxB_ANY_FP32,
GxB_RMINUS_FP64, GxB_RDIV_FP64, GxB_PAIR_FP64, GxB_ANY_FP64,
// complex:
GxB_RMINUS_FC32, GxB_RDIV_FC32, GxB_PAIR_FC32, GxB_ANY_FC32,
GxB_RMINUS_FC64, GxB_RDIV_FC64, GxB_PAIR_FC64, GxB_ANY_FC64,
// The GxB_IS* comparators z=f(x,y) return the same type as their
// inputs. Each of them computes z = (x OP y), where x, y, and z all have
// the same type. The value z is either 1 for true or 0 for false, but it
// is a value with the same type as x and y.
// z = (x == y) z = (x != y)
GxB_ISEQ_BOOL, GxB_ISNE_BOOL,
GxB_ISEQ_INT8, GxB_ISNE_INT8,
GxB_ISEQ_INT16, GxB_ISNE_INT16,
GxB_ISEQ_INT32, GxB_ISNE_INT32,
GxB_ISEQ_INT64, GxB_ISNE_INT64,
GxB_ISEQ_UINT8, GxB_ISNE_UINT8,
GxB_ISEQ_UINT16, GxB_ISNE_UINT16,
GxB_ISEQ_UINT32, GxB_ISNE_UINT32,
GxB_ISEQ_UINT64, GxB_ISNE_UINT64,
GxB_ISEQ_FP32, GxB_ISNE_FP32,
GxB_ISEQ_FP64, GxB_ISNE_FP64,
// complex:
GxB_ISEQ_FC32, GxB_ISNE_FC32,
GxB_ISEQ_FC64, GxB_ISNE_FC64,
// z = (x > y) z = (x < y) z = (x >= y) z = (x <= y)
GxB_ISGT_BOOL, GxB_ISLT_BOOL, GxB_ISGE_BOOL, GxB_ISLE_BOOL,
GxB_ISGT_INT8, GxB_ISLT_INT8, GxB_ISGE_INT8, GxB_ISLE_INT8,
GxB_ISGT_INT16, GxB_ISLT_INT16, GxB_ISGE_INT16, GxB_ISLE_INT16,
GxB_ISGT_INT32, GxB_ISLT_INT32, GxB_ISGE_INT32, GxB_ISLE_INT32,
GxB_ISGT_INT64, GxB_ISLT_INT64, GxB_ISGE_INT64, GxB_ISLE_INT64,
GxB_ISGT_UINT8, GxB_ISLT_UINT8, GxB_ISGE_UINT8, GxB_ISLE_UINT8,
GxB_ISGT_UINT16, GxB_ISLT_UINT16, GxB_ISGE_UINT16, GxB_ISLE_UINT16,
GxB_ISGT_UINT32, GxB_ISLT_UINT32, GxB_ISGE_UINT32, GxB_ISLE_UINT32,
GxB_ISGT_UINT64, GxB_ISLT_UINT64, GxB_ISGE_UINT64, GxB_ISLE_UINT64,
GxB_ISGT_FP32, GxB_ISLT_FP32, GxB_ISGE_FP32, GxB_ISLE_FP32,
GxB_ISGT_FP64, GxB_ISLT_FP64, GxB_ISGE_FP64, GxB_ISLE_FP64,
// z = min(x,y) z = max (x,y)
GrB_MIN_BOOL, GrB_MAX_BOOL,
GrB_MIN_INT8, GrB_MAX_INT8,
GrB_MIN_INT16, GrB_MAX_INT16,
GrB_MIN_INT32, GrB_MAX_INT32,
GrB_MIN_INT64, GrB_MAX_INT64,
GrB_MIN_UINT8, GrB_MAX_UINT8,
GrB_MIN_UINT16, GrB_MAX_UINT16,
GrB_MIN_UINT32, GrB_MAX_UINT32,
GrB_MIN_UINT64, GrB_MAX_UINT64,
GrB_MIN_FP32, GrB_MAX_FP32,
GrB_MIN_FP64, GrB_MAX_FP64,
// Binary operators for each of the 11 real types:
// The operators convert non-boolean types internally to boolean and return
// a value 1 or 0 in the same type, for true or false. Each computes z =
// ((x != 0) OP (y != 0)), where x, y, and z all have the same type. These
// operators are useful as multiplicative operators when combined with
// non-boolean monoids of the same type.
// z = (x || y) z = (x && y) z = (x != y)
GxB_LOR_BOOL, GxB_LAND_BOOL, GxB_LXOR_BOOL,
GxB_LOR_INT8, GxB_LAND_INT8, GxB_LXOR_INT8,
GxB_LOR_INT16, GxB_LAND_INT16, GxB_LXOR_INT16,
GxB_LOR_INT32, GxB_LAND_INT32, GxB_LXOR_INT32,
GxB_LOR_INT64, GxB_LAND_INT64, GxB_LXOR_INT64,
GxB_LOR_UINT8, GxB_LAND_UINT8, GxB_LXOR_UINT8,
GxB_LOR_UINT16, GxB_LAND_UINT16, GxB_LXOR_UINT16,
GxB_LOR_UINT32, GxB_LAND_UINT32, GxB_LXOR_UINT32,
GxB_LOR_UINT64, GxB_LAND_UINT64, GxB_LXOR_UINT64,
GxB_LOR_FP32, GxB_LAND_FP32, GxB_LXOR_FP32,
GxB_LOR_FP64, GxB_LAND_FP64, GxB_LXOR_FP64,
// Binary operators that operate only on boolean types: LOR, LAND, LXOR,
// and LXNOR. The naming convention differs (_BOOL is not appended to the
// name). They are the same as GxB_LOR_BOOL, GxB_LAND_BOOL, and
// GxB_LXOR_BOOL, and GrB_EQ_BOOL, respectively.
// z = (x || y) z = (x && y) z = (x != y) z = (x == y)
GrB_LOR, GrB_LAND, GrB_LXOR, GrB_LXNOR,
// Operators for floating-point reals:
// z = atan2(x,y) z = hypot(x,y) z = fmod(x,y) z = remainder(x,y)
GxB_ATAN2_FP32, GxB_HYPOT_FP32, GxB_FMOD_FP32, GxB_REMAINDER_FP32,
GxB_ATAN2_FP64, GxB_HYPOT_FP64, GxB_FMOD_FP64, GxB_REMAINDER_FP64,
// z = ldexp(x,y) z = copysign (x,y)
GxB_LDEXP_FP32, GxB_COPYSIGN_FP32,
GxB_LDEXP_FP64, GxB_COPYSIGN_FP64,
// Bitwise operations on signed and unsigned integers: note that
// bitwise operations on signed integers can lead to different results,
// depending on your compiler; results are implementation-defined.
// z = (x | y) z = (x & y) z = (x ^ y) z = ~(x ^ y)
GrB_BOR_INT8, GrB_BAND_INT8, GrB_BXOR_INT8, GrB_BXNOR_INT8,
GrB_BOR_INT16, GrB_BAND_INT16, GrB_BXOR_INT16, GrB_BXNOR_INT16,
GrB_BOR_INT32, GrB_BAND_INT32, GrB_BXOR_INT32, GrB_BXNOR_INT32,
GrB_BOR_INT64, GrB_BAND_INT64, GrB_BXOR_INT64, GrB_BXNOR_INT64,
GrB_BOR_UINT8, GrB_BAND_UINT8, GrB_BXOR_UINT8, GrB_BXNOR_UINT8,
GrB_BOR_UINT16, GrB_BAND_UINT16, GrB_BXOR_UINT16, GrB_BXNOR_UINT16,
GrB_BOR_UINT32, GrB_BAND_UINT32, GrB_BXOR_UINT32, GrB_BXNOR_UINT32,
GrB_BOR_UINT64, GrB_BAND_UINT64, GrB_BXOR_UINT64, GrB_BXNOR_UINT64,
// z = bitget(x,y) z = bitset(x,y) z = bitclr(x,y)
GxB_BGET_INT8, GxB_BSET_INT8, GxB_BCLR_INT8,
GxB_BGET_INT16, GxB_BSET_INT16, GxB_BCLR_INT16,
GxB_BGET_INT32, GxB_BSET_INT32, GxB_BCLR_INT32,
GxB_BGET_INT64, GxB_BSET_INT64, GxB_BCLR_INT64,
GxB_BGET_UINT8, GxB_BSET_UINT8, GxB_BCLR_UINT8,
GxB_BGET_UINT16, GxB_BSET_UINT16, GxB_BCLR_UINT16,
GxB_BGET_UINT32, GxB_BSET_UINT32, GxB_BCLR_UINT32,
GxB_BGET_UINT64, GxB_BSET_UINT64, GxB_BCLR_UINT64 ;
//------------------------------------------------------------------------------
// z=f(x,y) where z and x have the same type, but y is GrB_INT8
//------------------------------------------------------------------------------
// z = bitshift (x,y) computes z = x left-shifted by y bits if y >= 0, or z
// = x right-shifted by (-y) bits if y < 0. z is equal to x if y is zero.
// z and x have the same type, as given by the suffix on the operator name.
// Since y must be signed, it cannot have the same type as x when x is
// unsigned; it is always GrB_INT8 for all 8 versions of this operator.
// The GxB_BSHIFT_* operators compute the arithmetic shift, and produce the
// same results as the bitshift.m function, for all possible inputs.
GB_PUBLIC GrB_BinaryOp
// z = bitshift(x,y)
GxB_BSHIFT_INT8,
GxB_BSHIFT_INT16,
GxB_BSHIFT_INT32,
GxB_BSHIFT_INT64,
GxB_BSHIFT_UINT8,
GxB_BSHIFT_UINT16,
GxB_BSHIFT_UINT32,
GxB_BSHIFT_UINT64 ;
//------------------------------------------------------------------------------
// z=f(x,y) where z is BOOL and the type of x,y is given by the suffix
//------------------------------------------------------------------------------
GB_PUBLIC GrB_BinaryOp
// Six comparators z=f(x,y) return their result as boolean,
// where x and y have the same type. The suffix in their names refers to
// the type of x and y, since z is always boolean. If used as multiply
// operators in a semiring, they can only be combined with boolean monoids.
// The _BOOL versions of these operators give the same results as their
// IS*_BOOL counterparts. GrB_EQ_BOOL and GrB_LXNOR are identical.
// z = (x == y) z = (x != y) z = (x > y) z = (x < y)
GrB_EQ_BOOL, GrB_NE_BOOL, GrB_GT_BOOL, GrB_LT_BOOL,
GrB_EQ_INT8, GrB_NE_INT8, GrB_GT_INT8, GrB_LT_INT8,
GrB_EQ_INT16, GrB_NE_INT16, GrB_GT_INT16, GrB_LT_INT16,
GrB_EQ_INT32, GrB_NE_INT32, GrB_GT_INT32, GrB_LT_INT32,
GrB_EQ_INT64, GrB_NE_INT64, GrB_GT_INT64, GrB_LT_INT64,
GrB_EQ_UINT8, GrB_NE_UINT8, GrB_GT_UINT8, GrB_LT_UINT8,
GrB_EQ_UINT16, GrB_NE_UINT16, GrB_GT_UINT16, GrB_LT_UINT16,
GrB_EQ_UINT32, GrB_NE_UINT32, GrB_GT_UINT32, GrB_LT_UINT32,
GrB_EQ_UINT64, GrB_NE_UINT64, GrB_GT_UINT64, GrB_LT_UINT64,
GrB_EQ_FP32, GrB_NE_FP32, GrB_GT_FP32, GrB_LT_FP32,
GrB_EQ_FP64, GrB_NE_FP64, GrB_GT_FP64, GrB_LT_FP64,
// complex:
GxB_EQ_FC32, GxB_NE_FC32,
GxB_EQ_FC64, GxB_NE_FC64,
// z = (x >= y) z = (x <= y)
GrB_GE_BOOL, GrB_LE_BOOL,
GrB_GE_INT8, GrB_LE_INT8,
GrB_GE_INT16, GrB_LE_INT16,
GrB_GE_INT32, GrB_LE_INT32,
GrB_GE_INT64, GrB_LE_INT64,
GrB_GE_UINT8, GrB_LE_UINT8,
GrB_GE_UINT16, GrB_LE_UINT16,
GrB_GE_UINT32, GrB_LE_UINT32,
GrB_GE_UINT64, GrB_LE_UINT64,
GrB_GE_FP32, GrB_LE_FP32,
GrB_GE_FP64, GrB_LE_FP64 ;
//------------------------------------------------------------------------------
// z=f(x,y) where z is complex and the type of x,y is given by the suffix
//------------------------------------------------------------------------------
GB_PUBLIC GrB_BinaryOp
// z = cmplx (x,y)
GxB_CMPLX_FP32,
GxB_CMPLX_FP64 ;
//==============================================================================
// positional GrB_UnaryOp and GrB_BinaryOp operators
//==============================================================================
// Positional operators do not depend on the value of an entry, but its row or
// column index in the matrix instead. For example, for an entry A(i,j),
// first_i(A(i,j),y) is equal to i. These operators are useful for returning
// node id's as the result of a semiring operation. If used as a mask, zero
// has a special value, and thus z=first_i1(A(i,j),j) returns i+1 instead of i.
// This can be useful when using a positional operator to construct a mask
// matrix or vector for another GraphBLAS operation. It is also essential for
// the @GrB interface, since the user view of matrix indices in @GrB is
// 1-based, not 0-based.
// When applied to a vector, j is always equal to 0. For a GxB_SCALAR,
// both i and j are always zero.
// GraphBLAS defines a GrB_Index as uint64_t, but these operators return a
// GrB_INT32 or GrB_INT64 type, which is more flexible to use because the
// result of this operator can be negated, to flag an entry for example. The
// value -1 can be used to denote "no node" or "no position". GrB_INT32 is
// useful for graphs smaller than 2^31 nodes. If the row or column index
// exceeds INT32_MAX, the result is determined by the typecast from the
// 64-bit index to the smaller 32-bit index.
// Positional operators cannot be used to construct monoids. They can be used
// as multiplicative operators in semirings, and as operators for GrB_eWise*,
// and GrB_apply (bind first or second). For the latter, the operator cannot
// depend on the bound scalar.
// When used as multiplicative operators in a semiring, FIRSTJ and SECONDI
// are identical. If C(i,j) += t is computed where t = A(i,k)*B(k,j), then
// t = k in both cases. Likewise, FIRSTJ1 and SECONDI1 are identical.
GB_PUBLIC GrB_BinaryOp
GxB_FIRSTI_INT32, GxB_FIRSTI_INT64, // z = first_i(A(i,j),y) == i
GxB_FIRSTI1_INT32, GxB_FIRSTI1_INT64, // z = first_i1(A(i,j),y) == i+1
GxB_FIRSTJ_INT32, GxB_FIRSTJ_INT64, // z = first_j(A(i,j),y) == j
GxB_FIRSTJ1_INT32, GxB_FIRSTJ1_INT64, // z = first_j1(A(i,j),y) == j+1
GxB_SECONDI_INT32, GxB_SECONDI_INT64, // z = second_i(x,B(i,j)) == i
GxB_SECONDI1_INT32, GxB_SECONDI1_INT64, // z = second_i1(x,B(i,j)) == i+1
GxB_SECONDJ_INT32, GxB_SECONDJ_INT64, // z = second_j(x,B(i,j)) == j
GxB_SECONDJ1_INT32, GxB_SECONDJ1_INT64 ; // z = second_j1(x,B(i,j)) == j+1
GB_PUBLIC GrB_UnaryOp
GxB_POSITIONI_INT32, GxB_POSITIONI_INT64, // z=position_i(A(i,j)) == i
GxB_POSITIONI1_INT32, GxB_POSITIONI1_INT64, // z=position_i1(A(i,j)) == i+1
GxB_POSITIONJ_INT32, GxB_POSITIONJ_INT64, // z=position_j(A(i,j)) == j
GxB_POSITIONJ1_INT32, GxB_POSITIONJ1_INT64 ;// z=position_j1(A(i,j)) == j+1
//==============================================================================
// special GrB_BinaryOp for build methods only
//==============================================================================
// In GrB*build* methods, passing dup as NULL means that no duplicates are
// tolerated. If duplicates appear, an error is returned. If dup is a binary
// operator, it is applied to reduce duplicates to a single value. The
// GxB_IGNORE_DUP is a special case. It is not an operator, but an indication
// that any duplicates are to be ignored.
GB_PUBLIC GrB_BinaryOp GxB_IGNORE_DUP ;
//==============================================================================
// About boolean and bitwise binary operators
//==============================================================================
// Some of the boolean operators compute the same thing with different names.
// For example, x*y and x&&y give the same results for boolean x and y.
// Operations such as x < y when x and y are boolean are treated as if true=1
// and false=0. Below is the truth table for all binary operators with boolean
// inputs. This table is defined by how C typecasts boolean values for
// non-boolean operations. For example, if x, y, and z are boolean, x = true,
// and y = true, then z = x + y = true + true = true. DIV (x/y) is defined
// below. RDIV (y/x) is shown as \ in the table; it is the same as 2nd.
// x y 1st 2nd min max + - * / or and xor eq ne > < ge le \ pow pair
// 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 1 1 0 1 1
// 0 1 0 1 0 1 1 1 0 0 1 0 1 0 1 0 1 0 1 1 0 1
// 1 0 1 0 0 1 1 1 0 1 1 0 1 0 1 1 0 1 0 0 1 1
// 1 1 1 1 1 1 1 0 1 1 1 1 0 1 0 0 0 1 1 1 1 1
// GraphBLAS includes a GrB_DIV_BOOL operator in its specification, but does
// not define what boolean "division" means. SuiteSparse:GraphBLAS makes the
// following interpretation.
// GraphBLAS does not generate exceptions for divide-by-zero. Floating-point
// divide-by-zero follows the IEEE 754 standard: 1/0 is +Inf, -1/0 is -Inf, and
// 0/0 is NaN. For integer division by zero, if x is positive, x/0 is the
// largest integer, -x/0 is the integer minimum (zero for unsigned integers),
// and 0/0 is zero. For example, for int8, 1/0 is 127, and -1/0 is -128. For
// uint8, 1/0 is 255 and 0/0 is zero.
// Boolean division is treated as if it were an unsigned integer type with
// true=1 and false=0, and with the max and min value being 1 and 0. As a
// result, GrB_IDENTITY_BOOL, GrB_AINV_BOOL, and GrB_MINV_BOOL all give the
// same result (z = x).
// With this convention for boolean "division", there are 11 unique binary
// operators that are purely boolean. Other named *_BOOL operators are
// redundant but are included in GraphBLAS so that the name space of operators
// is complete. Below is a list of all operators and their equivalents.
// x: 0 0 1 1
// y: 0 1 0 1
// z: see below
//
// z = 0 0 0 0 0 (zero function, not predefined)
// z = (x && y) 0 0 0 1 AND, MIN, TIMES
// z = (x > y) 0 0 1 0 GT, ISGT, and set diff (x\y)
// z = x 0 0 1 1 FIRST, DIV
//
// z = (x < y) 0 1 0 0 LT, ISLT, and set diff (y\x)
// z = y 0 1 0 1 SECOND, RDIV
// z = (x != y) 0 1 1 0 XOR, MINUS, RMINUS, NE, ISNE
// z = (x || y) 0 1 1 1 OR, MAX, PLUS
//
// z = ~(x || y) 1 0 0 0 (nor(x,y) function, not predefined)
// z = (x == y) 1 0 0 1 LXNOR, EQ, ISEQ
// z = ~y 1 0 1 0 (not(y), not predefined)
// z = (x >= y) 1 0 1 1 GE, ISGE, POW, and "x implies y"
//
// z = ~x 1 1 0 0 (not(x), not predefined)
// z = (x <= y) 1 1 0 1 LE, ISLE, and "y implies x"
// z = ~(x && y) 1 1 1 0 (nand(x,y) function, not predefined)
// z = 1 1 1 1 1 PAIR, ONEB
//
// z = any(x,y) 0 . . 1 ANY (pick x or y arbitrarily)
// Four more that have no _BOOL suffix are also redundant with the operators
// of the form GxB_*_BOOL (GrB_LOR, GrB_LAND, GrB_LXOR, and GrB_LXNOR).
// Note that the boolean binary operator space is not complete. Five other
// boolean functions could be pre-defined as well: z = 0, nor(x,y),
// nand(x,y), not(x), and not(y).
// Four of the possible 16 bitwise operators are pre-defined: BOR, BAND,
// BXOR, and BXNOR. This assumes that the computations for each bit are
// entirely independent (so BSHIFT would not fit in the table above).
//------------------------------------------------------------------------------
// methods for binary operators
//------------------------------------------------------------------------------
typedef void (*GxB_binary_function) (void *, const void *, const void *) ;
// GrB_BinaryOp_new creates a user-defined binary op, with an automatic
// detection of the operator name.
#undef GrB_BinaryOp_new
#undef GrM_BinaryOp_new
GB_PUBLIC
GrB_Info GRB (BinaryOp_new)
(
GrB_BinaryOp *binaryop, // handle for the new binary operator
GxB_binary_function function, // pointer to the binary function
GrB_Type ztype, // type of output z
GrB_Type xtype, // type of input x
GrB_Type ytype // type of input y
) ;
#define GrB_BinaryOp_new(op,f,z,x,y) \
GxB_BinaryOp_new(op,f,z,x,y, GB_STR(f), NULL)
#define GrM_BinaryOp_new(op,f,z,x,y) \
GxM_BinaryOp_new(op,f,z,x,y, GB_STR(f), NULL)
// GxB_BinaryOp_new creates a named user-defined binary op.
GB_PUBLIC
GrB_Info GxB_BinaryOp_new
(
GrB_BinaryOp *op, // handle for the new binary operator
GxB_binary_function function, // pointer to the binary function
GrB_Type ztype, // type of output z
GrB_Type xtype, // type of input x
GrB_Type ytype, // type of input y
const char *binop_name, // name of the user function
const char *binop_defn // definition of the user function
) ;
// GB_BinaryOp_new is historical: use GxB_BinaryOp_new instead
GB_PUBLIC
GrB_Info GB_BinaryOp_new // not user-callable
(
GrB_BinaryOp *binaryop, // handle for the new binary operator
GxB_binary_function function, // pointer to the binary function
GrB_Type ztype, // type of output z
GrB_Type xtype, // type of input x
GrB_Type ytype, // type of input y
const char *binop_name // name of the user function
) ;
// NOTE: GxB_BinaryOp_ztype is historical. Use GxB_BinaryOp_ztype_name instead.
GB_PUBLIC
GrB_Info GxB_BinaryOp_ztype // return the type of z
(
GrB_Type *ztype, // return type of output z
GrB_BinaryOp binaryop // binary operator to query
) ;
GB_PUBLIC
GrB_Info GxB_BinaryOp_ztype_name // return the type_name of z
(
char *type_name, // user array of size GxB_MAX_NAME_LEN
const GrB_BinaryOp binaryop // binary operator to query
) ;
// NOTE: GxB_BinaryOp_xtype is historical. Use GxB_BinaryOp_xtype_name instead.
GB_PUBLIC
GrB_Info GxB_BinaryOp_xtype // return the type of x
(
GrB_Type *xtype, // return type of input x
GrB_BinaryOp binaryop // binary operator to query
) ;
GB_PUBLIC
GrB_Info GxB_BinaryOp_xtype_name // return the type_name of x
(
char *type_name, // user array of size GxB_MAX_NAME_LEN
const GrB_BinaryOp binaryop // binary operator to query
) ;
// NOTE: GxB_BinaryOp_ytype is historical. Use GxB_BinaryOp_ytype_name instead.
GB_PUBLIC
GrB_Info GxB_BinaryOp_ytype // return the type of y
(
GrB_Type *ytype, // return type of input y
GrB_BinaryOp binaryop // binary operator to query
) ;
GB_PUBLIC
GrB_Info GxB_BinaryOp_ytype_name // return the type_name of y
(
char *type_name, // user array of size GxB_MAX_NAME_LEN
const GrB_BinaryOp binaryop // binary operator to query
) ;
GB_PUBLIC
GrB_Info GrB_BinaryOp_free // free a user-created binary operator
(
GrB_BinaryOp *binaryop // handle of binary operator to free
) ;
//==============================================================================
// GxB_SelectOp: select operators (historical)
//==============================================================================
// GrB_IndexUnaryOp should be used instead of GxB_SelectOp.
// GxB_SelectOp is an operator used by GxB_select to select entries from an
// input matrix A that are kept in the output C. If an entry A(i,j) in the
// matrix A, of size nrows-by-ncols, has the value aij, then it calls the
// select function as result = f (i, j, aij, thunk). If the function returns
// true, the entry is kept in the output C. If f returns false, the entry is
// not kept in C. The type of x for the GxB_SelectOp operator may be any of
// the 11 built-in types, or any user-defined type. It may also be GrB_NULL,
// to indicate that the function is type-generic and does not depend at all on
// the value aij. In this case, x is passed to f as a NULL pointer.
// The optional Thunk parameter to GxB_select is a GrB_Scalar. For built-in
// select operators (TRIL, TRIU, DIAG, and OFFDIAG), Thunk must have any
// built-in type, and thunk = (int64_t) Thunk is used to specify the diagonal
// for these operators. Thunk may be NULL, in which case its value is treated
// as zero, if it has a built-in type. The value of Thunk (if present) is not
// modified by any built-in select operator.
// For user-defined select operators, Thunk is not typecasted at all. If
// the user operator is defined with a non-NULL Thunk input, then it must
// be non-NULL and of the same type, when calling GxB_select.
// GxB_SelectOp: a function z=f(i,j,x,thunk) for the GxB_Select operation.
// The function f must have the signature:
// bool f (GrB_Index i, GrB_Index j, const void *x, const void *thunk) ;
// The values of i and j are guaranteed to be in the range 0 to
// GrB_INDEX_MAX, and they can be safely typecasted to int64_t then negated,
// if desired, without any risk of integer overflow.
typedef struct GB_SelectOp_opaque *GxB_SelectOp ;
//------------------------------------------------------------------------------
// built-in select operators (historical)
//------------------------------------------------------------------------------
// GxB_select (C, Mask, accum, op, A, Thunk, desc) always returns a matrix C of
// the same size as A (or A' if GrB_TRAN is in the descriptor).
GB_PUBLIC GxB_SelectOp
GxB_TRIL, // C=tril(A,thunk): returns true if ((j-i) <= thunk)
GxB_TRIU, // C=triu(A,thunk): returns true if ((j-i) >= thunk)
GxB_DIAG, // C=diag(A,thunk): returns true if ((j-i) == thunk)
GxB_OFFDIAG, // C=A-diag(A,thunk): returns true if ((j-i) != thunk)
GxB_NONZERO, // C=A(A ~= 0)
GxB_EQ_ZERO, // C=A(A == 0)
GxB_GT_ZERO, // C=A(A > 0)
GxB_GE_ZERO, // C=A(A >= 0)
GxB_LT_ZERO, // C=A(A < 0)
GxB_LE_ZERO, // C=A(A <= 0)
GxB_NE_THUNK, // C=A(A ~= thunk)
GxB_EQ_THUNK, // C=A(A == thunk)
GxB_GT_THUNK, // C=A(A > thunk)
GxB_GE_THUNK, // C=A(A >= thunk)
GxB_LT_THUNK, // C=A(A < thunk)
GxB_LE_THUNK ; // C=A(A <= thunk)
// For GxB_TRIL, GxB_TRIU, GxB_DIAG, and GxB_OFFDIAG, the parameter Thunk is a
// GrB_Scalar of any built-in type. If GrB_NULL, or empty, Thunk is treated as
// zero. Otherwise, the single entry is typecasted as (int64_t) Thunk.
// These select operators do not depend on the values of A, but just their
// position, and they work on matrices of any type.
// For GxB_*ZERO, the result depends only on the value of A(i,j). The Thunk
// parameter to GxB_select is ignored and may be GrB_NULL.
// The operators GxB_TRIL, GxB_TRIU, GxB_DIAG, GxB_OFFDIAG, GxB_NONZERO,
// GxB_EQ_ZERO, GxB_NE_THUNK, and GxB_EQ_THUNK work on all built-in types and
// all user-defined types.
// GxB_GT_*, GxB_GE_*, GxB_LT_*, and GxB_LE_* only work on the 11 built-in
// types (not complex). They cannot be used for user-defined types.
//------------------------------------------------------------------------------
// select operators: (historical)
//------------------------------------------------------------------------------
// User-defined GxB_SelectOps are historical. New code should use
// GrB_IndexUnaryOp_new instead.
typedef bool (*GxB_select_function) // return true if A(i,j) is kept
(
GrB_Index i, // row index of A(i,j)
GrB_Index j, // column index of A(i,j)
const void *x, // value of A(i,j)
const void *thunk // optional input for select function
) ;
#undef GxB_SelectOp_new
#undef GxM_SelectOp_new
GB_PUBLIC
GrB_Info GXB (SelectOp_new) // create a new user-defined select operator
(
GxB_SelectOp *selectop, // handle for the new select operator
GxB_select_function function,// pointer to the select function
GrB_Type xtype, // type of input x, or NULL if type-generic
GrB_Type ttype // type of thunk, or NULL if not used
) ;
#define GxB_SelectOp_new(op,f,x,t) GB_SelectOp_new (op,f,x,t, GB_STR(f))
#define GxM_SelectOp_new(op,f,x,t) GM_SelectOp_new (op,f,x,t, GB_STR(f))
// GB_SelectOp_new should not be called directly, but only through the
// GxB_SelectOp_new macro (but use GrB_IndexUnaryOp_new instead).
GB_PUBLIC
GrB_Info GB_SelectOp_new // not user-callable
(
GxB_SelectOp *selectop, // handle for the new select operator
GxB_select_function function,// pointer to the select function
GrB_Type xtype, // type of input x
GrB_Type ttype, // type of thunk, or NULL if not used
const char *name // name of the underlying function
) ;
// GxB_SelectOp_xtype is historical. Use a GrB_IndexUnaryOp instead.
GB_PUBLIC
GrB_Info GxB_SelectOp_xtype // return the type of x
(
GrB_Type *xtype, // return type of input x
GxB_SelectOp selectop // select operator
) ;
// GxB_SelectOp_ttype is historical. Use a GrB_IndexUnaryOp instead.
GB_PUBLIC
GrB_Info GxB_SelectOp_ttype // return the type of thunk
(
GrB_Type *ttype, // return type of input thunk
GxB_SelectOp selectop // select operator
) ;
GB_PUBLIC
GrB_Info GxB_SelectOp_free // free a user-created select operator
(
GxB_SelectOp *selectop // handle of select operator to free
) ;
//==============================================================================
// GrB_IndexUnaryOp: a unary operator that depends on the row/col indices
//==============================================================================
// The indexop has the form z = f(aij, i, j, y) where aij is the numerical
// value of the A(i,j) entry, i and j are its row and column index, and y
// is a scalar. For vectors, it has the form z = f(vi, i, 0, y).
typedef struct GB_IndexUnaryOp_opaque *GrB_IndexUnaryOp ;
typedef void (*GxB_index_unary_function)
(
void *z, // output value z, of type ztype
const void *x, // input value x of type xtype; value of v(i) or A(i,j)
GrB_Index i, // row index of A(i,j)
GrB_Index j, // column index of A(i,j), or zero for v(i)
const void *y // input scalar y
) ;
// GrB_IndexUnaryOp_new creates a user-defined unary op, with an automatic
// detection of the operator name.
#undef GrB_IndexUnaryOp_new
#undef GrM_IndexUnaryOp_new
GB_PUBLIC
GrB_Info GRB (IndexUnaryOp_new) // create a new user-defined IndexUnary op
(
GrB_IndexUnaryOp *op, // handle for the new IndexUnary operator
GxB_index_unary_function function, // pointer to IndexUnary function
GrB_Type ztype, // type of output z
GrB_Type xtype, // type of input x (the A(i,j) entry)
GrB_Type ytype // type of input y (the scalar)
) ;
#define GrB_IndexUnaryOp_new(op,f,z,x,y) \
GxB_IndexUnaryOp_new(op,f,z,x,y, GB_STR(f), NULL)
#define GrM_IndexUnaryOp_new(op,f,z,x,y) \
GxM_IndexUnaryOp_new(op,f,z,x,y, GB_STR(f), NULL)
GB_PUBLIC
GrB_Info GxB_IndexUnaryOp_new // create a named user-created IndexUnaryOp
(
GrB_IndexUnaryOp *op, // handle for the new IndexUnary operator
GxB_index_unary_function function, // pointer to index_unary function
GrB_Type ztype, // type of output z
GrB_Type xtype, // type of input x
GrB_Type ytype, // type of input y
const char *idxop_name, // name of the user function
const char *idxop_defn // definition of the user function
) ;
GB_PUBLIC
GrB_Info GxB_IndexUnaryOp_ztype_name // return the type_name of z
(
char *type_name, // user array of size GxB_MAX_NAME_LEN
const GrB_IndexUnaryOp op // IndexUnary operator
) ;
// For TRIL, TRIU, DIAG, OFFDIAG, COLLE, COLGT, ROWLE, and ROWGT,
// the xtype_name is an empty string (""), since these functions do not depend
// on the type of the matrix input.
GB_PUBLIC
GrB_Info GxB_IndexUnaryOp_xtype_name // return the type_name of x
(
char *type_name, // user array of size GxB_MAX_NAME_LEN
const GrB_IndexUnaryOp op // IndexUnary operator
) ;
GB_PUBLIC
GrB_Info GxB_IndexUnaryOp_ytype_name // return the type_name of the scalar y
(
char *type_name, // user array of size GxB_MAX_NAME_LEN
const GrB_IndexUnaryOp op // IndexUnary operator
) ;
GB_PUBLIC
GrB_Info GrB_IndexUnaryOp_free // free a user-created IndexUnaryOp
(
GrB_IndexUnaryOp *op // handle of IndexUnary to free
) ;
//------------------------------------------------------------------------------
// built-in IndexUnaryOps
//------------------------------------------------------------------------------
// To facilitate computations with negative integers, the indices i and j are
// of type int64_t. The scalar y has the type corresponding to the suffix
// of the name of the operator.
GB_PUBLIC GrB_IndexUnaryOp
//--------------------------------------------------------------------------
// Result has the integer type INT32 or INT64, the same as the suffix
//--------------------------------------------------------------------------
// These operators work on any data type, including user-defined.
// ROWINDEX: (i+y): row index plus y
GrB_ROWINDEX_INT32, GrB_ROWINDEX_INT64,
// COLINDEX: (j+y): col index plus y
GrB_COLINDEX_INT32, GrB_COLINDEX_INT64,
// DIAGINDEX: (j-(i+y)): diagonal index plus y
GrB_DIAGINDEX_INT32, GrB_DIAGINDEX_INT64,
//--------------------------------------------------------------------------
// Result is bool, depending only on the indices i,j, and y
//--------------------------------------------------------------------------
// These operators work on any data type, including user-defined.
// The scalar y is int64.
// TRIL: (j <= (i+y)): lower triangular part
GrB_TRIL,
// TRIU: (j >= (i+y)): upper triangular part
GrB_TRIU,
// DIAG: (j == (i+y)): diagonal
GrB_DIAG,
// OFFDIAG: (j != (i+y)): offdiagonal
GrB_OFFDIAG,
// COLLE: (j <= y): columns 0:y
GrB_COLLE,
// COLGT: (j > y): columns y+1:ncols-1
GrB_COLGT,
// ROWLE: (i <= y): rows 0:y
GrB_ROWLE,
// ROWGT: (i > y): rows y+1:nrows-1
GrB_ROWGT,
//--------------------------------------------------------------------------
// Result is bool, depending only on the value aij
//--------------------------------------------------------------------------
// These operators work on matrices and vectors of any built-in type,
// including complex types. aij and the scalar y have the same type as the
// operator suffix.
// VALUEEQ: (aij == y)
GrB_VALUEEQ_INT8, GrB_VALUEEQ_UINT8, GrB_VALUEEQ_FP32, GrB_VALUEEQ_BOOL,
GrB_VALUEEQ_INT16, GrB_VALUEEQ_UINT16, GrB_VALUEEQ_FP64,
GrB_VALUEEQ_INT32, GrB_VALUEEQ_UINT32, GxB_VALUEEQ_FC32,
GrB_VALUEEQ_INT64, GrB_VALUEEQ_UINT64, GxB_VALUEEQ_FC64,
// VALUENE: (aij != y)
GrB_VALUENE_INT8, GrB_VALUENE_UINT8, GrB_VALUENE_FP32, GrB_VALUENE_BOOL,
GrB_VALUENE_INT16, GrB_VALUENE_UINT16, GrB_VALUENE_FP64,
GrB_VALUENE_INT32, GrB_VALUENE_UINT32, GxB_VALUENE_FC32,
GrB_VALUENE_INT64, GrB_VALUENE_UINT64, GxB_VALUENE_FC64,
// These operators work on matrices and vectors of any real (non-complex)
// built-in type.
// VALUELT: (aij < y)
GrB_VALUELT_INT8, GrB_VALUELT_UINT8, GrB_VALUELT_FP32, GrB_VALUELT_BOOL,
GrB_VALUELT_INT16, GrB_VALUELT_UINT16, GrB_VALUELT_FP64,
GrB_VALUELT_INT32, GrB_VALUELT_UINT32,
GrB_VALUELT_INT64, GrB_VALUELT_UINT64,
// VALUELE: (aij <= y)
GrB_VALUELE_INT8, GrB_VALUELE_UINT8, GrB_VALUELE_FP32, GrB_VALUELE_BOOL,
GrB_VALUELE_INT16, GrB_VALUELE_UINT16, GrB_VALUELE_FP64,
GrB_VALUELE_INT32, GrB_VALUELE_UINT32,
GrB_VALUELE_INT64, GrB_VALUELE_UINT64,
// VALUEGT: (aij > y)
GrB_VALUEGT_INT8, GrB_VALUEGT_UINT8, GrB_VALUEGT_FP32, GrB_VALUEGT_BOOL,
GrB_VALUEGT_INT16, GrB_VALUEGT_UINT16, GrB_VALUEGT_FP64,
GrB_VALUEGT_INT32, GrB_VALUEGT_UINT32,
GrB_VALUEGT_INT64, GrB_VALUEGT_UINT64,
// VALUEGE: (aij >= y)
GrB_VALUEGE_INT8, GrB_VALUEGE_UINT8, GrB_VALUEGE_FP32, GrB_VALUEGE_BOOL,
GrB_VALUEGE_INT16, GrB_VALUEGE_UINT16, GrB_VALUEGE_FP64,
GrB_VALUEGE_INT32, GrB_VALUEGE_UINT32,
GrB_VALUEGE_INT64, GrB_VALUEGE_UINT64 ;
//==============================================================================
// GrB_Monoid
//==============================================================================
// A monoid is an associative operator z=op(x,y) where all three types of z, x,
// and y are identical. The monoid also has an identity element, such that
// op(x,identity) = op(identity,x) = x.
typedef struct GB_Monoid_opaque *GrB_Monoid ;
GB_PUBLIC
GrB_Info GrB_Monoid_new_BOOL // create a new boolean monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
bool identity // identity value of the monoid
) ;
GB_PUBLIC
GrB_Info GrB_Monoid_new_INT8 // create a new int8 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
int8_t identity // identity value of the monoid
) ;
GB_PUBLIC
GrB_Info GrB_Monoid_new_UINT8 // create a new uint8 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
uint8_t identity // identity value of the monoid
) ;
GB_PUBLIC
GrB_Info GrB_Monoid_new_INT16 // create a new int16 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
int16_t identity // identity value of the monoid
) ;
GB_PUBLIC
GrB_Info GrB_Monoid_new_UINT16 // create a new uint16 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
uint16_t identity // identity value of the monoid
) ;
GB_PUBLIC
GrB_Info GrB_Monoid_new_INT32 // create a new int32 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
int32_t identity // identity value of the monoid
) ;
GB_PUBLIC
GrB_Info GrB_Monoid_new_UINT32 // create a new uint32 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
uint32_t identity // identity value of the monoid
) ;
GB_PUBLIC
GrB_Info GrB_Monoid_new_INT64 // create a new int64 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
int64_t identity // identity value of the monoid
) ;
GB_PUBLIC
GrB_Info GrB_Monoid_new_UINT64 // create a new uint64 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
uint64_t identity // identity value of the monoid
) ;
GB_PUBLIC
GrB_Info GrB_Monoid_new_FP32 // create a new float monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
float identity // identity value of the monoid
) ;
GB_PUBLIC
GrB_Info GrB_Monoid_new_FP64 // create a new double monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
double identity // identity value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_new_FC32 // create a new float complex monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
GxB_FC32_t identity // identity value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_new_FC64 // create a new double complex monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
GxB_FC64_t identity // identity value of the monoid
) ;
GB_PUBLIC
GrB_Info GrB_Monoid_new_UDT // create a monoid with a user-defined type
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
void *identity // identity value of the monoid
) ;
// Type-generic method for creating a new monoid:
/*
GB_PUBLIC
GrB_Info GrB_Monoid_new // create a monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
<type> identity // identity value of the monoid
) ;
*/
#if GxB_STDC_VERSION >= 201112L
#define GrB_Monoid_new(monoid,op,identity) \
_Generic \
( \
(identity), \
GB_CASES (, GrB, Monoid_new) \
) \
(monoid, op, identity)
#endif
// GxB_Monoid_terminal_new is identical to GrB_Monoid_new, except that a
// terminal value can be specified. The terminal may be NULL, which indicates
// no terminal value (and in this case, it is identical to GrB_Monoid_new).
// The terminal value, if not NULL, must have the same type as the identity.
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new_BOOL // create a new boolean monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
bool identity, // identity value of the monoid
bool terminal // terminal value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new_INT8 // create a new int8 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
int8_t identity, // identity value of the monoid
int8_t terminal // terminal value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new_UINT8 // create a new uint8 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
uint8_t identity, // identity value of the monoid
uint8_t terminal // terminal value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new_INT16 // create a new int16 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
int16_t identity, // identity value of the monoid
int16_t terminal // terminal value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new_UINT16 // create a new uint16 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
uint16_t identity, // identity value of the monoid
uint16_t terminal // terminal value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new_INT32 // create a new int32 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
int32_t identity, // identity value of the monoid
int32_t terminal // terminal value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new_UINT32 // create a new uint32 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
uint32_t identity, // identity value of the monoid
uint32_t terminal // terminal value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new_INT64 // create a new int64 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
int64_t identity, // identity value of the monoid
int64_t terminal // terminal value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new_UINT64 // create a new uint64 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
uint64_t identity, // identity value of the monoid
uint64_t terminal // terminal value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new_FP32 // create a new float monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
float identity, // identity value of the monoid
float terminal // terminal value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new_FP64 // create a new double monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
double identity, // identity value of the monoid
double terminal // terminal value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new_FC32 // create a new float complex monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
GxB_FC32_t identity, // identity value of the monoid
GxB_FC32_t terminal // terminal value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new_FC64 // create a new double complex monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
GxB_FC64_t identity, // identity value of the monoid
GxB_FC64_t terminal // terminal value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new_UDT // create a monoid with a user type
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
void *identity, // identity value of the monoid
void *terminal // terminal value of the monoid
) ;
// Type-generic method for creating a new monoid with a terminal value:
/*
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new // create a monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
<type> identity, // identity value of the monoid
<type> terminal // terminal value of the monoid
) ;
*/
#if GxB_STDC_VERSION >= 201112L
#define GxB_Monoid_terminal_new(monoid,op,identity,terminal) \
_Generic \
( \
(identity), \
GB_CASES (, GxB, Monoid_terminal_new) \
) \
(monoid, op, identity, terminal)
#endif
GB_PUBLIC
GrB_Info GxB_Monoid_operator // return the monoid operator
(
GrB_BinaryOp *op, // returns the binary op of the monoid
GrB_Monoid monoid // monoid to query
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_identity // return the monoid identity
(
void *identity, // returns the identity of the monoid
GrB_Monoid monoid // monoid to query
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_terminal // return the monoid terminal
(
bool *has_terminal, // true if the monoid has a terminal value
void *terminal, // returns the terminal of the monoid,
// unmodified if has_terminal is false
GrB_Monoid monoid // monoid to query
) ;
GB_PUBLIC
GrB_Info GrB_Monoid_free // free a user-created monoid
(
GrB_Monoid *monoid // handle of monoid to free
) ;
//==============================================================================
// GrB_Semiring
//==============================================================================
typedef struct GB_Semiring_opaque *GrB_Semiring ;
GB_PUBLIC
GrB_Info GrB_Semiring_new // create a semiring
(
GrB_Semiring *semiring, // handle of semiring to create
GrB_Monoid add, // add monoid of the semiring
GrB_BinaryOp multiply // multiply operator of the semiring
) ;
GB_PUBLIC
GrB_Info GxB_Semiring_add // return the add monoid of a semiring
(
GrB_Monoid *add, // returns add monoid of the semiring
GrB_Semiring semiring // semiring to query
) ;
GB_PUBLIC
GrB_Info GxB_Semiring_multiply // return multiply operator of a semiring
(
GrB_BinaryOp *multiply, // returns multiply operator of the semiring
GrB_Semiring semiring // semiring to query
) ;
GB_PUBLIC
GrB_Info GrB_Semiring_free // free a user-created semiring
(
GrB_Semiring *semiring // handle of semiring to free
) ;
//==============================================================================
// GrB_Scalar: a GraphBLAS scalar
//==============================================================================
// GxB_Scalar has become GrB_Scalar. The older name GxB_Scalar is kept as
// historical, but GrB_Scalar should be used instead.
typedef struct GB_Scalar_opaque *GxB_Scalar ; // historical: use GrB_Scalar
typedef struct GB_Scalar_opaque *GrB_Scalar ; // use this instead
// These methods create, free, copy, and clear a GrB_Scalar. The nvals
// and type methods return basic information about a GrB_Scalar.
GB_PUBLIC
GrB_Info GrB_Scalar_new // create a new GrB_Scalar with no entry
(
GrB_Scalar *s, // handle of GrB_Scalar to create
GrB_Type type // type of GrB_Scalar to create
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_dup // make an exact copy of a GrB_Scalar
(
GrB_Scalar *s, // handle of output GrB_Scalar to create
const GrB_Scalar t // input GrB_Scalar to copy
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_clear // clear a GrB_Scalar of its entry
( // type remains unchanged.
GrB_Scalar s // GrB_Scalar to clear
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_nvals // get the number of entries in a GrB_Scalar
(
GrB_Index *nvals, // GrB_Scalar has nvals entries (0 or 1)
const GrB_Scalar s // GrB_Scalar to query
) ;
// NOTE: GxB_Scalar_type is historical. Use GxB_Scalar_type_name instead.
GB_PUBLIC
GrB_Info GxB_Scalar_type // get the type of a GrB_Scalar
(
GrB_Type *type, // returns the type of the GrB_Scalar
const GrB_Scalar s // GrB_Scalar to query
) ;
GB_PUBLIC
GrB_Info GxB_Scalar_type_name // return the name of the type of a scalar
(
char *type_name, // name of the type (char array of size at least
// GxB_MAX_NAME_LEN, owned by the user application).
const GrB_Scalar s // GrB_Scalar to query
) ;
GB_PUBLIC
GrB_Info GxB_Scalar_memoryUsage // return # of bytes used for a scalar
(
size_t *size, // # of bytes used by the scalar s
const GrB_Scalar s // GrB_Scalar to query
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_free // free a GrB_Scalar
(
GrB_Scalar *s // handle of GrB_Scalar to free
) ;
// historical names identical to GrB_Scalar_methods above:
GB_PUBLIC GrB_Info GxB_Scalar_new (GrB_Scalar *s, GrB_Type type) ;
GB_PUBLIC GrB_Info GxB_Scalar_dup (GrB_Scalar *s, const GrB_Scalar t) ;
GB_PUBLIC GrB_Info GxB_Scalar_clear (GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_nvals (GrB_Index *nvals, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_free (GrB_Scalar *s) ;
//------------------------------------------------------------------------------
// GrB_Scalar_setElement
//------------------------------------------------------------------------------
// Set a single GrB_Scalar s, from a user scalar x: s = x, typecasting from the
// type of x to the type of s as needed.
GB_PUBLIC
GrB_Info GrB_Scalar_setElement_BOOL // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
bool x // user scalar to assign to s
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_setElement_INT8 // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
int8_t x // user scalar to assign to s
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_setElement_UINT8 // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
uint8_t x // user scalar to assign to s
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_setElement_INT16 // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
int16_t x // user scalar to assign to s
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_setElement_UINT16 // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
uint16_t x // user scalar to assign to s
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_setElement_INT32 // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
int32_t x // user scalar to assign to s
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_setElement_UINT32 // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
uint32_t x // user scalar to assign to s
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_setElement_INT64 // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
int64_t x // user scalar to assign to s
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_setElement_UINT64 // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
uint64_t x // user scalar to assign to s
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_setElement_FP32 // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
float x // user scalar to assign to s
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_setElement_FP64 // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
double x // user scalar to assign to s
) ;
GB_PUBLIC
GrB_Info GxB_Scalar_setElement_FC32 // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
GxB_FC32_t x // user scalar to assign to s
) ;
GB_PUBLIC
GrB_Info GxB_Scalar_setElement_FC64 // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
GxB_FC64_t x // user scalar to assign to s
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_setElement_UDT // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
void *x // user scalar to assign to s
) ;
// historical names identical to GrB_Scalar_methods above:
GB_PUBLIC GrB_Info GxB_Scalar_setElement_BOOL (GrB_Scalar s, bool x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_INT8 (GrB_Scalar s, int8_t x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_INT16 (GrB_Scalar s, int16_t x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_INT32 (GrB_Scalar s, int32_t x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_INT64 (GrB_Scalar s, int64_t x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_UINT8 (GrB_Scalar s, uint8_t x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_UINT16 (GrB_Scalar s, uint16_t x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_UINT32 (GrB_Scalar s, uint32_t x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_UINT64 (GrB_Scalar s, uint64_t x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_FP32 (GrB_Scalar s, float x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_FP64 (GrB_Scalar s, double x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_UDT (GrB_Scalar s, void *x) ;
// Type-generic version: x can be any supported C type or void * for a
// user-defined type.
/*
GB_PUBLIC
GrB_Info GrB_Scalar_setElement // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
<type> x // user scalar to assign to s
) ;
*/
#if GxB_STDC_VERSION >= 201112L
#define GrB_Scalar_setElement(s,x) \
_Generic \
( \
(x), \
GB_CASES (, GrB, Scalar_setElement) \
) \
(s, x)
#define GxB_Scalar_setElement(s,x) GrB_Scalar_setElement (s, x)
#endif
//------------------------------------------------------------------------------
// GrB_Scalar_extractElement
//------------------------------------------------------------------------------
// Extract a single entry from a GrB_Scalar, x = s, typecasting from the type
// of s to the type of x as needed.
GB_PUBLIC
GrB_Info GrB_Scalar_extractElement_BOOL // x = s
(
bool *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_extractElement_INT8 // x = s
(
int8_t *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_extractElement_UINT8 // x = s
(
uint8_t *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_extractElement_INT16 // x = s
(
int16_t *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_extractElement_UINT16 // x = s
(
uint16_t *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_extractElement_INT32 // x = s
(
int32_t *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_extractElement_UINT32 // x = s
(
uint32_t *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_extractElement_INT64 // x = s
(
int64_t *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_extractElement_UINT64 // x = s
(
uint64_t *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_extractElement_FP32 // x = s
(
float *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_extractElement_FP64 // x = s
(
double *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
GB_PUBLIC
GrB_Info GxB_Scalar_extractElement_FC32 // x = s
(
GxB_FC32_t *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
GB_PUBLIC
GrB_Info GxB_Scalar_extractElement_FC64 // x = s
(
GxB_FC64_t *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_extractElement_UDT // x = s
(
void *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
// historical names identical to GrB_Scalar_methods above:
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_BOOL (bool *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_INT8 (int8_t *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_INT16 (int16_t *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_INT32 (int32_t *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_INT64 (int64_t *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_UINT8 (uint8_t *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_UINT16 (uint16_t *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_UINT32 (uint32_t *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_UINT64 (uint64_t *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_FP32 (float *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_FP64 (double *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_UDT (void *x, const GrB_Scalar s) ;
// Type-generic version: x can be a pointer to any supported C type or void *
// for a user-defined type.
/*
GB_PUBLIC
GrB_Info GrB_Scalar_extractElement // x = s
(
<type> *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
*/
#if GxB_STDC_VERSION >= 201112L
#define GrB_Scalar_extractElement(x,s) \
_Generic \
( \
(x), \
GB_CASES (*, GrB, Scalar_extractElement) \
) \
(x, s)
#define GxB_Scalar_extractElement(x,s) GrB_Scalar_extractElement (x, s)
#endif
//==============================================================================
// GrB_Vector: a GraphBLAS vector
//==============================================================================
typedef struct GB_Vector_opaque *GrB_Vector ;
// These methods create, free, copy, and clear a vector. The size, nvals,
// and type methods return basic information about a vector.
GB_PUBLIC
GrB_Info GrB_Vector_new // create a new vector with no entries
(
GrB_Vector *v, // handle of vector to create
GrB_Type type, // type of vector to create
GrB_Index n // vector dimension is n-by-1
// (n must be <= GrB_INDEX_MAX+1)
) ;
GB_PUBLIC
GrB_Info GrB_Vector_dup // make an exact copy of a vector
(
GrB_Vector *w, // handle of output vector to create
const GrB_Vector u // input vector to copy
) ;
GB_PUBLIC
GrB_Info GrB_Vector_clear // clear a vector of all entries;
( // type and dimension remain unchanged.
GrB_Vector v // vector to clear
) ;
GB_PUBLIC
GrB_Info GrB_Vector_size // get the dimension of a vector
(
GrB_Index *n, // vector dimension is n-by-1
const GrB_Vector v // vector to query
) ;
GB_PUBLIC
GrB_Info GrB_Vector_nvals // get the number of entries in a vector
(
GrB_Index *nvals, // vector has nvals entries
const GrB_Vector v // vector to query
) ;
// NOTE: GxB_Vector_type is historical. Use GxB_Vector_type_name instead.
GB_PUBLIC
GrB_Info GxB_Vector_type // get the type of a vector
(
GrB_Type *type, // returns the type of the vector
const GrB_Vector v // vector to query
) ;
GB_PUBLIC
GrB_Info GxB_Vector_type_name // return the name of the type of a vector
(
char *type_name, // name of the type (char array of size at least
// GxB_MAX_NAME_LEN, owned by the user application).
const GrB_Vector v // vector to query
) ;
GB_PUBLIC
GrB_Info GxB_Vector_memoryUsage // return # of bytes used for a vector
(
size_t *size, // # of bytes used by the vector v
const GrB_Vector v // vector to query
) ;
GB_PUBLIC
GrB_Info GxB_Vector_iso // return iso status of a vector
(
bool *iso, // true if the vector is iso-valued
const GrB_Vector v // vector to query
) ;
GB_PUBLIC
GrB_Info GrB_Vector_free // free a vector
(
GrB_Vector *v // handle of vector to free
) ;
//------------------------------------------------------------------------------
// GrB_Vector_build
//------------------------------------------------------------------------------
// GrB_Vector_build: w = sparse (I,1,X), but using any
// associative operator to assemble duplicate entries.
GB_PUBLIC
GrB_Info GrB_Vector_build_BOOL // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const bool *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Vector_build_INT8 // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const int8_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Vector_build_UINT8 // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const uint8_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Vector_build_INT16 // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const int16_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Vector_build_UINT16 // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const uint16_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Vector_build_INT32 // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const int32_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Vector_build_UINT32 // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const uint32_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Vector_build_INT64 // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const int64_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Vector_build_UINT64 // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const uint64_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Vector_build_FP32 // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const float *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Vector_build_FP64 // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const double *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GxB_Vector_build_FC32 // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const GxB_FC32_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GxB_Vector_build_FC64 // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const GxB_FC64_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Vector_build_UDT // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const void *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GxB_Vector_build_Scalar // build a vector from (i,scalar) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
GrB_Scalar scalar, // value for all tuples
GrB_Index nvals // number of tuples
) ;
// Type-generic version: X can be a pointer to any supported C type or void *
// for a user-defined type.
/*
GB_PUBLIC
GrB_Info GrB_Vector_build // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const <type> *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
*/
#if GxB_STDC_VERSION >= 201112L
// Type-generic macro: uses C11 _Generic to select the typed variant
// (GrB_Vector_build_BOOL, ..., GrB_Vector_build_UDT) from the pointer type of
// X.  X is passed through as (const void *) so every selected function sees a
// compatible argument type.
#define GrB_Vector_build(w,I,X,nvals,dup) \
_Generic \
( \
(X), \
GB_CASES (*, GrB, Vector_build) \
) \
(w, I, ((const void *) (X)), nvals, dup)
#endif
//------------------------------------------------------------------------------
// GrB_Vector_setElement
//------------------------------------------------------------------------------
// Set a single scalar in a vector, w(i) = x, typecasting from the type of x to
// the type of w as needed.
GB_PUBLIC
GrB_Info GrB_Vector_setElement_BOOL // w(i) = x
(
GrB_Vector w, // vector to modify
bool x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_setElement_INT8 // w(i) = x
(
GrB_Vector w, // vector to modify
int8_t x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_setElement_UINT8 // w(i) = x
(
GrB_Vector w, // vector to modify
uint8_t x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_setElement_INT16 // w(i) = x
(
GrB_Vector w, // vector to modify
int16_t x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_setElement_UINT16 // w(i) = x
(
GrB_Vector w, // vector to modify
uint16_t x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_setElement_INT32 // w(i) = x
(
GrB_Vector w, // vector to modify
int32_t x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_setElement_UINT32 // w(i) = x
(
GrB_Vector w, // vector to modify
uint32_t x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_setElement_INT64 // w(i) = x
(
GrB_Vector w, // vector to modify
int64_t x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_setElement_UINT64 // w(i) = x
(
GrB_Vector w, // vector to modify
uint64_t x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_setElement_FP32 // w(i) = x
(
GrB_Vector w, // vector to modify
float x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_setElement_FP64 // w(i) = x
(
GrB_Vector w, // vector to modify
double x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GxB_Vector_setElement_FC32 // w(i) = x
(
GrB_Vector w, // vector to modify
GxB_FC32_t x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GxB_Vector_setElement_FC64 // w(i) = x
(
GrB_Vector w, // vector to modify
GxB_FC64_t x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_setElement_UDT // w(i) = x
(
GrB_Vector w, // vector to modify
void *x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_setElement_Scalar // w(i) = x
(
GrB_Vector w, // vector to modify
GrB_Scalar x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
// Type-generic version: x can be any supported C type or void * for a
// user-defined type.
/*
GB_PUBLIC
GrB_Info GrB_Vector_setElement // w(i) = x
(
GrB_Vector w, // vector to modify
<type> x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
*/
#if GxB_STDC_VERSION >= 201112L
// Type-generic macro: dispatches on the type of the scalar x itself (note the
// empty first argument to GB_CASES: value types, not pointer types).  Any type
// not in the built-in list falls through to GrB_Vector_setElement_Scalar,
// which takes a GrB_Scalar.
#define GrB_Vector_setElement(w,x,i) \
_Generic \
( \
(x), \
GB_CASES (, GrB, Vector_setElement), \
default: GrB_Vector_setElement_Scalar \
) \
(w, x, i)
#endif
//------------------------------------------------------------------------------
// GrB_Vector_extractElement
//------------------------------------------------------------------------------
// Extract a single entry from a vector, x = v(i), typecasting from the type of
// v to the type of x as needed.
GB_PUBLIC
GrB_Info GrB_Vector_extractElement_BOOL // x = v(i)
(
bool *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractElement_INT8 // x = v(i)
(
int8_t *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractElement_UINT8 // x = v(i)
(
uint8_t *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractElement_INT16 // x = v(i)
(
int16_t *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractElement_UINT16 // x = v(i)
(
uint16_t *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractElement_INT32 // x = v(i)
(
int32_t *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractElement_UINT32 // x = v(i)
(
uint32_t *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractElement_INT64 // x = v(i)
(
int64_t *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractElement_UINT64 // x = v(i)
(
uint64_t *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractElement_FP32 // x = v(i)
(
float *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractElement_FP64 // x = v(i)
(
double *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GxB_Vector_extractElement_FC32 // x = v(i)
(
GxB_FC32_t *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GxB_Vector_extractElement_FC64 // x = v(i)
(
GxB_FC64_t *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractElement_UDT // x = v(i)
(
void *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractElement_Scalar // x = v(i)
(
GrB_Scalar x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
// Type-generic version: x can be a pointer to any supported C type or void *
// for a user-defined type.
/*
GB_PUBLIC
GrB_Info GrB_Vector_extractElement // x = v(i)
(
<type> *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
*/
#if GxB_STDC_VERSION >= 201112L
// Type-generic macro: dispatches on the pointer type of the output x
// (GB_CASES with "*" selects pointer-to-type cases).  A non-built-in x falls
// through to GrB_Vector_extractElement_Scalar, which fills a GrB_Scalar.
#define GrB_Vector_extractElement(x,v,i) \
_Generic \
( \
(x), \
GB_CASES (*, GrB, Vector_extractElement), \
default: GrB_Vector_extractElement_Scalar \
) \
(x, v, i)
#endif
//------------------------------------------------------------------------------
// GrB_Vector_removeElement
//------------------------------------------------------------------------------
// GrB_Vector_removeElement (v,i) removes the element v(i) from the vector v.
GB_PUBLIC
GrB_Info GrB_Vector_removeElement
(
GrB_Vector v, // vector to remove an element from
GrB_Index i // index
) ;
//------------------------------------------------------------------------------
// GrB_Vector_extractTuples
//------------------------------------------------------------------------------
// Extracts all tuples from a vector, like [I,~,X] = find (v). If
// any parameter I and/or X is NULL, then that component is not extracted. For
// example, to extract just the row indices, pass I as non-NULL, and X as NULL.
// This is like [I,~,~] = find (v).
GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_BOOL // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
bool *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_INT8 // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
int8_t *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_UINT8 // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
uint8_t *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_INT16 // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
int16_t *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_UINT16 // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
uint16_t *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_INT32 // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
int32_t *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_UINT32 // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
uint32_t *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_INT64 // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
int64_t *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_UINT64 // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
uint64_t *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_FP32 // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
float *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_FP64 // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
double *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
GB_PUBLIC
GrB_Info GxB_Vector_extractTuples_FC32 // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
GxB_FC32_t *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
GB_PUBLIC
GrB_Info GxB_Vector_extractTuples_FC64 // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
GxB_FC64_t *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_UDT // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
void *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
// Type-generic version: X can be a pointer to any supported C type or void *
// for a user-defined type.
/*
GB_PUBLIC
GrB_Info GrB_Vector_extractTuples // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
<type> *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
*/
#if GxB_STDC_VERSION >= 201112L
// Type-generic macro: dispatches on the pointer type of the output array X.
// There is no default: case here (unlike setElement/extractElement), so X
// must be a pointer to one of the built-in types listed by GB_CASES.
#define GrB_Vector_extractTuples(I,X,nvals,v) \
_Generic \
( \
(X), \
GB_CASES (*, GrB, Vector_extractTuples) \
) \
(I, X, nvals, v)
#endif
//==============================================================================
// GrB_Matrix: a GraphBLAS matrix
//==============================================================================
typedef struct GB_Matrix_opaque *GrB_Matrix ;
// These methods create, free, copy, and clear a matrix. The nrows, ncols,
// nvals, and type methods return basic information about a matrix.
GB_PUBLIC
GrB_Info GrB_Matrix_new // create a new matrix with no entries
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create
GrB_Index nrows, // matrix dimension is nrows-by-ncols
GrB_Index ncols // (nrows and ncols must be <= GrB_INDEX_MAX+1)
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_dup // make an exact copy of a matrix
(
GrB_Matrix *C, // handle of output matrix to create
const GrB_Matrix A // input matrix to copy
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_clear // clear a matrix of all entries;
( // type and dimensions remain unchanged
GrB_Matrix A // matrix to clear
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_nrows // get the number of rows of a matrix
(
GrB_Index *nrows, // matrix has nrows rows
const GrB_Matrix A // matrix to query
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_ncols // get the number of columns of a matrix
(
GrB_Index *ncols, // matrix has ncols columns
const GrB_Matrix A // matrix to query
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_nvals // get the number of entries in a matrix
(
GrB_Index *nvals, // matrix has nvals entries
const GrB_Matrix A // matrix to query
) ;
// NOTE: GxB_Matrix_type is historical. Use GxB_Matrix_type_name instead.
GB_PUBLIC
GrB_Info GxB_Matrix_type // get the type of a matrix
(
GrB_Type *type, // returns the type of the matrix
const GrB_Matrix A // matrix to query
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_type_name // return the name of the type of a matrix
(
char *type_name, // name of the type (char array of size at least
// GxB_MAX_NAME_LEN, owned by the user application).
const GrB_Matrix A // matrix to query
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_memoryUsage // return # of bytes used for a matrix
(
size_t *size, // # of bytes used by the matrix A
const GrB_Matrix A // matrix to query
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_iso // return iso status of a matrix
(
bool *iso, // true if the matrix is iso-valued
const GrB_Matrix A // matrix to query
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_free // free a matrix
(
GrB_Matrix *A // handle of matrix to free
) ;
//------------------------------------------------------------------------------
// GrB_Matrix_build
//------------------------------------------------------------------------------
// GrB_Matrix_build: C = sparse (I,J,X), but using any
// associative operator to assemble duplicate entries.
GB_PUBLIC
GrB_Info GrB_Matrix_build_BOOL // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const bool *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_build_INT8 // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const int8_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_build_UINT8 // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const uint8_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_build_INT16 // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const int16_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_build_UINT16 // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const uint16_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_build_INT32 // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const int32_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_build_UINT32 // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const uint32_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_build_INT64 // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const int64_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_build_UINT64 // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const uint64_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_build_FP32 // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const float *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_build_FP64 // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const double *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_build_FC32 // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const GxB_FC32_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_build_FC64 // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const GxB_FC64_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_build_UDT // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const void *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_build_Scalar // build a matrix from (I,J,scalar) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
GrB_Scalar scalar, // value for all tuples
GrB_Index nvals // number of tuples
) ;
// Type-generic version: X can be a pointer to any supported C type or void *
// for a user-defined type.
/*
GB_PUBLIC
GrB_Info GrB_Matrix_build // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const <type> *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
*/
#if GxB_STDC_VERSION >= 201112L
// Type-generic macro: selects the typed GrB_Matrix_build_* variant from the
// pointer type of X, then passes X as (const void *) so the selected
// function's prototype is satisfied regardless of which case was chosen.
#define GrB_Matrix_build(C,I,J,X,nvals,dup) \
_Generic \
( \
(X), \
GB_CASES (*, GrB, Matrix_build) \
) \
(C, I, J, ((const void *) (X)), nvals, dup)
#endif
//------------------------------------------------------------------------------
// GrB_Matrix_setElement
//------------------------------------------------------------------------------
// Set a single entry in a matrix, C(i,j) = x, typecasting
// from the type of x to the type of C, as needed.
GB_PUBLIC
GrB_Info GrB_Matrix_setElement_BOOL // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
bool x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_setElement_INT8 // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
int8_t x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_setElement_UINT8 // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
uint8_t x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_setElement_INT16 // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
int16_t x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_setElement_UINT16 // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
uint16_t x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_setElement_INT32 // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
int32_t x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_setElement_UINT32 // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
uint32_t x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_setElement_INT64 // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
int64_t x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_setElement_UINT64 // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
uint64_t x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_setElement_FP32 // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
float x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_setElement_FP64 // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
double x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_setElement_FC32 // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
GxB_FC32_t x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_setElement_FC64 // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
GxB_FC64_t x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_setElement_UDT // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
void *x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_setElement_Scalar // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
GrB_Scalar x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
// Type-generic version: x can be any supported C type or void * for a
// user-defined type.
/*
GB_PUBLIC
GrB_Info GrB_Matrix_setElement // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
<type> x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
*/
#if GxB_STDC_VERSION >= 201112L
// Type-generic macro: dispatches on the type of the scalar x (value types:
// empty first argument to GB_CASES).  Any other type falls through to
// GrB_Matrix_setElement_Scalar, which takes a GrB_Scalar.
#define GrB_Matrix_setElement(C,x,i,j) \
_Generic \
( \
(x), \
GB_CASES (, GrB, Matrix_setElement), \
default: GrB_Matrix_setElement_Scalar \
) \
(C, x, i, j)
#endif
//------------------------------------------------------------------------------
// GrB_Matrix_extractElement
//------------------------------------------------------------------------------
// Extract a single entry from a matrix, x = A(i,j), typecasting from the type
// of A to the type of x, as needed.
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_BOOL // x = A(i,j)
(
bool *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_INT8 // x = A(i,j)
(
int8_t *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_UINT8 // x = A(i,j)
(
uint8_t *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_INT16 // x = A(i,j)
(
int16_t *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_UINT16 // x = A(i,j)
(
uint16_t *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_INT32 // x = A(i,j)
(
int32_t *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_UINT32 // x = A(i,j)
(
uint32_t *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_INT64 // x = A(i,j)
(
int64_t *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_UINT64 // x = A(i,j)
(
uint64_t *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_FP32 // x = A(i,j)
(
float *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_FP64 // x = A(i,j)
(
double *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_extractElement_FC32 // x = A(i,j)
(
GxB_FC32_t *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_extractElement_FC64 // x = A(i,j)
(
GxB_FC64_t *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_UDT // x = A(i,j)
(
void *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_Scalar // x = A(i,j)
(
GrB_Scalar x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
// Type-generic version: x can be a pointer to any supported C type or void *
// for a user-defined type.
/*
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement // x = A(i,j)
(
<type> *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
*/
#if GxB_STDC_VERSION >= 201112L
// Type-generic macro: dispatches on the pointer type of the output x.  A
// non-built-in x falls through to GrB_Matrix_extractElement_Scalar, which
// fills a GrB_Scalar.
#define GrB_Matrix_extractElement(x,A,i,j) \
_Generic \
( \
(x), \
GB_CASES (*, GrB, Matrix_extractElement), \
default: GrB_Matrix_extractElement_Scalar \
) \
(x, A, i, j)
#endif
//------------------------------------------------------------------------------
// GrB_Matrix_removeElement
//------------------------------------------------------------------------------
// GrB_Matrix_removeElement (C,i,j) removes the entry C(i,j) from the matrix C.
GB_PUBLIC
GrB_Info GrB_Matrix_removeElement
(
GrB_Matrix C, // matrix to remove entry from
GrB_Index i, // row index
GrB_Index j // column index
) ;
//------------------------------------------------------------------------------
// GrB_Matrix_extractTuples
//------------------------------------------------------------------------------
// Extracts all tuples from a matrix, like [I,J,X] = find (A). If
// any parameter I, J and/or X is NULL, then that component is not extracted.
// For example, to extract just the row and col indices, pass I and J as
// non-NULL, and X as NULL. This is like [I,J,~] = find (A).
GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_BOOL // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
bool *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_INT8 // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
int8_t *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_UINT8 // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
uint8_t *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_INT16 // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
int16_t *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_UINT16 // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
uint16_t *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_INT32 // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
int32_t *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_UINT32 // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
uint32_t *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_INT64 // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
int64_t *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_UINT64 // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
uint64_t *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_FP32 // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
float *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_FP64 // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
double *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_extractTuples_FC32 // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
GxB_FC32_t *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_extractTuples_FC64 // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
GxB_FC64_t *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_UDT // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
void *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
// Type-generic version: X can be a pointer to any supported C type or void *
// for a user-defined type.
/*
GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
<type> *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
*/
// Polymorphic wrapper for the typed GrB_Matrix_extractTuples_* functions:
// C11 _Generic dispatches on the static type of the X output array.  There is
// no default case, so X must be a pointer to one of the supported built-in
// types (use GrB_Matrix_extractTuples_UDT directly for user-defined types).
#if GxB_STDC_VERSION >= 201112L
#define GrB_Matrix_extractTuples(I,J,X,nvals,A) \
_Generic \
( \
(X), \
GB_CASES (*, GrB, Matrix_extractTuples) \
) \
(I, J, X, nvals, A)
#endif
//------------------------------------------------------------------------------
// GxB_Matrix_concat and GxB_Matrix_split
//------------------------------------------------------------------------------
// GxB_Matrix_concat concatenates an array of matrices (Tiles) into a single
// GrB_Matrix C.
// Tiles is an m-by-n dense array of matrices held in row-major format, where
// Tiles [i*n+j] is the (i,j)th tile, and where m > 0 and n > 0 must hold. Let
// A{i,j} denote the (i,j)th tile. The matrix C is constructed by
// concatenating these tiles together, as:
// C = [ A{0,0} A{0,1} A{0,2} ... A{0,n-1}
// A{1,0} A{1,1} A{1,2} ... A{1,n-1}
// ...
// A{m-1,0} A{m-1,1} A{m-1,2} ... A{m-1,n-1} ]
// On input, the matrix C must already exist. Any existing entries in C are
// discarded. C must have dimensions nrows by ncols where nrows is the sum of
// # of rows in the matrices A{i,0} for all i, and ncols is the sum of the # of
// columns in the matrices A{0,j} for all j. All matrices in any given tile
// row i must have the same number of rows (that is, nrows(A{i,0}) must equal
// nrows(A{i,j}) for all j), and all matrices in any given tile column j must
// have the same number of columns (that is, ncols(A{0,j}) must equal
// ncols(A{i,j}) for all i).
// The type of C is unchanged, and all matrices A{i,j} are typecasted into the
// type of C. Any settings made to C by GxB_Matrix_Option_set (format by row
// or by column, bitmap switch, hyper switch, and sparsity control) are
// unchanged.
GB_PUBLIC
GrB_Info GxB_Matrix_concat // concatenate a 2D array of matrices
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix *Tiles, // 2D row-major array of size m-by-n
const GrB_Index m,
const GrB_Index n,
const GrB_Descriptor desc // unused, except threading control
) ;
// GxB_Matrix_split does the opposite of GxB_Matrix_concat. It splits a single
// input matrix A into a 2D array of tiles. On input, the Tiles array must be
// a non-NULL pointer to a previously allocated array of size at least m*n
// where both m and n must be > 0. The Tiles_nrows array has size m, and
// Tiles_ncols has size n. The (i,j)th tile has dimension
// Tiles_nrows[i]-by-Tiles_ncols[j]. The sum of Tiles_nrows [0:m-1] must equal
// the number of rows of A, and the sum of Tiles_ncols [0:n-1] must equal the
// number of columns of A. The type of each tile is the same as the type of A;
// no typecasting is done.
GB_PUBLIC
GrB_Info GxB_Matrix_split // split a matrix into 2D array of matrices
(
GrB_Matrix *Tiles, // 2D row-major array of size m-by-n
const GrB_Index m,
const GrB_Index n,
const GrB_Index *Tile_nrows, // array of size m
const GrB_Index *Tile_ncols, // array of size n
const GrB_Matrix A, // input matrix to split
const GrB_Descriptor desc // unused, except threading control
) ;
//------------------------------------------------------------------------------
// GxB_Matrix_diag, GxB_Vector_diag, GrB_Matrix_diag
//------------------------------------------------------------------------------
// GxB_Matrix_diag constructs a matrix from a vector. Let n be the length of
// the v vector, from GrB_Vector_size (&n, v). If k = 0, then C is an n-by-n
// diagonal matrix with the entries from v along the main diagonal of C, with
// C(i,i) = v(i). If k is nonzero, C is square with dimension n+abs(k). If k
// is positive, it denotes diagonals above the main diagonal, with C(i,i+k) =
// v(i). If k is negative, it denotes diagonals below the main diagonal of C,
// with C(i-k,i) = v(i).
// C must already exist on input, of the correct size. Any existing entries in
// C are discarded. The type of C is preserved, so that if the type of C and v
// differ, the entries are typecasted into the type of C. Any settings made to
// C by GxB_Matrix_Option_set (format by row or by column, bitmap switch, hyper
// switch, and sparsity control) are unchanged.
GB_PUBLIC
GrB_Info GxB_Matrix_diag // construct a diagonal matrix from a vector
(
GrB_Matrix C, // output matrix
const GrB_Vector v, // input vector
int64_t k,
const GrB_Descriptor desc // to specify # of threads
) ;
// GrB_Matrix_diag is identical to GxB_Matrix_diag (C, v, k, NULL),
// using the default # of threads from the global setting.
GB_PUBLIC
GrB_Info GrB_Matrix_diag // construct a diagonal matrix from a vector
(
GrB_Matrix C, // output matrix
const GrB_Vector v, // input vector
int64_t k
) ;
// GxB_Vector_diag extracts a vector v from an input matrix A, which may be
// rectangular. If k = 0, the main diagonal of A is extracted; k > 0 denotes
// diagonals above the main diagonal of A, and k < 0 denotes diagonals below
// the main diagonal of A. Let A have dimension m-by-n. If k is in the range
// 0 to n-1, then v has length min(m,n-k). If k is negative and in the range
// -1 to -m+1, then v has length min(m+k,n). If k is outside these ranges,
// v has length 0 (this is not an error).
// v must already exist on input, of the correct length; that is
// GrB_Vector_size (&len,v) must return len = 0 if k >= n or k <= -m, len =
// min(m,n-k) if k is in the range 0 to n-1, and len = min(m+k,n) if k is in
// the range -1 to -m+1. Any existing entries in v are discarded. The type of
// v is preserved, so that if the type of A and v differ, the entries are
// typecasted into the type of v. Any settings made to v by
// GxB_Vector_Option_set (bitmap switch and sparsity control) are unchanged.
GB_PUBLIC
GrB_Info GxB_Vector_diag // extract a diagonal from a matrix, as a vector
(
GrB_Vector v, // output vector
const GrB_Matrix A, // input matrix
int64_t k,
const GrB_Descriptor desc // unused, except threading control
) ;
//==============================================================================
// SuiteSparse:GraphBLAS options
//==============================================================================
// The following options modify how SuiteSparse:GraphBLAS stores and operates
// on its matrices. The GxB_*Option* methods allow the user to suggest how the
// internal representation of a matrix, or all matrices, should be held. These
// options have no effect on the result (except for minor roundoff differences
// for floating-point types). They only affect the time and memory usage of the
// computations.
// GxB_Matrix_Option_set: sets an option for a specific matrix
// GxB_Matrix_Option_get: queries the current option of a specific matrix
// GxB_Vector_Option_set: sets an option for a specific vector
// GxB_Vector_Option_get: queries the current option of a specific vector
// GxB_Global_Option_set: sets an option for all future matrices
// GxB_Global_Option_get: queries current option for all future matrices
#define GxB_HYPER 0 // (historical, use GxB_HYPER_SWITCH)
// GxB_Option_Field: every field accepted by the GxB_*_Option_get/set methods.
// NOTE: the explicit numeric values are part of the binary interface and must
// not be changed or reused.
typedef enum // for global options or matrix options
{
//------------------------------------------------------------
// for GxB_Matrix_Option_get/set and GxB_Global_Option_get/set:
//------------------------------------------------------------
GxB_HYPER_SWITCH = 0, // defines switch to hypersparse (a double value)
GxB_BITMAP_SWITCH = 34, // defines switch to bitmap (a double value)
GxB_FORMAT = 1, // defines CSR/CSC format: GxB_BY_ROW or GxB_BY_COL
//------------------------------------------------------------
// for GxB_Global_Option_get only:
//------------------------------------------------------------
GxB_MODE = 2, // mode passed to GrB_init (blocking or non-blocking)
GxB_LIBRARY_NAME = 8, // name of the library (char *)
GxB_LIBRARY_VERSION = 9, // library version (3 int's)
GxB_LIBRARY_DATE = 10, // date of the library (char *)
GxB_LIBRARY_ABOUT = 11, // about the library (char *)
GxB_LIBRARY_URL = 12, // URL for the library (char *)
GxB_LIBRARY_LICENSE = 13, // license of the library (char *)
GxB_LIBRARY_COMPILE_DATE = 14, // date library was compiled (char *)
GxB_LIBRARY_COMPILE_TIME = 15, // time library was compiled (char *)
GxB_API_VERSION = 16, // API version (3 int's)
GxB_API_DATE = 17, // date of the API (char *)
GxB_API_ABOUT = 18, // about the API (char *)
GxB_API_URL = 19, // URL for the API (char *)
GxB_COMPILER_VERSION = 23, // compiler version (3 int's)
GxB_COMPILER_NAME = 24, // compiler name (char *)
//------------------------------------------------------------
// for GxB_Global_Option_get/set only:
//------------------------------------------------------------
GxB_GLOBAL_NTHREADS = GxB_NTHREADS, // max number of threads to use
// If <= GxB_DEFAULT, then GraphBLAS selects the number
// of threads automatically.
GxB_GLOBAL_CHUNK = GxB_CHUNK, // chunk size for small problems.
// If <= GxB_DEFAULT, then the default is used.
GxB_BURBLE = 99, // diagnostic output (bool *)
GxB_PRINTF = 101, // printf function diagnostic output
GxB_FLUSH = 102, // flush function diagnostic output
GxB_MEMORY_POOL = 103, // memory pool control
GxB_PRINT_1BASED = 104, // print matrices as 0-based or 1-based
//------------------------------------------------------------
// for GxB_Matrix_Option_get only:
//------------------------------------------------------------
GxB_SPARSITY_STATUS = 33, // hyper, sparse, bitmap or full (1,2,4,8)
GxB_IS_HYPER = 6, // historical; use GxB_SPARSITY_STATUS
//------------------------------------------------------------
// for GxB_Matrix_Option_get/set only:
//------------------------------------------------------------
GxB_SPARSITY_CONTROL = 32, // sparsity control: 0 to 15; see below
//------------------------------------------------------------
// GPU and options (DRAFT: do not use)
//------------------------------------------------------------
GxB_GLOBAL_GPU_CONTROL = GxB_GPU_CONTROL,
GxB_GLOBAL_GPU_CHUNK = GxB_GPU_CHUNK,
} GxB_Option_Field ;
// GxB_FORMAT can be by row or by column:
// GxB_Format_Value: the value taken by the GxB_FORMAT option.
typedef enum
{
GxB_BY_ROW = 0, // CSR: compressed sparse row format
GxB_BY_COL = 1, // CSC: compressed sparse column format
GxB_NO_FORMAT = -1 // format not defined
}
GxB_Format_Value ;
// The default format is by row. These constants are defined as GB_PUBLIC
// const, so that if SuiteSparse:GraphBLAS is recompiled with a different
// default format, and the application is relinked but not recompiled, it will
// acquire the new default values.
GB_PUBLIC const GxB_Format_Value GxB_FORMAT_DEFAULT ;
// the default hyper_switch parameter
GB_PUBLIC const double GxB_HYPER_DEFAULT ;
// GxB_SPARSITY_CONTROL can be any sum or bitwise OR of these 4 values:
#define GxB_HYPERSPARSE 1 // store matrix in hypersparse form
#define GxB_SPARSE 2 // store matrix as sparse form (compressed vector)
#define GxB_BITMAP 4 // store matrix as a bitmap
#define GxB_FULL 8 // store matrix as full; all entries must be present
// size of b array for GxB_set/get (GxB_BITMAP_SWITCH, b)
#define GxB_NBITMAP_SWITCH 8 // size of bitmap_switch parameter array
// any sparsity value:
#define GxB_ANY_SPARSITY (GxB_HYPERSPARSE + GxB_SPARSE + GxB_BITMAP + GxB_FULL)
// the default sparsity control is any format:
#define GxB_AUTO_SPARSITY GxB_ANY_SPARSITY
// GxB_Matrix_Option_set (A, GxB_SPARSITY_CONTROL, scontrol) provides hints
// about which data structure GraphBLAS should use for the matrix A:
//
// GxB_AUTO_SPARSITY: GraphBLAS selects automatically.
// GxB_HYPERSPARSE: always hypersparse, taking O(nvals(A)) space.
// GxB_SPARSE: always in a sparse structure: compressed-sparse row/column,
// taking O(nrows+nvals(A)) space if stored by row, or
// O(ncols+nvals(A)) if stored by column.
// GxB_BITMAP: always in a bitmap structure, taking O(nrows*ncols) space.
// GxB_FULL: always in a full structure, taking O(nrows*ncols) space,
// unless not all entries are present, in which case the bitmap
// storage is used.
//
// These options can be summed. For example, to allow a matrix to be sparse
// or hypersparse, but not bitmap or full, use GxB_SPARSE + GxB_HYPERSPARSE.
// Since GxB_FULL can only be used when all entries are present, matrices with
// just the GxB_FULL control setting are stored in bitmap form if any entries
// are not present.
//
// Only the least 4 bits of the sparsity control are considered, so the
// formats can be bitwise negated. For example, to allow for any format
// except full, use ~GxB_FULL.
//
// GxB_Matrix_Option_get (A, GxB_SPARSITY_STATUS, &sparsity) returns the
// current data structure currently used for the matrix A (either hypersparse,
// sparse, bitmap, or full).
//
// GxB_Matrix_Option_get (A, GxB_SPARSITY_CONTROL, &scontrol) returns the hint
// for how A should be stored (hypersparse, sparse, bitmap, or full, or any
// combination).
// GxB_HYPER_SWITCH:
// If the matrix or vector structure can be sparse or hypersparse, the
// GxB_HYPER_SWITCH parameter controls when each of these structures are
// used. The parameter is not used if the matrix or vector is full or
// bitmap.
//
// Let k be the actual number of non-empty vectors (with at least one
// entry). This value k is not dependent on whether or not the matrix is
// stored in hypersparse structure. Let n be the number of vectors (the #
// of columns if CSC, or rows if CSR). Let h be the value of the
// GxB_HYPER_SWITCH setting of the matrix.
//
// If a matrix is currently hypersparse, it can be converted to
// non-hypersparse if (n <= 1 || k > 2*n*h). Otherwise it stays
// hypersparse. If (n <= 1) the matrix is always stored as
// non-hypersparse.
//
// If currently non-hypersparse, it can be converted to hypersparse if (n
// > 1 && k <= n*h). Otherwise, it stays non-hypersparse. If (n <= 1)
// the matrix always remains non-hypersparse.
//
// Setting GxB_HYPER_SWITCH to GxB_ALWAYS_HYPER or GxB_NEVER_HYPER ensures
// a matrix always stays hypersparse, or always stays non-hypersparse,
// respectively.
GB_PUBLIC const double GxB_ALWAYS_HYPER, GxB_NEVER_HYPER ;
GB_PUBLIC
GrB_Info GxB_Matrix_Option_set // set an option in a matrix
(
GrB_Matrix A, // matrix to modify
GxB_Option_Field field, // option to change
... // value to change it to
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_Option_get // gets the current option of a matrix
(
GrB_Matrix A, // matrix to query
GxB_Option_Field field, // option to query
... // return value of the matrix option
) ;
GB_PUBLIC
GrB_Info GxB_Vector_Option_set // set an option in a vector
(
GrB_Vector A, // vector to modify
GxB_Option_Field field, // option to change
... // value to change it to
) ;
GB_PUBLIC
GrB_Info GxB_Vector_Option_get // gets the current option of a vector
(
GrB_Vector A, // vector to query
GxB_Option_Field field, // option to query
... // return value of the vector option
) ;
// GxB_Global_Option_set controls the global defaults used when a new matrix is
// created. GrB_init defines the following initial settings:
//
// GxB_Global_Option_set (GxB_HYPER_SWITCH, GxB_HYPER_DEFAULT) ;
// GxB_Global_Option_set (GxB_BITMAP_SWITCH, NULL) ;
// GxB_Global_Option_set (GxB_FORMAT, GxB_FORMAT_DEFAULT) ;
//
// The compile-time constants GxB_HYPER_DEFAULT and GxB_FORMAT_DEFAULT are
// equal to 0.0625 and GxB_BY_ROW, by default. That is, by default, all new
// matrices are held by row in CSR format. If a matrix has fewer than n/16
// columns, it can be converted to hypersparse structure. If it has more than
// n/8 columns, it can be converted to a sparse structure. Modifying these
// global settings via GxB_Global_Option_set has no effect on matrices already
// created.
GB_PUBLIC
GrB_Info GxB_Global_Option_set // set a global default option
(
GxB_Option_Field field, // option to change
... // value to change it to
) ;
GB_PUBLIC
GrB_Info GxB_Global_Option_get // gets the current global default option
(
GxB_Option_Field field, // option to query
... // return value of the global option
) ;
//------------------------------------------------------------------------------
// GxB_set and GxB_get
//------------------------------------------------------------------------------
// The simplest way to set/get a value of a GrB_Descriptor is with
// the generic GxB_set and GxB_get functions:
// GxB_set (desc, field, value) ;
// GxB_get (desc, field, &value) ;
// GxB_set and GxB_get are generic methods that set or query the options in
// a GrB_Matrix, a GrB_Descriptor, or in the global options. They can be used
// with the following syntax. Note that GxB_NTHREADS can be used for both the
// global nthreads_max, and for the # of threads in the descriptor.
// To set/get the global options:
//
// GxB_set (GxB_HYPER_SWITCH, double h) ;
// GxB_set (GxB_HYPER_SWITCH, GxB_ALWAYS_HYPER) ;
// GxB_set (GxB_HYPER_SWITCH, GxB_NEVER_HYPER) ;
// GxB_get (GxB_HYPER_SWITCH, double *h) ;
//
// double b [GxB_NBITMAP_SWITCH] ;
// GxB_set (GxB_BITMAP_SWITCH, b) ;
// GxB_set (GxB_BITMAP_SWITCH, NULL) ; // set defaults
// GxB_get (GxB_BITMAP_SWITCH, b) ;
//
// GxB_set (GxB_FORMAT, GxB_BY_ROW) ;
// GxB_set (GxB_FORMAT, GxB_BY_COL) ;
// GxB_get (GxB_FORMAT, GxB_Format_Value *s) ;
//
// GxB_set (GxB_NTHREADS, nthreads_max) ;
// GxB_get (GxB_NTHREADS, int *nthreads_max) ;
//
// GxB_set (GxB_CHUNK, double chunk) ;
// GxB_get (GxB_CHUNK, double *chunk) ;
//
// GxB_set (GxB_BURBLE, bool burble) ;
// GxB_get (GxB_BURBLE, bool *burble) ;
//
// GxB_set (GxB_PRINTF, void *printf_function) ;
// GxB_get (GxB_PRINTF, void **printf_function) ;
//
// GxB_set (GxB_FLUSH, void *flush_function) ;
// GxB_get (GxB_FLUSH, void **flush_function) ;
//
// int64_t free_pool_limit [64] ;
// GxB_set (GxB_MEMORY_POOL, free_pool_limit) ;
// GxB_set (GxB_MEMORY_POOL, NULL) ; // set defaults
// GxB_get (GxB_MEMORY_POOL, free_pool_limit) ;
// To get global options that can be queried but not modified:
//
// GxB_get (GxB_MODE, GrB_Mode *mode) ;
// To set/get a matrix option:
//
// GxB_set (GrB_Matrix A, GxB_HYPER_SWITCH, double h) ;
// GxB_set (GrB_Matrix A, GxB_HYPER_SWITCH, GxB_ALWAYS_HYPER) ;
// GxB_set (GrB_Matrix A, GxB_HYPER_SWITCH, GxB_NEVER_HYPER) ;
// GxB_get (GrB_Matrix A, GxB_HYPER_SWITCH, double *h) ;
//
// GxB_set (GrB_Matrix A, GxB_BITMAP_SWITCH, double b) ;
// GxB_get (GrB_Matrix A, GxB_BITMAP_SWITCH, double *b) ;
//
// GxB_set (GrB_Matrix A, GxB_FORMAT, GxB_BY_ROW) ;
// GxB_set (GrB_Matrix A, GxB_FORMAT, GxB_BY_COL) ;
// GxB_get (GrB_Matrix A, GxB_FORMAT, GxB_Format_Value *s) ;
//
// GxB_set (GrB_Matrix A, GxB_SPARSITY_CONTROL, GxB_AUTO_SPARSITY) ;
// GxB_set (GrB_Matrix A, GxB_SPARSITY_CONTROL, scontrol) ;
// GxB_get (GrB_Matrix A, GxB_SPARSITY_CONTROL, int *scontrol) ;
//
// GxB_get (GrB_Matrix A, GxB_SPARSITY_STATUS, int *sparsity) ;
// To set/get a vector option or status:
//
// GxB_set (GrB_Vector v, GxB_BITMAP_SWITCH, double b) ;
// GxB_get (GrB_Vector v, GxB_BITMAP_SWITCH, double *b) ;
//
// GxB_set (GrB_Vector v, GxB_FORMAT, GxB_BY_ROW) ;
// GxB_set (GrB_Vector v, GxB_FORMAT, GxB_BY_COL) ;
// GxB_get (GrB_Vector v, GxB_FORMAT, GxB_Format_Value *s) ;
//
// GxB_set (GrB_Vector v, GxB_SPARSITY_CONTROL, GxB_AUTO_SPARSITY) ;
// GxB_set (GrB_Vector v, GxB_SPARSITY_CONTROL, scontrol) ;
// GxB_get (GrB_Vector v, GxB_SPARSITY_CONTROL, int *scontrol) ;
//
// GxB_get (GrB_Vector v, GxB_SPARSITY_STATUS, int *sparsity) ;
// To set/get a descriptor field:
//
// GxB_set (GrB_Descriptor d, GrB_OUTP, GxB_DEFAULT) ;
// GxB_set (GrB_Descriptor d, GrB_OUTP, GrB_REPLACE) ;
// GxB_get (GrB_Descriptor d, GrB_OUTP, GrB_Desc_Value *v) ;
//
// GxB_set (GrB_Descriptor d, GrB_MASK, GxB_DEFAULT) ;
// GxB_set (GrB_Descriptor d, GrB_MASK, GrB_COMP) ;
// GxB_set (GrB_Descriptor d, GrB_MASK, GrB_STRUCTURE) ;
// GxB_set (GrB_Descriptor d, GrB_MASK, GrB_COMP + GrB_STRUCTURE) ;
// GxB_get (GrB_Descriptor d, GrB_MASK, GrB_Desc_Value *v) ;
//
// GxB_set (GrB_Descriptor d, GrB_INP0, GxB_DEFAULT) ;
// GxB_set (GrB_Descriptor d, GrB_INP0, GrB_TRAN) ;
// GxB_get (GrB_Descriptor d, GrB_INP0, GrB_Desc_Value *v) ;
//
// GxB_set (GrB_Descriptor d, GrB_INP1, GxB_DEFAULT) ;
// GxB_set (GrB_Descriptor d, GrB_INP1, GrB_TRAN) ;
// GxB_get (GrB_Descriptor d, GrB_INP1, GrB_Desc_Value *v) ;
//
// GxB_set (GrB_Descriptor d, GxB_AxB_METHOD, GxB_DEFAULT) ;
// GxB_set (GrB_Descriptor d, GxB_AxB_METHOD, GxB_AxB_GUSTAVSON) ;
// GxB_set (GrB_Descriptor d, GxB_AxB_METHOD, GxB_AxB_HASH) ;
// GxB_set (GrB_Descriptor d, GxB_AxB_METHOD, GxB_AxB_SAXPY) ;
// GxB_set (GrB_Descriptor d, GxB_AxB_METHOD, GxB_AxB_DOT) ;
// GxB_get (GrB_Descriptor d, GxB_AxB_METHOD, GrB_Desc_Value *v) ;
//
// GxB_set (GrB_Descriptor d, GxB_NTHREADS, nthreads) ;
// GxB_get (GrB_Descriptor d, GxB_NTHREADS, int *nthreads) ;
//
// GxB_set (GrB_Descriptor d, GxB_CHUNK, double chunk) ;
// GxB_get (GrB_Descriptor d, GxB_CHUNK, double *chunk) ;
//
// GxB_set (GrB_Descriptor d, GxB_SORT, int sort) ;
// GxB_get (GrB_Descriptor d, GxB_SORT, int *sort) ;
//
// GxB_set (GrB_Descriptor d, GxB_COMPRESSION, int method) ;
// GxB_get (GrB_Descriptor d, GxB_COMPRESSION, int *method) ;
//
// GxB_set (GrB_Descriptor d, GxB_IMPORT, int method) ;
// GxB_get (GrB_Descriptor d, GxB_IMPORT, int *method) ;
#if GxB_STDC_VERSION >= 201112L
// GxB_set: generic setter.  _Generic dispatches on the first argument: a
// global option field (int or GxB_Option_Field), a GrB_Vector, a GrB_Matrix,
// or a GrB_Descriptor.  The remaining arguments are forwarded unchanged.
#define GxB_set(arg1,...) \
_Generic \
( \
(arg1), \
int : GxB_Global_Option_set , \
GxB_Option_Field : GxB_Global_Option_set , \
GrB_Vector : GxB_Vector_Option_set , \
GrB_Matrix : GxB_Matrix_Option_set , \
GrB_Descriptor : GxB_Desc_set \
) \
(arg1, __VA_ARGS__)
// GxB_get: generic getter, same dispatch as GxB_set.  The const-qualified
// cases are listed as well so a const-qualified first argument also selects
// the right function.
#define GxB_get(arg1,...) \
_Generic \
( \
(arg1), \
const int : GxB_Global_Option_get , \
int : GxB_Global_Option_get , \
const GxB_Option_Field : GxB_Global_Option_get , \
GxB_Option_Field : GxB_Global_Option_get , \
const GrB_Vector : GxB_Vector_Option_get , \
GrB_Vector : GxB_Vector_Option_get , \
const GrB_Matrix : GxB_Matrix_Option_get , \
GrB_Matrix : GxB_Matrix_Option_get , \
const GrB_Descriptor : GxB_Desc_get , \
GrB_Descriptor : GxB_Desc_get \
) \
(arg1, __VA_ARGS__)
#endif
//==============================================================================
// GrB_free: free any GraphBLAS object
//==============================================================================
// for null and invalid objects
#define GrB_NULL NULL
#define GrB_INVALID_HANDLE NULL
#if GxB_STDC_VERSION >= 201112L
// GrB_free: polymorphic free.  _Generic dispatches on the pointer-to-handle
// type of the object being freed and forwards to the matching *_free method.
#define GrB_free(object) \
_Generic \
( \
(object), \
GrB_Type *: GrB_Type_free , \
GrB_UnaryOp *: GrB_UnaryOp_free , \
GrB_BinaryOp *: GrB_BinaryOp_free , \
GxB_SelectOp *: GxB_SelectOp_free , \
GrB_IndexUnaryOp *: GrB_IndexUnaryOp_free , \
GrB_Monoid *: GrB_Monoid_free , \
GrB_Semiring *: GrB_Semiring_free , \
GrB_Scalar *: GrB_Scalar_free , \
GrB_Vector *: GrB_Vector_free , \
GrB_Matrix *: GrB_Matrix_free , \
GrB_Descriptor *: GrB_Descriptor_free , \
GxB_Iterator *: GxB_Iterator_free \
) \
(object)
#endif
//==============================================================================
// GrB_wait: finish computations
//==============================================================================
// GrB_WaitMode: the waitmode argument for the GrB_*_wait methods.
typedef enum
{
GrB_COMPLETE = 0, // establishes a happens-before relation
GrB_MATERIALIZE = 1 // object is complete
}
GrB_WaitMode ;
// Finish all pending work in a specific object.
GB_PUBLIC GrB_Info GrB_Type_wait (GrB_Type type , GrB_WaitMode waitmode) ;
GB_PUBLIC GrB_Info GrB_UnaryOp_wait (GrB_UnaryOp op , GrB_WaitMode waitmode) ;
GB_PUBLIC GrB_Info GrB_BinaryOp_wait (GrB_BinaryOp op , GrB_WaitMode waitmode) ;
GB_PUBLIC GrB_Info GxB_SelectOp_wait (GxB_SelectOp op , GrB_WaitMode waitmode) ;
GB_PUBLIC GrB_Info GrB_IndexUnaryOp_wait (GrB_IndexUnaryOp op , GrB_WaitMode waitmode) ;
GB_PUBLIC GrB_Info GrB_Monoid_wait (GrB_Monoid monoid , GrB_WaitMode waitmode) ;
GB_PUBLIC GrB_Info GrB_Semiring_wait (GrB_Semiring semiring, GrB_WaitMode waitmode) ;
GB_PUBLIC GrB_Info GrB_Descriptor_wait (GrB_Descriptor desc , GrB_WaitMode waitmode) ;
GB_PUBLIC GrB_Info GrB_Scalar_wait (GrB_Scalar s , GrB_WaitMode waitmode) ;
GB_PUBLIC GrB_Info GrB_Vector_wait (GrB_Vector v , GrB_WaitMode waitmode) ;
GB_PUBLIC GrB_Info GrB_Matrix_wait (GrB_Matrix A , GrB_WaitMode waitmode) ;
// GrB_wait (object,waitmode) polymorphic function:
#if GxB_STDC_VERSION >= 201112L
// GrB_wait: polymorphic wait.  _Generic dispatches on the handle type of the
// object and forwards (object, waitmode) to the matching *_wait method.
#define GrB_wait(object,waitmode) \
_Generic \
( \
(object), \
GrB_Type : GrB_Type_wait , \
GrB_UnaryOp : GrB_UnaryOp_wait , \
GrB_BinaryOp : GrB_BinaryOp_wait , \
GxB_SelectOp : GxB_SelectOp_wait , \
GrB_IndexUnaryOp : GrB_IndexUnaryOp_wait , \
GrB_Monoid : GrB_Monoid_wait , \
GrB_Semiring : GrB_Semiring_wait , \
GrB_Scalar : GrB_Scalar_wait , \
GrB_Vector : GrB_Vector_wait , \
GrB_Matrix : GrB_Matrix_wait , \
GrB_Descriptor : GrB_Descriptor_wait \
) \
(object, waitmode)
#endif
// NOTE: GxB_Scalar_wait is historical; use GrB_Scalar_wait instead
GB_PUBLIC GrB_Info GxB_Scalar_wait (GrB_Scalar *s) ;
//==============================================================================
// GrB_error: error handling
//==============================================================================
// Each GraphBLAS method and operation returns a GrB_Info error code.
// GrB_error returns additional information on the error in a thread-safe
// null-terminated string. The string returned by GrB_error is owned by
// the GraphBLAS library and must not be free'd.
GB_PUBLIC GrB_Info GrB_Type_error (const char **error, const GrB_Type type) ;
GB_PUBLIC GrB_Info GrB_UnaryOp_error (const char **error, const GrB_UnaryOp op) ;
GB_PUBLIC GrB_Info GrB_BinaryOp_error (const char **error, const GrB_BinaryOp op) ;
GB_PUBLIC GrB_Info GxB_SelectOp_error (const char **error, const GxB_SelectOp op) ;
GB_PUBLIC GrB_Info GrB_IndexUnaryOp_error (const char **error, const GrB_IndexUnaryOp op) ;
GB_PUBLIC GrB_Info GrB_Monoid_error (const char **error, const GrB_Monoid monoid) ;
GB_PUBLIC GrB_Info GrB_Semiring_error (const char **error, const GrB_Semiring semiring) ;
GB_PUBLIC GrB_Info GrB_Scalar_error (const char **error, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GrB_Vector_error (const char **error, const GrB_Vector v) ;
GB_PUBLIC GrB_Info GrB_Matrix_error (const char **error, const GrB_Matrix A) ;
GB_PUBLIC GrB_Info GrB_Descriptor_error (const char **error, const GrB_Descriptor d) ;
// GxB_Scalar_error is historical: use GrB_Scalar_error instead
GB_PUBLIC GrB_Info GxB_Scalar_error (const char **error, const GrB_Scalar s) ;
// GrB_error (error,object) polymorphic function:
#if GxB_STDC_VERSION >= 201112L
// GrB_error: polymorphic error query.  _Generic dispatches on the handle type
// of the object (const-qualified cases are listed as well) and forwards
// (error, object) to the matching *_error method.
#define GrB_error(error,object) \
_Generic \
( \
(object), \
const GrB_Type : GrB_Type_error , \
GrB_Type : GrB_Type_error , \
const GrB_UnaryOp : GrB_UnaryOp_error , \
GrB_UnaryOp : GrB_UnaryOp_error , \
const GrB_BinaryOp : GrB_BinaryOp_error , \
GrB_BinaryOp : GrB_BinaryOp_error , \
const GxB_SelectOp : GxB_SelectOp_error , \
GxB_SelectOp : GxB_SelectOp_error , \
const GrB_IndexUnaryOp : GrB_IndexUnaryOp_error , \
GrB_IndexUnaryOp : GrB_IndexUnaryOp_error , \
const GrB_Monoid : GrB_Monoid_error , \
GrB_Monoid : GrB_Monoid_error , \
const GrB_Semiring : GrB_Semiring_error , \
GrB_Semiring : GrB_Semiring_error , \
const GrB_Scalar : GrB_Scalar_error , \
GrB_Scalar : GrB_Scalar_error , \
const GrB_Vector : GrB_Vector_error , \
GrB_Vector : GrB_Vector_error , \
const GrB_Matrix : GrB_Matrix_error , \
GrB_Matrix : GrB_Matrix_error , \
const GrB_Descriptor : GrB_Descriptor_error , \
GrB_Descriptor : GrB_Descriptor_error \
) \
(error, object)
#endif
//==============================================================================
// GrB_mxm, vxm, mxv: matrix multiplication over a semiring
//==============================================================================
// Each method computes a product over the given semiring, optionally applies
// the accumulator, and writes the result through the mask (all as directed by
// the descriptor).
GB_PUBLIC
GrB_Info GrB_mxm // C<Mask> = accum (C, A*B)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_Semiring semiring, // defines '+' and '*' for A*B
const GrB_Matrix A, // first input: matrix A
const GrB_Matrix B, // second input: matrix B
const GrB_Descriptor desc // descriptor for C, Mask, A, and B
) ;
GB_PUBLIC
GrB_Info GrB_vxm // w'<Mask> = accum (w, u'*A)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_Semiring semiring, // defines '+' and '*' for u'*A
const GrB_Vector u, // first input: vector u
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for w, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_mxv // w<Mask> = accum (w, A*u)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_Semiring semiring, // defines '+' and '*' for A*u
const GrB_Matrix A, // first input: matrix A
const GrB_Vector u, // second input: vector u
const GrB_Descriptor desc // descriptor for w, mask, and A
) ;
//==============================================================================
// GrB_eWiseMult: element-wise matrix and vector operations, set intersection
//==============================================================================
// GrB_eWiseMult computes C<Mask> = accum (C, A.*B), where ".*" is the Hadamard
// product, and where pairs of elements in two matrices (or vectors) are
// pairwise "multiplied" with C(i,j) = mult (A(i,j),B(i,j)).
// Three variants are provided per object kind: per the GraphBLAS C API, the
// Semiring variant uses the semiring's multiply operator, and the Monoid
// variant uses the monoid's binary operator.
GB_PUBLIC
GrB_Info GrB_Vector_eWiseMult_Semiring // w<Mask> = accum (w, u.*v)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_Semiring semiring, // defines '.*' for t=u.*v
const GrB_Vector u, // first input: vector u
const GrB_Vector v, // second input: vector v
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_eWiseMult_Monoid // w<Mask> = accum (w, u.*v)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_Monoid monoid, // defines '.*' for t=u.*v
const GrB_Vector u, // first input: vector u
const GrB_Vector v, // second input: vector v
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_eWiseMult_BinaryOp // w<Mask> = accum (w, u.*v)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp mult, // defines '.*' for t=u.*v
const GrB_Vector u, // first input: vector u
const GrB_Vector v, // second input: vector v
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_eWiseMult_Semiring // C<Mask> = accum (C, A.*B)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_Semiring semiring, // defines '.*' for T=A.*B
const GrB_Matrix A, // first input: matrix A
const GrB_Matrix B, // second input: matrix B
const GrB_Descriptor desc // descriptor for C, Mask, A, and B
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_eWiseMult_Monoid // C<Mask> = accum (C, A.*B)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_Monoid monoid, // defines '.*' for T=A.*B
const GrB_Matrix A, // first input: matrix A
const GrB_Matrix B, // second input: matrix B
const GrB_Descriptor desc // descriptor for C, Mask, A, and B
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_eWiseMult_BinaryOp // C<Mask> = accum (C, A.*B)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp mult, // defines '.*' for T=A.*B
const GrB_Matrix A, // first input: matrix A
const GrB_Matrix B, // second input: matrix B
const GrB_Descriptor desc // descriptor for C, Mask, A, and B
) ;
// All 6 of the above type-specific functions are captured in a single
// type-generic function, GrB_eWiseMult:
// Dispatch is two-level: the outer _Generic selects on the type of C
// (GrB_Matrix vs GrB_Vector), the inner one on the type of op
// (GrB_Semiring, GrB_Monoid, or GrB_BinaryOp, const or not).
#if GxB_STDC_VERSION >= 201112L
#define GrB_eWiseMult(C,Mask,accum,op,A,B,desc) \
_Generic \
( \
(C), \
GrB_Matrix : \
_Generic \
( \
(op), \
const GrB_Semiring : GrB_Matrix_eWiseMult_Semiring , \
GrB_Semiring : GrB_Matrix_eWiseMult_Semiring , \
const GrB_Monoid : GrB_Matrix_eWiseMult_Monoid , \
GrB_Monoid : GrB_Matrix_eWiseMult_Monoid , \
const GrB_BinaryOp : GrB_Matrix_eWiseMult_BinaryOp , \
GrB_BinaryOp : GrB_Matrix_eWiseMult_BinaryOp \
), \
GrB_Vector : \
_Generic \
( \
(op), \
const GrB_Semiring : GrB_Vector_eWiseMult_Semiring , \
GrB_Semiring : GrB_Vector_eWiseMult_Semiring , \
const GrB_Monoid : GrB_Vector_eWiseMult_Monoid , \
GrB_Monoid : GrB_Vector_eWiseMult_Monoid , \
const GrB_BinaryOp : GrB_Vector_eWiseMult_BinaryOp , \
GrB_BinaryOp : GrB_Vector_eWiseMult_BinaryOp \
) \
) \
(C, Mask, accum, op, A, B, desc)
#endif
//==============================================================================
// GrB_eWiseAdd: element-wise matrix and vector operations, set union
//==============================================================================
// GrB_eWiseAdd computes C<Mask> = accum (C, A+B), where pairs of elements in
// two matrices (or two vectors) are pairwise "added".
// Entries present in only one input are copied to the result unchanged (set
// union).  Per the GraphBLAS C API, the Semiring variant uses the semiring's
// additive monoid, and the Monoid variant uses the monoid's binary operator.
GB_PUBLIC
GrB_Info GrB_Vector_eWiseAdd_Semiring // w<mask> = accum (w, u+v)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_Semiring semiring, // defines '+' for t=u+v
const GrB_Vector u, // first input: vector u
const GrB_Vector v, // second input: vector v
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_eWiseAdd_Monoid // w<mask> = accum (w, u+v)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_Monoid monoid, // defines '+' for t=u+v
const GrB_Vector u, // first input: vector u
const GrB_Vector v, // second input: vector v
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_eWiseAdd_BinaryOp // w<mask> = accum (w, u+v)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp add, // defines '+' for t=u+v
const GrB_Vector u, // first input: vector u
const GrB_Vector v, // second input: vector v
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_eWiseAdd_Semiring // C<Mask> = accum (C, A+B)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_Semiring semiring, // defines '+' for T=A+B
const GrB_Matrix A, // first input: matrix A
const GrB_Matrix B, // second input: matrix B
const GrB_Descriptor desc // descriptor for C, Mask, A, and B
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_eWiseAdd_Monoid // C<Mask> = accum (C, A+B)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_Monoid monoid, // defines '+' for T=A+B
const GrB_Matrix A, // first input: matrix A
const GrB_Matrix B, // second input: matrix B
const GrB_Descriptor desc // descriptor for C, Mask, A, and B
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_eWiseAdd_BinaryOp // C<Mask> = accum (C, A+B)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp add, // defines '+' for T=A+B
const GrB_Matrix A, // first input: matrix A
const GrB_Matrix B, // second input: matrix B
const GrB_Descriptor desc // descriptor for C, Mask, A, and B
) ;
// GrB_eWiseAdd: type-generic wrapper over the 6 functions above.  The outer
// _Generic selects on the type of C (GrB_Matrix vs GrB_Vector), the inner one
// on the type of op (GrB_Semiring, GrB_Monoid, or GrB_BinaryOp).
#if GxB_STDC_VERSION >= 201112L
#define GrB_eWiseAdd(C,Mask,accum,op,A,B,desc) \
_Generic \
( \
(C), \
GrB_Matrix : \
_Generic \
( \
(op), \
const GrB_Semiring : GrB_Matrix_eWiseAdd_Semiring , \
GrB_Semiring : GrB_Matrix_eWiseAdd_Semiring , \
const GrB_Monoid : GrB_Matrix_eWiseAdd_Monoid , \
GrB_Monoid : GrB_Matrix_eWiseAdd_Monoid , \
const GrB_BinaryOp : GrB_Matrix_eWiseAdd_BinaryOp , \
GrB_BinaryOp : GrB_Matrix_eWiseAdd_BinaryOp \
), \
GrB_Vector : \
_Generic \
( \
(op), \
const GrB_Semiring : GrB_Vector_eWiseAdd_Semiring , \
GrB_Semiring : GrB_Vector_eWiseAdd_Semiring , \
const GrB_Monoid : GrB_Vector_eWiseAdd_Monoid , \
GrB_Monoid : GrB_Vector_eWiseAdd_Monoid , \
const GrB_BinaryOp : GrB_Vector_eWiseAdd_BinaryOp , \
GrB_BinaryOp : GrB_Vector_eWiseAdd_BinaryOp \
) \
) \
(C, Mask, accum, op, A, B, desc)
#endif
//==============================================================================
// GxB_eWiseUnion: a variant of GrB_eWiseAdd
//==============================================================================
// GxB_eWiseUnion is a variant of eWiseAdd. They differ when an entry is
// present in A but not B, or in B but not A.
// eWiseAdd does the following, for a matrix, where "+" is the add binary op:
// if A(i,j) and B(i,j) are both present:
// C(i,j) = A(i,j) + B(i,j)
// else if A(i,j) is present but not B(i,j)
// C(i,j) = A(i,j)
// else if B(i,j) is present but not A(i,j)
// C(i,j) = B(i,j)
// by contrast, eWiseUnion always applies the operator:
// if A(i,j) and B(i,j) are both present:
// C(i,j) = A(i,j) + B(i,j)
// else if A(i,j) is present but not B(i,j)
// C(i,j) = A(i,j) + beta
// else if B(i,j) is present but not A(i,j)
// C(i,j) = alpha + B(i,j)
GB_PUBLIC
GrB_Info GxB_Vector_eWiseUnion // w<mask> = accum (w, u+v)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp add, // defines '+' for t=u+v
const GrB_Vector u, // first input: vector u
const GrB_Scalar alpha, // scalar used in place of u(i) when only v(i) is present
const GrB_Vector v, // second input: vector v
const GrB_Scalar beta, // scalar used in place of v(i) when only u(i) is present
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_eWiseUnion // C<M> = accum (C, A+B)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp add, // defines '+' for T=A+B
const GrB_Matrix A, // first input: matrix A
const GrB_Scalar alpha, // scalar used in place of A(i,j) when only B(i,j) is present
const GrB_Matrix B, // second input: matrix B
const GrB_Scalar beta, // scalar used in place of B(i,j) when only A(i,j) is present
const GrB_Descriptor desc // descriptor for C, M, A, and B
) ;
// GxB_eWiseUnion: type-generic wrapper; dispatches on the type of C
// (GrB_Matrix vs GrB_Vector, const or not).
#if GxB_STDC_VERSION >= 201112L
#define GxB_eWiseUnion(C,Mask,accum,op,A,alpha,B,beta,desc) \
_Generic \
( \
(C), \
const GrB_Matrix : GxB_Matrix_eWiseUnion , \
GrB_Matrix : GxB_Matrix_eWiseUnion , \
const GrB_Vector : GxB_Vector_eWiseUnion , \
GrB_Vector : GxB_Vector_eWiseUnion \
) \
(C, Mask, accum, op, A, alpha, B, beta, desc)
#endif
//==============================================================================
// GrB_extract: extract a submatrix or subvector
//==============================================================================
// Extract entries from a matrix or vector; T = A(I,J). This (like most
// GraphBLAS methods) is then followed by C<Mask>=accum(C,T).
// To extract all rows of a matrix or vector, as in A (:,J), use I=GrB_ALL as
// the input argument. For all columns of a matrix, use J=GrB_ALL.
GB_PUBLIC const uint64_t *GrB_ALL ;
// To extract a range of rows and columns, I and J can be a list of 2 or 3
// indices that defines a range (begin:end) or a strided range (begin:inc:end).
// To specify the colon syntax I = begin:end, the array I has size at least 2,
// where I [GxB_BEGIN] = begin and I [GxB_END] = end. The parameter ni is then
// passed as the special value GxB_RANGE. To specify the colon syntax I =
// begin:inc:end, the array I has size at least three, with the values begin,
// end, and inc (in that order), and then pass in the value ni = GxB_STRIDE.
// The same can be done for the list J and its size, nj.
// These special values of ni and nj can be used for GrB_assign,
// GrB_extract, and GxB_subassign.
#define GxB_RANGE (INT64_MAX)
#define GxB_STRIDE (INT64_MAX-1)
#define GxB_BACKWARDS (INT64_MAX-2)
// for the strided range begin:inc:end, I [GxB_BEGIN] is the value of begin, I
// [GxB_END] is the value end, I [GxB_INC] is the magnitude of the stride. If
// the stride is negative, use ni = GxB_BACKWARDS.
// GxB_BEGIN, GxB_END, and GxB_INC are the positions in I (or J) that hold the
// begin, end, and increment values of a range.
#define GxB_BEGIN (0)
#define GxB_END (1)
#define GxB_INC (2)
// For example, the notation 10:-2:1 defines a sequence [10 8 6 4 2].
// The end point of the sequence (1) need not appear in the sequence, if
// the last increment goes past it. To specify the same in GraphBLAS,
// use:
// GrB_Index I [3], ni = GxB_BACKWARDS ;
// I [GxB_BEGIN ] = 10 ; // the start of the sequence
// I [GxB_INC ] = 2 ; // the magnitude of the increment
// I [GxB_END ] = 1 ; // the end of the sequence
GB_PUBLIC
GrB_Info GrB_Vector_extract // w<mask> = accum (w, u(I))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_Vector u, // first input: vector u
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extract // C<Mask> = accum (C, A(I,J))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_Matrix A, // first input: matrix A
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C, Mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Col_extract // w<mask> = accum (w, A(I,j))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_Matrix A, // first input: matrix A
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
GrB_Index j, // column index
const GrB_Descriptor desc // descriptor for w, mask, and A
) ;
//------------------------------------------------------------------------------
// GrB_extract: generic matrix/vector extraction
//------------------------------------------------------------------------------
// GrB_extract is a generic interface to the following functions:
// GrB_Vector_extract (w,mask,acc,u,I,ni,d) // w<m> = acc (w, u(I))
// GrB_Col_extract (w,mask,acc,A,I,ni,j,d) // w<m> = acc (w, A(I,j))
// GrB_Matrix_extract (C,Mask,acc,A,I,ni,J,nj,d) // C<Mask> = acc (C, A(I,J))
// When arg1 is a GrB_Vector, the inner _Generic on arg4 distinguishes a
// vector source (GrB_Vector_extract) from a matrix column source
// (GrB_Col_extract); the trailing arguments are forwarded via __VA_ARGS__.
#if GxB_STDC_VERSION >= 201112L
#define GrB_extract(arg1,Mask,accum,arg4,...) \
_Generic \
( \
(arg1), \
GrB_Vector : \
_Generic \
( \
(arg4), \
const GrB_Vector : GrB_Vector_extract , \
GrB_Vector : GrB_Vector_extract , \
const GrB_Matrix : GrB_Col_extract , \
GrB_Matrix : GrB_Col_extract \
), \
GrB_Matrix : GrB_Matrix_extract \
) \
(arg1, Mask, accum, arg4, __VA_ARGS__)
#endif
//==============================================================================
// GxB_subassign: matrix and vector subassign: C(I,J)<Mask> = accum (C(I,J), A)
//==============================================================================
// Assign entries in a matrix or vector; C(I,J) = A.
// Each GxB_subassign function is very similar to its corresponding GrB_assign
// function in the spec, but they differ in two ways: (1) the mask in
// GxB_subassign has the same size as w(I) for vectors and C(I,J) for matrices,
// and (2) they differ in the GrB_REPLACE option. See the user guide for
// details.
// In GraphBLAS notation, the two methods can be described as follows:
// matrix and vector subassign: C(I,J)<Mask> = accum (C(I,J), A)
// matrix and vector assign: C<Mask>(I,J) = accum (C(I,J), A)
// --- assign ------------------------------------------------------------------
//
// GrB_Matrix_assign C<M>(I,J) += A M same size as matrix C.
// A is |I|-by-|J|
//
// GrB_Vector_assign w<m>(I) += u m same size as column vector w.
// u is |I|-by-1
//
// GrB_Row_assign C<m'>(i,J) += u' m is a column vector the same
// size as a row of C.
// u is |J|-by-1, i is a scalar.
//
// GrB_Col_assign C<m>(I,j) += u m is a column vector the same
// size as a column of C.
// u is |I|-by-1, j is a scalar.
//
// --- subassign ---------------------------------------------------------------
//
// GxB_Matrix_subassign C(I,J)<M> += A M same size as matrix A.
// A is |I|-by-|J|
//
// GxB_Vector_subassign w(I)<m> += u m same size as column vector u.
// u is |I|-by-1
//
// GxB_Row_subassign C(i,J)<m'> += u' m same size as column vector u.
// u is |J|-by-1, i is a scalar.
//
// GxB_Col_subassign C(I,j)<m> += u m same size as column vector u.
// u is |I|-by-1, j is a scalar.
GB_PUBLIC
GrB_Info GxB_Vector_subassign // w(I)<mask> = accum (w(I),u)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w(I),t)
const GrB_Vector u, // first input: vector u
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign // C(I,J)<Mask> = accum (C(I,J),A)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),T)
const GrB_Matrix A, // first input: matrix A
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J), Mask, and A
) ;
GB_PUBLIC
GrB_Info GxB_Col_subassign // C(I,j)<mask> = accum (C(I,j),u)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Vector mask, // optional mask for C(I,j), unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(C(I,j),t)
const GrB_Vector u, // input vector
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
GrB_Index j, // column index
const GrB_Descriptor desc // descriptor for C(I,j) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Row_subassign // C(i,J)<mask'> = accum (C(i,J),u')
(
GrB_Matrix C, // input/output matrix for results
const GrB_Vector mask, // optional mask for C(i,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(C(i,J),t)
const GrB_Vector u, // input vector
GrB_Index i, // row index
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(i,J) and mask
) ;
//------------------------------------------------------------------------------
// GxB_Vector_subassign_[SCALAR]: scalar expansion assignment to subvector
//------------------------------------------------------------------------------
// Assigns a single scalar to a subvector, w(I)<mask> = accum(w(I),x). The
// scalar x is implicitly expanded into a vector u of size ni-by-1, with each
// entry in u equal to x, and then w(I)<mask> = accum(w(I),u) is done.
// One variant is declared per built-in C type, plus _UDT (scalar passed by
// (void *) pointer) and _Scalar (a GrB_Scalar object); all variants share the
// same (w, mask, accum, x, I, ni, desc) signature, differing only in x.
GB_PUBLIC
GrB_Info GxB_Vector_subassign_BOOL // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w(I),x)
bool x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_subassign_INT8 // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
int8_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_subassign_UINT8 // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
uint8_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_subassign_INT16 // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
int16_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_subassign_UINT16 // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
uint16_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_subassign_INT32 // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
int32_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_subassign_UINT32 // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
uint32_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_subassign_INT64 // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
int64_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_subassign_UINT64 // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
uint64_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_subassign_FP32 // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
float x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_subassign_FP64 // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
double x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_subassign_FC32 // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
GxB_FC32_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_subassign_FC64 // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
GxB_FC64_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_subassign_UDT // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
void *x, // scalar to assign to w(I); points to a user-defined-type value
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_subassign_Scalar // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
GrB_Scalar x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
//------------------------------------------------------------------------------
// GxB_Matrix_subassign_[SCALAR]: scalar expansion assignment to submatrix
//------------------------------------------------------------------------------
// Assigns a single scalar to a submatrix, C(I,J)<Mask> = accum(C(I,J),x). The
// scalar x is implicitly expanded into a matrix A of size ni-by-nj, with each
// entry in A equal to x, and then C(I,J)<Mask> = accum(C(I,J),A) is done.
// One variant is declared per built-in C type; all variants share the same
// (C, Mask, accum, x, I, ni, J, nj, desc) signature, differing only in x.
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_BOOL // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
bool x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_INT8 // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
int8_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_UINT8 // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
uint8_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_INT16 // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
int16_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_UINT16 // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
uint16_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_INT32 // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
int32_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_UINT32 // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
uint32_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_INT64 // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
int64_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_UINT64 // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
uint64_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_FP32 // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
float x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_FP64 // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
double x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_FC32 // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
GxB_FC32_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_FC64 // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
GxB_FC64_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_UDT // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
void *x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_Scalar // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
GrB_Scalar x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
//------------------------------------------------------------------------------
// GxB_subassign: generic submatrix/subvector assignment
//------------------------------------------------------------------------------
// GxB_subassign is a generic function that provides access to all specific
// GxB_*_subassign* functions:
// GxB_Vector_subassign   (w,m,acc,u,I,ni,d)      // w(I)<m>    = acc(w(I),u)
// GxB_Matrix_subassign   (C,M,acc,A,I,ni,J,nj,d) // C(I,J)<M>  = acc(C(I,J),A)
// GxB_Col_subassign      (C,m,acc,u,I,ni,j,d)    // C(I,j)<m>  = acc(C(I,j),u)
// GxB_Row_subassign      (C,m,acc,u,i,J,nj,d)    // C(i,J)<m'> = acc(C(i,J),u')
// GxB_Vector_subassign_T (w,m,acc,x,I,ni,d)      // w(I)<m>    = acc(w(I),x)
// GxB_Matrix_subassign_T (C,M,acc,x,I,ni,J,nj,d) // C(I,J)<M>  = acc(C(I,J),x)
// C11 _Generic dispatch: the outer selection is on arg1 (GrB_Vector vs any
// other, i.e. a GrB_Matrix).  The inner selection is on arg4: a built-in
// scalar type (via GB_CASES) or GrB_Scalar picks the typed/_Scalar variant;
// a GrB_Vector then selects on arg5, where an index array (GrB_Index *)
// means GxB_Col_subassign and anything else means GxB_Row_subassign.
#if GxB_STDC_VERSION >= 201112L
#define GxB_subassign(arg1,Mask,accum,arg4,arg5,...)                \
    _Generic                                                        \
    (                                                               \
        (arg1),                                                     \
        GrB_Vector :                                                \
            _Generic                                                \
            (                                                       \
                (arg4),                                             \
                GB_CASES (, GxB, Vector_subassign) ,                \
                const GrB_Scalar : GxB_Vector_subassign_Scalar,     \
                GrB_Scalar : GxB_Vector_subassign_Scalar,           \
                default: GxB_Vector_subassign                       \
            ),                                                      \
        default:                                                    \
            _Generic                                                \
            (                                                       \
                (arg4),                                             \
                GB_CASES (, GxB, Matrix_subassign) ,                \
                const GrB_Scalar : GxB_Matrix_subassign_Scalar,     \
                GrB_Scalar : GxB_Matrix_subassign_Scalar,           \
                const GrB_Vector :                                  \
                    _Generic                                        \
                    (                                               \
                        (arg5),                                     \
                        const GrB_Index *: GxB_Col_subassign ,      \
                        GrB_Index *: GxB_Col_subassign ,            \
                        default: GxB_Row_subassign                  \
                    ),                                              \
                GrB_Vector :                                        \
                    _Generic                                        \
                    (                                               \
                        (arg5),                                     \
                        const GrB_Index *: GxB_Col_subassign ,      \
                        GrB_Index *: GxB_Col_subassign ,            \
                        default: GxB_Row_subassign                  \
                    ),                                              \
                default: GxB_Matrix_subassign                       \
            )                                                       \
    )                                                               \
    (arg1, Mask, accum, arg4, arg5, __VA_ARGS__)
#endif
//==============================================================================
// GrB_assign: matrix and vector assign: C<Mask>(I,J) = accum (C(I,J), A)
//==============================================================================
// Assign entries in a matrix or vector; C(I,J) = A.
// Each of these can be used with their generic name, GrB_assign.
// Unlike GxB_subassign above, the optional mask here is the size of all of
// C (or all of w), not just the C(I,J) submatrix.
GB_PUBLIC
GrB_Info GrB_Vector_assign          // w<mask>(I) = accum (w(I),u)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for z=accum(w(I),t)
    const GrB_Vector u,             // first input: vector u
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_assign          // C<Mask>(I,J) = accum (C(I,J),A)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),T)
    const GrB_Matrix A,             // first input: matrix A
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C, Mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Col_assign             // C<mask>(I,j) = accum (C(I,j),u)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Vector mask,          // optional mask for C(:,j), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for z=accum(C(I,j),t)
    const GrB_Vector u,             // input vector
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    GrB_Index j,                    // column index
    const GrB_Descriptor desc       // descriptor for C(:,j) and mask
) ;
GB_PUBLIC
GrB_Info GrB_Row_assign             // C<mask'>(i,J) = accum (C(i,J),u')
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Vector mask,          // optional mask for C(i,:), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for z=accum(C(i,J),t)
    const GrB_Vector u,             // input vector
    GrB_Index i,                    // row index
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C(i,:) and mask
) ;
//------------------------------------------------------------------------------
// GrB_Vector_assign_[SCALAR]: scalar expansion assignment to subvector
//------------------------------------------------------------------------------
// Assigns a single scalar to a subvector, w<mask>(I) = accum(w(I),x).  The
// scalar x is implicitly expanded into a vector u of size ni-by-1, with each
// entry in u equal to x, and then w<mask>(I) = accum(w(I),u) is done.
GB_PUBLIC
GrB_Info GrB_Vector_assign_BOOL     // w<mask>(I) = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    bool x,                         // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_assign_INT8     // w<mask>(I) = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    int8_t x,                       // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_assign_UINT8    // w<mask>(I) = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    uint8_t x,                      // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_assign_INT16    // w<mask>(I) = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    int16_t x,                      // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_assign_UINT16   // w<mask>(I) = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    uint16_t x,                     // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_assign_INT32    // w<mask>(I) = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    int32_t x,                      // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_assign_UINT32   // w<mask>(I) = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    uint32_t x,                     // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_assign_INT64    // w<mask>(I) = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    int64_t x,                      // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_assign_UINT64   // w<mask>(I) = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    uint64_t x,                     // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_assign_FP32     // w<mask>(I) = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    float x,                        // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_assign_FP64     // w<mask>(I) = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    double x,                       // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w and mask
) ;
// complex variants are SuiteSparse extensions, hence the GxB_ prefix
GB_PUBLIC
GrB_Info GxB_Vector_assign_FC32     // w<mask>(I) = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    GxB_FC32_t x,                   // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_assign_FC64     // w<mask>(I) = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    GxB_FC64_t x,                   // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w and mask
) ;
// UDT variant: x is passed by (void *) pointer; its type must match the
// user-defined type of w.
GB_PUBLIC
GrB_Info GrB_Vector_assign_UDT      // w<mask>(I) = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    void *x,                        // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w and mask
) ;
// Scalar variant: x is an opaque GrB_Scalar instead of a raw C scalar.
GB_PUBLIC
GrB_Info GrB_Vector_assign_Scalar   // w<mask>(I) = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    GrB_Scalar x,                   // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w and mask
) ;
//------------------------------------------------------------------------------
// GrB_Matrix_assign_[SCALAR]: scalar expansion assignment to submatrix
//------------------------------------------------------------------------------
// Assigns a single scalar to a submatrix, C<Mask>(I,J) = accum(C(I,J),x).  The
// scalar x is implicitly expanded into a matrix A of size ni-by-nj, with each
// entry in A equal to x, and then C<Mask>(I,J) = accum(C(I,J),A) is done.
GB_PUBLIC
GrB_Info GrB_Matrix_assign_BOOL     // C<Mask>(I,J) = accum (C(I,J),x)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),x)
    bool x,                         // scalar to assign to C(I,J)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C and Mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_assign_INT8     // C<Mask>(I,J) = accum (C(I,J),x)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),x)
    int8_t x,                       // scalar to assign to C(I,J)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C and Mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_assign_UINT8    // C<Mask>(I,J) = accum (C(I,J),x)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),x)
    uint8_t x,                      // scalar to assign to C(I,J)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C and Mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_assign_INT16    // C<Mask>(I,J) = accum (C(I,J),x)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),x)
    int16_t x,                      // scalar to assign to C(I,J)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C and Mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_assign_UINT16   // C<Mask>(I,J) = accum (C(I,J),x)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),x)
    uint16_t x,                     // scalar to assign to C(I,J)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C and Mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_assign_INT32    // C<Mask>(I,J) = accum (C(I,J),x)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),x)
    int32_t x,                      // scalar to assign to C(I,J)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C and Mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_assign_UINT32   // C<Mask>(I,J) = accum (C(I,J),x)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),x)
    uint32_t x,                     // scalar to assign to C(I,J)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C and Mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_assign_INT64    // C<Mask>(I,J) = accum (C(I,J),x)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),x)
    int64_t x,                      // scalar to assign to C(I,J)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C and Mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_assign_UINT64   // C<Mask>(I,J) = accum (C(I,J),x)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),x)
    uint64_t x,                     // scalar to assign to C(I,J)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C and Mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_assign_FP32     // C<Mask>(I,J) = accum (C(I,J),x)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),x)
    float x,                        // scalar to assign to C(I,J)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C and Mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_assign_FP64     // C<Mask>(I,J) = accum (C(I,J),x)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),x)
    double x,                       // scalar to assign to C(I,J)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C and Mask
) ;
// complex variants are SuiteSparse extensions, hence the GxB_ prefix
GB_PUBLIC
GrB_Info GxB_Matrix_assign_FC32     // C<Mask>(I,J) = accum (C(I,J),x)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),x)
    GxB_FC32_t x,                   // scalar to assign to C(I,J)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_assign_FC64     // C<Mask>(I,J) = accum (C(I,J),x)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),x)
    GxB_FC64_t x,                   // scalar to assign to C(I,J)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C and Mask
) ;
// UDT variant: x is passed by (void *) pointer; its type must match the
// user-defined type of C.
GB_PUBLIC
GrB_Info GrB_Matrix_assign_UDT      // C<Mask>(I,J) = accum (C(I,J),x)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),x)
    void *x,                        // scalar to assign to C(I,J)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C and Mask
) ;
// Scalar variant: x is an opaque GrB_Scalar instead of a raw C scalar.
GB_PUBLIC
GrB_Info GrB_Matrix_assign_Scalar   // C<Mask>(I,J) = accum (C(I,J),x)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),x)
    GrB_Scalar x,                   // scalar to assign to C(I,J)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C and Mask
) ;
//------------------------------------------------------------------------------
// GrB_assign: generic submatrix/subvector assignment
//------------------------------------------------------------------------------
// GrB_assign is a generic function that provides access to all specific
// GrB_*_assign* functions:
// GrB_Vector_assign_T (w,m,acc,x,I,ni,d)      // w<m>(I)    = acc(w(I),x)
// GrB_Vector_assign   (w,m,acc,u,I,ni,d)      // w<m>(I)    = acc(w(I),u)
// GrB_Matrix_assign_T (C,M,acc,x,I,ni,J,nj,d) // C<M>(I,J)  = acc(C(I,J),x)
// GrB_Col_assign      (C,m,acc,u,I,ni,j,d)    // C<m>(I,j)  = acc(C(I,j),u)
// GrB_Row_assign      (C,m,acc,u,i,J,nj,d)    // C<m'>(i,J) = acc(C(i,J),u')
// GrB_Matrix_assign   (C,M,acc,A,I,ni,J,nj,d) // C<M>(I,J)  = acc(C(I,J),A)
// C11 _Generic dispatch, identical in structure to GxB_subassign above:
// select on arg1 (vector vs matrix), then on arg4 (scalar type, GrB_Scalar,
// or GrB_Vector), then on arg5 (index array means GrB_Col_assign, scalar
// row index means GrB_Row_assign).
#if GxB_STDC_VERSION >= 201112L
#define GrB_assign(arg1,Mask,accum,arg4,arg5,...)                   \
    _Generic                                                        \
    (                                                               \
        (arg1),                                                     \
        GrB_Vector :                                                \
            _Generic                                                \
            (                                                       \
                (arg4),                                             \
                GB_CASES (, GrB, Vector_assign) ,                   \
                const GrB_Scalar : GrB_Vector_assign_Scalar ,       \
                GrB_Scalar : GrB_Vector_assign_Scalar ,             \
                default: GrB_Vector_assign                          \
            ),                                                      \
        default:                                                    \
            _Generic                                                \
            (                                                       \
                (arg4),                                             \
                GB_CASES (, GrB, Matrix_assign) ,                   \
                const GrB_Scalar : GrB_Matrix_assign_Scalar ,       \
                GrB_Scalar : GrB_Matrix_assign_Scalar ,             \
                const GrB_Vector :                                  \
                    _Generic                                        \
                    (                                               \
                        (arg5),                                     \
                        const GrB_Index *: GrB_Col_assign ,         \
                        GrB_Index *: GrB_Col_assign ,               \
                        default: GrB_Row_assign                     \
                    ),                                              \
                GrB_Vector :                                        \
                    _Generic                                        \
                    (                                               \
                        (arg5),                                     \
                        const GrB_Index *: GrB_Col_assign ,         \
                        GrB_Index *: GrB_Col_assign ,               \
                        default: GrB_Row_assign                     \
                    ),                                              \
                default: GrB_Matrix_assign                          \
            )                                                       \
    )                                                               \
    (arg1, Mask, accum, arg4, arg5, __VA_ARGS__)
#endif
//==============================================================================
// GrB_apply: matrix and vector apply
//==============================================================================
// Apply a unary, index_unary, or binary operator to entries in a matrix or
// vector, C<M> = accum (C, op (A)).
GB_PUBLIC
GrB_Info GrB_Vector_apply           // w<mask> = accum (w, op(u))
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for z=accum(w,t)
    const GrB_UnaryOp op,           // operator to apply to the entries
    const GrB_Vector u,             // first input: vector u
    const GrB_Descriptor desc       // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply           // C<Mask> = accum (C, op(A)) or op(A')
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C,T)
    const GrB_UnaryOp op,           // operator to apply to the entries
    const GrB_Matrix A,             // first input: matrix A
    const GrB_Descriptor desc       // descriptor for C, mask, and A
) ;
//-------------------------------------------
// vector apply: binaryop variants (bind 1st)
//-------------------------------------------
// Apply a binary operator to the entries in a vector, binding the first
// input to a scalar x, w<mask> = accum (w, op (x,u)).
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp1st_Scalar // w<mask> = accum (w, op(x,u))
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for z=accum(w,t)
    const GrB_BinaryOp op,          // operator to apply to the entries
    const GrB_Scalar x,             // first input: scalar x
    const GrB_Vector u,             // second input: vector u
    const GrB_Descriptor desc       // descriptor for w and mask
) ;
// historical: identical to GrB_Vector_apply_BinaryOp1st_Scalar
GB_PUBLIC
GrB_Info GxB_Vector_apply_BinaryOp1st   // w<mask> = accum (w, op(x,u))
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for z=accum(w,t)
    const GrB_BinaryOp op,          // operator to apply to the entries
    const GrB_Scalar x,             // first input: scalar x
    const GrB_Vector u,             // second input: vector u
    const GrB_Descriptor desc       // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp1st_BOOL // w<mask> = accum (w, op(x,u))
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for z=accum(w,t)
    const GrB_BinaryOp op,          // operator to apply to the entries
    bool x,                         // first input: scalar x
    const GrB_Vector u,             // second input: vector u
    const GrB_Descriptor desc       // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp1st_INT8 // w<mask> = accum (w, op(x,u))
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for z=accum(w,t)
    const GrB_BinaryOp op,          // operator to apply to the entries
    int8_t x,                       // first input: scalar x
    const GrB_Vector u,             // second input: vector u
    const GrB_Descriptor desc       // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp1st_INT16 // w<mask> = accum (w, op(x,u))
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for z=accum(w,t)
    const GrB_BinaryOp op,          // operator to apply to the entries
    int16_t x,                      // first input: scalar x
    const GrB_Vector u,             // second input: vector u
    const GrB_Descriptor desc       // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp1st_INT32 // w<mask> = accum (w, op(x,u))
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for z=accum(w,t)
    const GrB_BinaryOp op,          // operator to apply to the entries
    int32_t x,                      // first input: scalar x
    const GrB_Vector u,             // second input: vector u
    const GrB_Descriptor desc       // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp1st_INT64 // w<mask> = accum (w, op(x,u))
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for z=accum(w,t)
    const GrB_BinaryOp op,          // operator to apply to the entries
    int64_t x,                      // first input: scalar x
    const GrB_Vector u,             // second input: vector u
    const GrB_Descriptor desc       // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp1st_UINT8 // w<mask> = accum (w, op(x,u))
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for z=accum(w,t)
    const GrB_BinaryOp op,          // operator to apply to the entries
    uint8_t x,                      // first input: scalar x
    const GrB_Vector u,             // second input: vector u
    const GrB_Descriptor desc       // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp1st_UINT16 // w<mask> = accum (w, op(x,u))
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for z=accum(w,t)
    const GrB_BinaryOp op,          // operator to apply to the entries
    uint16_t x,                     // first input: scalar x
    const GrB_Vector u,             // second input: vector u
    const GrB_Descriptor desc       // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp1st_UINT32 // w<mask> = accum (w, op(x,u))
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for z=accum(w,t)
    const GrB_BinaryOp op,          // operator to apply to the entries
    uint32_t x,                     // first input: scalar x
    const GrB_Vector u,             // second input: vector u
    const GrB_Descriptor desc       // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp1st_UINT64 // w<mask> = accum (w, op(x,u))
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for z=accum(w,t)
    const GrB_BinaryOp op,          // operator to apply to the entries
    uint64_t x,                     // first input: scalar x
    const GrB_Vector u,             // second input: vector u
    const GrB_Descriptor desc       // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp1st_FP32 // w<mask> = accum (w, op(x,u))
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for z=accum(w,t)
    const GrB_BinaryOp op,          // operator to apply to the entries
    float x,                        // first input: scalar x
    const GrB_Vector u,             // second input: vector u
    const GrB_Descriptor desc       // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp1st_FP64 // w<mask> = accum (w, op(x,u))
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for z=accum(w,t)
    const GrB_BinaryOp op,          // operator to apply to the entries
    double x,                       // first input: scalar x
    const GrB_Vector u,             // second input: vector u
    const GrB_Descriptor desc       // descriptor for w and mask
) ;
// complex variants are SuiteSparse extensions, hence the GxB_ prefix
GB_PUBLIC
GrB_Info GxB_Vector_apply_BinaryOp1st_FC32 // w<mask> = accum (w, op(x,u))
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for z=accum(w,t)
    const GrB_BinaryOp op,          // operator to apply to the entries
    GxB_FC32_t x,                   // first input: scalar x
    const GrB_Vector u,             // second input: vector u
    const GrB_Descriptor desc       // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_apply_BinaryOp1st_FC64 // w<mask> = accum (w, op(x,u))
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for z=accum(w,t)
    const GrB_BinaryOp op,          // operator to apply to the entries
    GxB_FC64_t x,                   // first input: scalar x
    const GrB_Vector u,             // second input: vector u
    const GrB_Descriptor desc       // descriptor for w and mask
) ;
// UDT variant: x is passed by (const void *) pointer; its type must match
// the first input type of op.
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp1st_UDT // w<mask> = accum (w, op(x,u))
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for z=accum(w,t)
    const GrB_BinaryOp op,          // operator to apply to the entries
    const void *x,                  // first input: scalar x
    const GrB_Vector u,             // second input: vector u
    const GrB_Descriptor desc       // descriptor for w and mask
) ;
//-------------------------------------------
// vector apply: binaryop variants (bind 2nd)
//-------------------------------------------
// Apply a binary operator to the entries in a vector, binding the second
// input to a scalar y, w<mask> = accum (w, op (u,y)).
// One prototype per scalar type: GrB_Scalar, the built-in C types, the
// GxB complex extensions, and a (const void *) form for user-defined types.
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp2nd_Scalar // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
const GrB_Scalar y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
// historical: identical to GrB_Vector_apply_BinaryOp2nd_Scalar
// (kept for backward compatibility; new code should use the GrB_ name)
GB_PUBLIC
GrB_Info GxB_Vector_apply_BinaryOp2nd // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
const GrB_Scalar y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp2nd_BOOL // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
bool y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp2nd_INT8 // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
int8_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp2nd_INT16 // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
int16_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp2nd_INT32 // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
int32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp2nd_INT64 // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
int64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp2nd_UINT8 // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
uint8_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp2nd_UINT16 // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
uint16_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp2nd_UINT32 // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
uint32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp2nd_UINT64 // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
uint64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp2nd_FP32 // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
float y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp2nd_FP64 // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
double y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
// GxB extension: single-precision complex scalar
GB_PUBLIC
GrB_Info GxB_Vector_apply_BinaryOp2nd_FC32 // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
GxB_FC32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
// GxB extension: double-precision complex scalar
GB_PUBLIC
GrB_Info GxB_Vector_apply_BinaryOp2nd_FC64 // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
GxB_FC64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
// user-defined type: the scalar y is passed by pointer
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp2nd_UDT // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
const void *y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
//-------------------------------------------
// vector apply: IndexUnaryOp variants
//-------------------------------------------
// Apply a GrB_IndexUnaryOp to the entries in a vector
// w<mask> = accum (w, op(u)).  The op also receives the scalar y; one
// prototype per scalar type (GrB_Scalar, built-in C types, GxB complex
// extensions, and a (const void *) form for user-defined types).
GB_PUBLIC
GrB_Info GrB_Vector_apply_IndexOp_Scalar // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
const GrB_Scalar y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_IndexOp_BOOL // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
bool y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_IndexOp_INT8 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
int8_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_IndexOp_INT16 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
int16_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_IndexOp_INT32 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
int32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_IndexOp_INT64 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
int64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_IndexOp_UINT8 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
uint8_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_IndexOp_UINT16 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
uint16_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_IndexOp_UINT32 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
uint32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_IndexOp_UINT64 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
uint64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_IndexOp_FP32 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
float y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_IndexOp_FP64 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
double y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
// GxB extension: single-precision complex scalar
GB_PUBLIC
GrB_Info GxB_Vector_apply_IndexOp_FC32 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
GxB_FC32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
// GxB extension: double-precision complex scalar
GB_PUBLIC
GrB_Info GxB_Vector_apply_IndexOp_FC64 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
GxB_FC64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
// user-defined type: the scalar y is passed by pointer
GB_PUBLIC
GrB_Info GrB_Vector_apply_IndexOp_UDT // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
const void *y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
//-------------------------------------------
// matrix apply: binaryop variants (bind 1st)
//-------------------------------------------
// Apply a binary operator to the entries in a matrix, binding the first input
// to a scalar x, C<Mask> = accum (C, op (x,A)), or op(x,A').
// One prototype per scalar type: GrB_Scalar, built-in C types, GxB complex
// extensions, and a (const void *) form for user-defined types.
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp1st_Scalar // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Scalar x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
// historical: identical to GrB_Matrix_apply_BinaryOp1st_Scalar
// (kept for backward compatibility; new code should use the GrB_ name)
GB_PUBLIC
GrB_Info GxB_Matrix_apply_BinaryOp1st // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Scalar x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp1st_BOOL // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
bool x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp1st_INT8 // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
int8_t x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp1st_INT16 // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
int16_t x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp1st_INT32 // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
int32_t x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp1st_INT64 // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
int64_t x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp1st_UINT8 // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
uint8_t x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp1st_UINT16 // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
uint16_t x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp1st_UINT32 // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
uint32_t x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp1st_UINT64 // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
uint64_t x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp1st_FP32 // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
float x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp1st_FP64 // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
double x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
// GxB extension: single-precision complex scalar
GB_PUBLIC
GrB_Info GxB_Matrix_apply_BinaryOp1st_FC32 // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
GxB_FC32_t x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
// GxB extension: double-precision complex scalar
GB_PUBLIC
GrB_Info GxB_Matrix_apply_BinaryOp1st_FC64 // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
GxB_FC64_t x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
// user-defined type: the scalar x is passed by pointer
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp1st_UDT // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const void *x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
//-------------------------------------------
// matrix apply: binaryop variants (bind 2nd)
//-------------------------------------------
// Apply a binary operator to the entries in a matrix, binding the second input
// to a scalar y, C<Mask> = accum (C, op (A,y)), or op(A',y).
// One prototype per scalar type: GrB_Scalar, built-in C types, GxB complex
// extensions, and a (const void *) form for user-defined types.
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp2nd_Scalar // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
const GrB_Scalar y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
// historical: identical to GrB_Matrix_apply_BinaryOp2nd_Scalar
// (kept for backward compatibility; new code should use the GrB_ name)
GB_PUBLIC
GrB_Info GxB_Matrix_apply_BinaryOp2nd // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
const GrB_Scalar y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp2nd_BOOL // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
bool y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp2nd_INT8 // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
int8_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp2nd_INT16 // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
int16_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp2nd_INT32 // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
int32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp2nd_INT64 // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
int64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp2nd_UINT8 // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
uint8_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp2nd_UINT16 // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
uint16_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp2nd_UINT32 // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
uint32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp2nd_UINT64 // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
uint64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp2nd_FP32 // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
float y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp2nd_FP64 // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
double y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
// GxB extension: single-precision complex scalar
GB_PUBLIC
GrB_Info GxB_Matrix_apply_BinaryOp2nd_FC32 // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
GxB_FC32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
// GxB extension: double-precision complex scalar
GB_PUBLIC
GrB_Info GxB_Matrix_apply_BinaryOp2nd_FC64 // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
GxB_FC64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
// user-defined type: the scalar y is passed by pointer
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp2nd_UDT // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
const void *y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
//-------------------------------------------
// matrix apply: IndexUnaryOp variants
//-------------------------------------------
// Apply a GrB_IndexUnaryOp to the entries in a matrix.
// C<M> = accum (C, op(A)).  The op also receives the scalar y; one
// prototype per scalar type.
GB_PUBLIC
GrB_Info GrB_Matrix_apply_IndexOp_Scalar // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
const GrB_Scalar y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_IndexOp_BOOL // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
bool y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_IndexOp_INT8 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
int8_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_IndexOp_INT16 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
int16_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_IndexOp_INT32 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
int32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_IndexOp_INT64 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
int64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_IndexOp_UINT8 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
uint8_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_IndexOp_UINT16 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
uint16_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_IndexOp_UINT32 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
uint32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_IndexOp_UINT64 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
uint64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_IndexOp_FP32 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
float y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_IndexOp_FP64 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
double y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_apply_IndexOp_FC32 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
GxB_FC32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_apply_IndexOp_FC64 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
GxB_FC64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_IndexOp_UDT // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
const void *y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
//------------------------------------------------------------------------------
// GrB_apply: generic matrix/vector apply
//------------------------------------------------------------------------------
// GrB_apply is a generic function for applying a unary, binary, or index-unary
// operator to a matrix or vector, and provides access to these functions:
// GrB_Vector_apply (w,mask,acc,op,u,d) // w<mask> = accum (w, op(u))
// GrB_Matrix_apply (C,Mask,acc,op,A,d) // C<Mask> = accum (C, op(A))
// GrB_Vector_apply (w,m,acc,unop ,u,d)
// GrB_Vector_apply_BinaryOp1st_TYPE (w,m,acc,binop,x,u,d)
// GrB_Vector_apply_BinaryOp2nd_TYPE (w,m,acc,binop,u,y,d)
// GrB_Vector_apply_IndexOp_TYPE (w,m,acc,idxop,u,y,d)
// GrB_Matrix_apply (C,M,acc,unop ,A,d)
// GrB_Matrix_apply_BinaryOp1st_TYPE (C,M,acc,binop,x,A,d)
// GrB_Matrix_apply_BinaryOp2nd_TYPE (C,M,acc,binop,A,y,d)
// GrB_Matrix_apply_IndexOp_TYPE (C,M,acc,idxop,A,y,d)
#if GxB_STDC_VERSION >= 201112L
// GB_BIND: dispatch a binary-op apply on the type of the bound scalar.  If x
// (the first input) is a GrB_Scalar or a built-in C type, select the
// *_apply_BinaryOp1st_* variant (scalar bound to the first operand);
// otherwise inspect y and select the *_apply_BinaryOp2nd_* variant (scalar
// bound to the second operand, falling back to the GrB_Scalar version).
#define GB_BIND(kind,x,y,...)                                           \
    _Generic                                                            \
    (                                                                   \
        (x),                                                            \
        const GrB_Scalar: GB_CONCAT ( GrB,_,kind,_apply_BinaryOp1st_Scalar), \
              GrB_Scalar: GB_CONCAT ( GrB,_,kind,_apply_BinaryOp1st_Scalar), \
        GB_CASES (, GrB, GB_CONCAT ( kind, _apply_BinaryOp1st,, )) ,    \
        default:                                                        \
            _Generic                                                    \
            (                                                           \
                (y),                                                    \
                GB_CASES (, GrB, GB_CONCAT ( kind , _apply_BinaryOp2nd,, )), \
                default: GB_CONCAT ( GrB,_,kind,_apply_BinaryOp2nd_Scalar)   \
            )                                                           \
    )
// GB_IDXOP: dispatch an index-unary-op apply on the C type of the scalar y;
// a y of any non-built-in type selects the GrB_Scalar variant.
#define GB_IDXOP(kind,A,y,...)                                          \
    _Generic                                                            \
    (                                                                   \
        (y),                                                            \
        GB_CASES (, GrB, GB_CONCAT ( kind, _apply_IndexOp,, )),         \
        default: GB_CONCAT ( GrB, _, kind, _apply_IndexOp_Scalar)       \
    )
// GrB_apply: outer dispatch, first on C (GrB_Vector vs GrB_Matrix), then on
// the operator type (unary, binary via GB_BIND, or index-unary via GB_IDXOP).
// The selected function is then called with the full argument list.
#define GrB_apply(C,Mask,accum,op,...)                                  \
    _Generic                                                            \
    (                                                                   \
        (C),                                                            \
            GrB_Vector :                                                \
                _Generic                                                \
                (                                                       \
                    (op),                                               \
                        GrB_UnaryOp  : GrB_Vector_apply ,               \
                        GrB_BinaryOp : GB_BIND (Vector, __VA_ARGS__),   \
                        GrB_IndexUnaryOp : GB_IDXOP (Vector, __VA_ARGS__) \
                ),                                                      \
            GrB_Matrix :                                                \
                _Generic                                                \
                (                                                       \
                    (op),                                               \
                        GrB_UnaryOp  : GrB_Matrix_apply ,               \
                        GrB_BinaryOp : GB_BIND (Matrix, __VA_ARGS__),   \
                        GrB_IndexUnaryOp : GB_IDXOP (Matrix, __VA_ARGS__) \
                )                                                       \
    )                                                                   \
    (C, Mask, accum, op, __VA_ARGS__)
#endif
//==============================================================================
// GrB_select: matrix and vector selection using an IndexUnaryOp
//==============================================================================
//-------------------------------------------
// vector select using an IndexUnaryOp
//-------------------------------------------
GB_PUBLIC
GrB_Info GrB_Vector_select_Scalar // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
const GrB_Scalar y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_select_BOOL // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
bool y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_select_INT8 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
int8_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_select_INT16 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
int16_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_select_INT32 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
int32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_select_INT64 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
int64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_select_UINT8 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
uint8_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_select_UINT16 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
uint16_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_select_UINT32 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
uint32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_select_UINT64 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
uint64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_select_FP32 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
float y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_select_FP64 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
double y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_select_FC32 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
GxB_FC32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_select_FC64 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
GxB_FC64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_select_UDT // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
const void *y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
//-------------------------------------------
// matrix select using an IndexUnaryOp
//-------------------------------------------
GB_PUBLIC
GrB_Info GrB_Matrix_select_Scalar // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
const GrB_Scalar y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_select_BOOL // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
bool y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_select_INT8 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
int8_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_select_INT16 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
int16_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_select_INT32 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
int32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_select_INT64 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
int64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_select_UINT8 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
uint8_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_select_UINT16 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
uint16_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_select_UINT32 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
uint32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_select_UINT64 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
uint64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_select_FP32 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
float y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_select_FP64 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
double y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_select_FC32 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
GxB_FC32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_select_FC64 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
GxB_FC64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_select_UDT // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
const void *y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
// GrB_select is a generic method that applies an IndexUnaryOp to
// a matrix or vector, using any type of the scalar y.
// GrB_Vector_select_TYPE (w,m,acc,idxop,u,y,d)
// GrB_Matrix_select_TYPE (C,M,acc,idxop,A,y,d)
#if GxB_STDC_VERSION >= 201112L
// GrB_select: generic select.  Dispatches first on C (GrB_Vector vs
// GrB_Matrix), then on the C type of the scalar y to pick the matching
// typed *_select_* function; any non-built-in y type selects the
// GrB_Scalar variant.  The selected function is called with all arguments.
#define GrB_select(C,Mask,accum,op,x,y,d)                               \
    _Generic                                                            \
    (                                                                   \
        (C),                                                            \
            GrB_Vector :                                                \
                _Generic                                                \
                (                                                       \
                    (y),                                                \
                        GB_CASES (, GrB, Vector_select),                \
                        default: GrB_Vector_select_Scalar               \
                ),                                                      \
            GrB_Matrix :                                                \
                _Generic                                                \
                (                                                       \
                    (y),                                                \
                        GB_CASES (, GrB, Matrix_select),                \
                        default: GrB_Matrix_select_Scalar               \
                )                                                       \
    )                                                                   \
    (C, Mask, accum, op, x, y, d)
#endif
//==============================================================================
// GxB_select: matrix and vector selection (historical)
//==============================================================================
// GrB_select with the GrB_IndexUnaryOp operators should be used instead.
GB_PUBLIC
GrB_Info GxB_Vector_select // w<mask> = accum (w, op(u,k))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GxB_SelectOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
const GrB_Scalar Thunk, // optional input for the select operator
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_select // C<Mask> = accum (C, op(A,k)) or op(A',k)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GxB_SelectOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
const GrB_Scalar Thunk, // optional input for the select operator
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
#if GxB_STDC_VERSION >= 201112L
// GxB_select (historical): dispatches only on C (GrB_Vector vs GrB_Matrix);
// the GxB_SelectOp and Thunk arguments are passed through unchanged.
#define GxB_select(C,Mask,accum,op,A,Thunk,desc)                        \
    _Generic                                                            \
    (                                                                   \
        (C),                                                            \
            GrB_Vector   : GxB_Vector_select ,                          \
            GrB_Matrix   : GxB_Matrix_select                            \
    )                                                                   \
    (C, Mask, accum, op, A, Thunk, desc)
#endif
//==============================================================================
// GrB_reduce: matrix and vector reduction
//==============================================================================
// Reduce the entries in a matrix to a vector, a column vector t such that
// t(i) = sum (A (i,:)), and where "sum" is a commutative and associative
// monoid with an identity value. A can be transposed, which reduces down the
// columns instead of the rows.
// For GrB_Matrix_reduce_BinaryOp, the GrB_BinaryOp op must correspond to a
// known built-in monoid:
//
// operator data-types (all built-in)
// ---------------------- ---------------------------
// MIN, MAX INT*, UINT*, FP*
// TIMES, PLUS INT*, UINT*, FP*, FC*
// ANY INT*, UINT*, FP*, FC*, BOOL
// LOR, LAND, LXOR, EQ BOOL
// BOR, BAND, BXOR, BXNOR UINT*
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_Monoid // w<mask> = accum (w,reduce(A))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_Monoid monoid, // reduce operator for t=reduce(A)
const GrB_Matrix A, // first input: matrix A
const GrB_Descriptor desc // descriptor for w, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_BinaryOp // w<mask> = accum (w,reduce(A))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // reduce operator for t=reduce(A)
const GrB_Matrix A, // first input: matrix A
const GrB_Descriptor desc // descriptor for w, mask, and A
) ;
//------------------------------------------------------------------------------
// reduce a vector to a scalar
//------------------------------------------------------------------------------
// Reduce entries in a vector to a scalar, c = accum (c, reduce_to_scalar(u))
GB_PUBLIC
GrB_Info GrB_Vector_reduce_BOOL // c = accum (c, reduce_to_scalar (u))
(
bool *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Vector_reduce_INT8 // c = accum (c, reduce_to_scalar (u))
(
int8_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Vector_reduce_UINT8 // c = accum (c, reduce_to_scalar (u))
(
uint8_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Vector_reduce_INT16 // c = accum (c, reduce_to_scalar (u))
(
int16_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Vector_reduce_UINT16 // c = accum (c, reduce_to_scalar (u))
(
uint16_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Vector_reduce_INT32 // c = accum (c, reduce_to_scalar (u))
(
int32_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Vector_reduce_UINT32 // c = accum (c, reduce_to_scalar (u))
(
uint32_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Vector_reduce_INT64 // c = accum (c, reduce_to_scalar (u))
(
int64_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Vector_reduce_UINT64 // c = accum (c, reduce_to_scalar (u))
(
uint64_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Vector_reduce_FP32 // c = accum (c, reduce_to_scalar (u))
(
float *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Vector_reduce_FP64 // c = accum (c, reduce_to_scalar (u))
(
double *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Vector_reduce_FC32 // c = accum (c, reduce_to_scalar (u))
(
GxB_FC32_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Vector_reduce_FC64 // c = accum (c, reduce_to_scalar (u))
(
GxB_FC64_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Vector_reduce_UDT // c = accum (c, reduce_to_scalar (u))
(
void *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Vector_reduce_Monoid_Scalar // c = accum(c,reduce_to_scalar(u))
(
GrB_Scalar c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Vector_reduce_BinaryOp_Scalar
(
GrB_Scalar c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_BinaryOp op, // binary op to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
//------------------------------------------------------------------------------
// reduce a matrix to a scalar
//------------------------------------------------------------------------------
// Reduce entries in a matrix to a scalar, c = accum (c, reduce_to_scalar(A))
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_BOOL // c = accum (c, reduce_to_scalar (A))
(
bool *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_INT8 // c = accum (c, reduce_to_scalar (A))
(
int8_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_UINT8 // c = accum (c, reduce_to_scalar (A))
(
uint8_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_INT16 // c = accum (c, reduce_to_scalar (A))
(
int16_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_UINT16 // c = accum (c, reduce_to_scalar (A))
(
uint16_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_INT32 // c = accum (c, reduce_to_scalar (A))
(
int32_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_UINT32 // c = accum (c, reduce_to_scalar (A))
(
uint32_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_INT64 // c = accum (c, reduce_to_scalar (A))
(
int64_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_UINT64 // c = accum (c, reduce_to_scalar (A))
(
uint64_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_FP32 // c = accum (c, reduce_to_scalar (A))
(
float *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_FP64 // c = accum (c, reduce_to_scalar (A))
(
double *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_reduce_FC32 // c = accum (c, reduce_to_scalar (A))
(
GxB_FC32_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_reduce_FC64 // c = accum (c, reduce_to_scalar (A))
(
GxB_FC64_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_UDT // c = accum (c, reduce_to_scalar (A))
(
void *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_Monoid_Scalar // c = accum(c,reduce_to_scalar(A))
(
GrB_Scalar c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_BinaryOp_Scalar // c = accum(c,reduce_to_scalar(A))
(
GrB_Scalar c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_BinaryOp op, // binary op to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
//------------------------------------------------------------------------------
// GrB_reduce: generic matrix/vector reduction to a vector or scalar
//------------------------------------------------------------------------------
// GrB_reduce is a generic function that provides access to all GrB_*reduce*
// functions:
// reduce matrix to vector:
// GrB_Matrix_reduce_Monoid (w,mask,acc,mo,A,d) // w<mask> = acc (w,reduce(A))
// GrB_Matrix_reduce_BinaryOp (w,mask,acc,op,A,d) // w<mask> = acc (w,reduce(A))
// reduce matrix or vector to scalar:
// GrB_Vector_reduce_[SCALAR] (c,acc,monoid,u,d) // c = acc (c,reduce(u))
// GrB_Matrix_reduce_[SCALAR] (c,acc,monoid,A,d) // c = acc (c,reduce(A))
// GrB_Vector_reduce_Monoid_Scalar (s,acc,monoid,u,d) // s = acc (s,reduce(u))
// GrB_Matrix_reduce_Monoid_Scalar (s,acc,monoid,A,d) // s = acc (s,reduce(A))
// GrB_Vector_reduce_BinaryOp_Scalar (s,acc,op,u,d) // s = acc (s,reduce(u))
// GrB_Matrix_reduce_BinaryOp_Scalar (s,acc,op,A,d) // s = acc (s,reduce(A))
#if GxB_STDC_VERSION >= 201112L
#define GB_REDUCE_TO_SCALAR(kind,c,op) \
_Generic \
( \
(c), \
GB_CASES (*, GrB, GB_CONCAT ( kind, _reduce,, )), \
default: \
_Generic \
( \
(op), \
const GrB_BinaryOp : \
GB_CONCAT (GrB,_,kind,_reduce_BinaryOp_Scalar),\
GrB_BinaryOp : \
GB_CONCAT (GrB,_,kind,_reduce_BinaryOp_Scalar),\
default: GB_CONCAT (GrB,_,kind,_reduce_Monoid_Scalar) \
) \
)
#define GrB_reduce(arg1,arg2,arg3,arg4,...) \
_Generic \
( \
(arg4), \
const GrB_Vector : GB_REDUCE_TO_SCALAR (Vector, arg1, arg3), \
GrB_Vector : GB_REDUCE_TO_SCALAR (Vector, arg1, arg3), \
const GrB_Matrix : GB_REDUCE_TO_SCALAR (Matrix, arg1, arg3), \
GrB_Matrix : GB_REDUCE_TO_SCALAR (Matrix, arg1, arg3), \
const GrB_Monoid : GrB_Matrix_reduce_Monoid , \
GrB_Monoid : GrB_Matrix_reduce_Monoid , \
const GrB_BinaryOp : GrB_Matrix_reduce_BinaryOp , \
GrB_BinaryOp : GrB_Matrix_reduce_BinaryOp \
) \
(arg1, arg2, arg3, arg4, __VA_ARGS__)
#endif
//==============================================================================
// GrB_transpose: matrix transpose
//==============================================================================
GB_PUBLIC
GrB_Info GrB_transpose // C<Mask> = accum (C, A')
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_Matrix A, // first input: matrix A
const GrB_Descriptor desc // descriptor for C, Mask, and A
) ;
//==============================================================================
// GrB_kronecker: Kronecker product
//==============================================================================
// GxB_kron is historical; use GrB_kronecker instead
GB_PUBLIC
GrB_Info GxB_kron // C<Mask> = accum(C,kron(A,B)) (historical)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // defines '*' for T=kron(A,B)
const GrB_Matrix A, // first input: matrix A
const GrB_Matrix B, // second input: matrix B
const GrB_Descriptor desc // descriptor for C, Mask, A, and B
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_kronecker_BinaryOp // C<M> = accum (C, kron(A,B))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix M, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // defines '*' for T=kron(A,B)
const GrB_Matrix A, // first input: matrix A
const GrB_Matrix B, // second input: matrix B
const GrB_Descriptor desc // descriptor for C, M, A, and B
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_kronecker_Monoid // C<M> = accum (C, kron(A,B))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix M, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_Monoid monoid, // defines '*' for T=kron(A,B)
const GrB_Matrix A, // first input: matrix A
const GrB_Matrix B, // second input: matrix B
const GrB_Descriptor desc // descriptor for C, M, A, and B
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_kronecker_Semiring // C<M> = accum (C, kron(A,B))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix M, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_Semiring semiring, // defines '*' for T=kron(A,B)
const GrB_Matrix A, // first input: matrix A
const GrB_Matrix B, // second input: matrix B
const GrB_Descriptor desc // descriptor for C, M, A, and B
) ;
#if GxB_STDC_VERSION >= 201112L
#define GrB_kronecker(C,Mask,accum,op,A,B,desc) \
_Generic \
( \
(op), \
const GrB_Semiring : GrB_Matrix_kronecker_Semiring , \
GrB_Semiring : GrB_Matrix_kronecker_Semiring , \
const GrB_Monoid : GrB_Matrix_kronecker_Monoid , \
GrB_Monoid : GrB_Matrix_kronecker_Monoid , \
const GrB_BinaryOp : GrB_Matrix_kronecker_BinaryOp , \
GrB_BinaryOp : GrB_Matrix_kronecker_BinaryOp \
) \
(C, Mask, accum, op, A, B, desc)
#endif
//==============================================================================
// GrB_Monoid: built-in monoids
//==============================================================================
GB_PUBLIC GrB_Monoid
//--------------------------------------------------------------------------
// 10 MIN monoids: (not for complex types)
//--------------------------------------------------------------------------
// GxB_MIN monoids, historical, use GrB_MIN_MONOID_* instead:
GxB_MIN_INT8_MONOID, // identity: INT8_MAX terminal: INT8_MIN
GxB_MIN_INT16_MONOID, // identity: INT16_MAX terminal: INT16_MIN
GxB_MIN_INT32_MONOID, // identity: INT32_MAX terminal: INT32_MIN
GxB_MIN_INT64_MONOID, // identity: INT64_MAX terminal: INT64_MIN
GxB_MIN_UINT8_MONOID, // identity: UINT8_MAX terminal: 0
GxB_MIN_UINT16_MONOID, // identity: UINT16_MAX terminal: 0
GxB_MIN_UINT32_MONOID, // identity: UINT32_MAX terminal: 0
GxB_MIN_UINT64_MONOID, // identity: UINT64_MAX terminal: 0
GxB_MIN_FP32_MONOID, // identity: INFINITY terminal: -INFINITY
GxB_MIN_FP64_MONOID, // identity: INFINITY terminal: -INFINITY
// preferred names from the v1.3 spec:
GrB_MIN_MONOID_INT8, // identity: INT8_MAX terminal: INT8_MIN
GrB_MIN_MONOID_INT16, // identity: INT16_MAX terminal: INT16_MIN
GrB_MIN_MONOID_INT32, // identity: INT32_MAX terminal: INT32_MIN
GrB_MIN_MONOID_INT64, // identity: INT64_MAX terminal: INT64_MIN
GrB_MIN_MONOID_UINT8, // identity: UINT8_MAX terminal: 0
GrB_MIN_MONOID_UINT16, // identity: UINT16_MAX terminal: 0
GrB_MIN_MONOID_UINT32, // identity: UINT32_MAX terminal: 0
GrB_MIN_MONOID_UINT64, // identity: UINT64_MAX terminal: 0
GrB_MIN_MONOID_FP32, // identity: INFINITY terminal: -INFINITY
GrB_MIN_MONOID_FP64, // identity: INFINITY terminal: -INFINITY
//--------------------------------------------------------------------------
// 10 MAX monoids:
//--------------------------------------------------------------------------
// GxB_MAX monoids, historical, use GrB_MAX_MONOID_* instead:
GxB_MAX_INT8_MONOID, // identity: INT8_MIN terminal: INT8_MAX
GxB_MAX_INT16_MONOID, // identity: INT16_MIN terminal: INT16_MAX
GxB_MAX_INT32_MONOID, // identity: INT32_MIN terminal: INT32_MAX
GxB_MAX_INT64_MONOID, // identity: INT64_MIN terminal: INT64_MAX
GxB_MAX_UINT8_MONOID, // identity: 0 terminal: UINT8_MAX
GxB_MAX_UINT16_MONOID, // identity: 0 terminal: UINT16_MAX
GxB_MAX_UINT32_MONOID, // identity: 0 terminal: UINT32_MAX
GxB_MAX_UINT64_MONOID, // identity: 0 terminal: UINT64_MAX
GxB_MAX_FP32_MONOID, // identity: -INFINITY terminal: INFINITY
GxB_MAX_FP64_MONOID, // identity: -INFINITY terminal: INFINITY
// preferred names from the v1.3 spec:
GrB_MAX_MONOID_INT8, // identity: INT8_MIN terminal: INT8_MAX
GrB_MAX_MONOID_INT16, // identity: INT16_MIN terminal: INT16_MAX
GrB_MAX_MONOID_INT32, // identity: INT32_MIN terminal: INT32_MAX
GrB_MAX_MONOID_INT64, // identity: INT64_MIN terminal: INT64_MAX
GrB_MAX_MONOID_UINT8, // identity: 0 terminal: UINT8_MAX
GrB_MAX_MONOID_UINT16, // identity: 0 terminal: UINT16_MAX
GrB_MAX_MONOID_UINT32, // identity: 0 terminal: UINT32_MAX
GrB_MAX_MONOID_UINT64, // identity: 0 terminal: UINT64_MAX
GrB_MAX_MONOID_FP32, // identity: -INFINITY terminal: INFINITY
GrB_MAX_MONOID_FP64, // identity: -INFINITY terminal: INFINITY
//--------------------------------------------------------------------------
// 12 PLUS monoids:
//--------------------------------------------------------------------------
// GxB_PLUS monoids, historical, use GrB_PLUS_MONOID_* instead:
GxB_PLUS_INT8_MONOID, // identity: 0
GxB_PLUS_INT16_MONOID, // identity: 0
GxB_PLUS_INT32_MONOID, // identity: 0
GxB_PLUS_INT64_MONOID, // identity: 0
GxB_PLUS_UINT8_MONOID, // identity: 0
GxB_PLUS_UINT16_MONOID, // identity: 0
GxB_PLUS_UINT32_MONOID, // identity: 0
GxB_PLUS_UINT64_MONOID, // identity: 0
GxB_PLUS_FP32_MONOID, // identity: 0
GxB_PLUS_FP64_MONOID, // identity: 0
// preferred names from the v1.3 spec:
GrB_PLUS_MONOID_INT8, // identity: 0
GrB_PLUS_MONOID_INT16, // identity: 0
GrB_PLUS_MONOID_INT32, // identity: 0
GrB_PLUS_MONOID_INT64, // identity: 0
GrB_PLUS_MONOID_UINT8, // identity: 0
GrB_PLUS_MONOID_UINT16, // identity: 0
GrB_PLUS_MONOID_UINT32, // identity: 0
GrB_PLUS_MONOID_UINT64, // identity: 0
GrB_PLUS_MONOID_FP32, // identity: 0
GrB_PLUS_MONOID_FP64, // identity: 0
// complex monoids:
GxB_PLUS_FC32_MONOID, // identity: 0
GxB_PLUS_FC64_MONOID, // identity: 0
//--------------------------------------------------------------------------
// 12 TIMES monoids: identity value is 1, int* and uint* are terminal
//--------------------------------------------------------------------------
// GxB_TIMES monoids, historical, use GrB_TIMES_MONOID_* instead:
GxB_TIMES_INT8_MONOID, // identity: 1 terminal: 0
GxB_TIMES_INT16_MONOID, // identity: 1 terminal: 0
GxB_TIMES_INT32_MONOID, // identity: 1 terminal: 0
GxB_TIMES_INT64_MONOID, // identity: 1 terminal: 0
GxB_TIMES_UINT8_MONOID, // identity: 1 terminal: 0
GxB_TIMES_UINT16_MONOID, // identity: 1 terminal: 0
GxB_TIMES_UINT32_MONOID, // identity: 1 terminal: 0
GxB_TIMES_UINT64_MONOID, // identity: 1 terminal: 0
GxB_TIMES_FP32_MONOID, // identity: 1
GxB_TIMES_FP64_MONOID, // identity: 1
// preferred names from the v1.3 spec:
GrB_TIMES_MONOID_INT8, // identity: 1 terminal: 0
GrB_TIMES_MONOID_INT16, // identity: 1 terminal: 0
GrB_TIMES_MONOID_INT32, // identity: 1 terminal: 0
GrB_TIMES_MONOID_INT64, // identity: 1 terminal: 0
GrB_TIMES_MONOID_UINT8, // identity: 1 terminal: 0
GrB_TIMES_MONOID_UINT16, // identity: 1 terminal: 0
GrB_TIMES_MONOID_UINT32, // identity: 1 terminal: 0
GrB_TIMES_MONOID_UINT64, // identity: 1 terminal: 0
GrB_TIMES_MONOID_FP32, // identity: 1
GrB_TIMES_MONOID_FP64, // identity: 1
// complex monoids:
GxB_TIMES_FC32_MONOID, // identity: 1
GxB_TIMES_FC64_MONOID, // identity: 1
//--------------------------------------------------------------------------
// 13 ANY monoids:
//--------------------------------------------------------------------------
GxB_ANY_BOOL_MONOID, // identity: any value terminal: any value
GxB_ANY_INT8_MONOID, // identity: any value terminal: any value
GxB_ANY_INT16_MONOID, // identity: any value terminal: any value
GxB_ANY_INT32_MONOID, // identity: any value terminal: any value
GxB_ANY_INT64_MONOID, // identity: any value terminal: any value
GxB_ANY_UINT8_MONOID, // identity: any value terminal: any value
GxB_ANY_UINT16_MONOID, // identity: any value terminal: any value
GxB_ANY_UINT32_MONOID, // identity: any value terminal: any value
GxB_ANY_UINT64_MONOID, // identity: any value terminal: any value
GxB_ANY_FP32_MONOID, // identity: any value terminal: any value
GxB_ANY_FP64_MONOID, // identity: any value terminal: any value
GxB_ANY_FC32_MONOID, // identity: any value terminal: any value
GxB_ANY_FC64_MONOID, // identity: any value terminal: any value
//--------------------------------------------------------------------------
// 4 Boolean monoids: (see also the GxB_ANY_BOOL_MONOID above)
//--------------------------------------------------------------------------
// GxB_* boolean monoids, historical, use GrB_* instead:
GxB_LOR_BOOL_MONOID, // identity: false terminal: true
GxB_LAND_BOOL_MONOID, // identity: true terminal: false
GxB_LXOR_BOOL_MONOID, // identity: false
GxB_LXNOR_BOOL_MONOID, // identity: true
GxB_EQ_BOOL_MONOID, // (alternative name for GrB_LXNOR_MONOID_BOOL)
// preferred names from the v1.3 spec:
GrB_LOR_MONOID_BOOL, // identity: false terminal: true
GrB_LAND_MONOID_BOOL, // identity: true terminal: false
GrB_LXOR_MONOID_BOOL, // identity: false
GrB_LXNOR_MONOID_BOOL, // identity: true
//--------------------------------------------------------------------------
// 16 Bitwise-or monoids:
//--------------------------------------------------------------------------
// BOR monoids (bitwise or):
GxB_BOR_UINT8_MONOID, // identity: 0 terminal: 0xFF
GxB_BOR_UINT16_MONOID, // identity: 0 terminal: 0xFFFF
GxB_BOR_UINT32_MONOID, // identity: 0 terminal: 0xFFFFFFFF
GxB_BOR_UINT64_MONOID, // identity: 0 terminal: 0xFFFFFFFFFFFFFFFF
// BAND monoids (bitwise and):
GxB_BAND_UINT8_MONOID, // identity: 0xFF terminal: 0
GxB_BAND_UINT16_MONOID, // identity: 0xFFFF terminal: 0
GxB_BAND_UINT32_MONOID, // identity: 0xFFFFFFFF terminal: 0
GxB_BAND_UINT64_MONOID, // identity: 0xFFFFFFFFFFFFFFFF terminal: 0
// BXOR monoids (bitwise xor):
GxB_BXOR_UINT8_MONOID, // identity: 0
GxB_BXOR_UINT16_MONOID, // identity: 0
GxB_BXOR_UINT32_MONOID, // identity: 0
GxB_BXOR_UINT64_MONOID, // identity: 0
// BXNOR monoids (bitwise xnor):
GxB_BXNOR_UINT8_MONOID, // identity: 0xFF
GxB_BXNOR_UINT16_MONOID, // identity: 0xFFFF
GxB_BXNOR_UINT32_MONOID, // identity: 0xFFFFFFFF
GxB_BXNOR_UINT64_MONOID ; // identity: 0xFFFFFFFFFFFFFFFF
//==============================================================================
// GrB_Semiring: built-in semirings
//==============================================================================
// Using built-in types and operators, SuiteSparse:GraphBLAS provides
// 1553 pre-defined, built-in semirings:
// 1000 semirings with a multiply operator TxT -> T where T is non-Boolean,
// from the complete cross product of:
// 5 monoids: MIN, MAX, PLUS, TIMES, ANY
// 20 multiply operators:
// FIRST, SECOND, PAIR (=ONEB), MIN, MAX, PLUS, MINUS, TIMES, DIV,
// RDIV, RMINUS
// ISEQ, ISNE, ISGT, ISLT, ISGE, ISLE,
// LOR, LAND, LXOR
// 10 non-Boolean real types, T
//
// Note that min_pair, max_pair, times_pair are all identical to any_pair.
// These 30 semirings are named below, but are internally remapped to
// their corresponding any_pair semiring.
// 300 semirings with a comparator TxT -> bool, where T is
// non-Boolean, from the complete cross product of:
// 5 Boolean monoids: LAND, LOR, LXOR, EQ (=LXNOR), ANY
// 6 multiply operators: EQ, NE, GT, LT, GE, LE
// 10 non-Boolean real types, T
// 55 semirings with purely Boolean types, bool x bool -> bool, from the
// complete cross product of:
// 5 Boolean monoids LAND, LOR, LXOR, EQ (=LXNOR), ANY
// 11 multiply operators:
// FIRST, SECOND, LOR, LAND, LXOR, EQ (=LXNOR), GT, LT, GE, LE,
// PAIR (=ONEB)
//
// Note that lor_pair, land_pair, and eq_pair are all identical to
// any_pair. These 3 semirings are named below, but are internally
// remapped to any_pair_bool semiring.
// 54 complex semirings: TxT -> T where T is float complex or double complex:
// 3 complex monoids: PLUS, TIMES, ANY
// 9 complex multiply operators:
// FIRST, SECOND, PAIR (=ONEB), PLUS, MINUS, TIMES, DIV, RDIV, RMINUS
// 2 complex types
//
// Note that times_pair is identical to any_pair.
// These 2 semirings are named below, but are internally remapped to
// their corresponding any_pair semiring.
// 64 bitwise semirings: TxT -> T where T is an unsigned integer:
// 4 bitwise monoids: BOR, BAND, BXOR, BXNOR
// 4 bitwise multiply operators: BOR, BAND, BXOR, BXNOR
// 4 unsigned integer types: UINT8, UINT16, UINT32, UINT64
// 80 positional semirings: XxX -> T where T is int64 or int32, and the type of
// X is ignored:
// 5 monoids: MIN, MAX, PLUS, TIMES, ANY
// 8 multiply operators:
// FIRSTI, FIRSTI1, FIRSTJ, FIRSTJ1,
// SECONDI, SECONDI1, SECONDJ, SECONDJ1
// 2 types: int32, int64
// The ANY operator is also valid to use as a multiplicative operator in a
// semiring, but serves no purpose in that case. The ANY operator is meant as
// a fast additive operator for a monoid, that terminates, or short-circuits,
// as soon as any value is found. A valid user semiring can be constructed
// with ANY as the multiply operator, but they are not predefined below.
// Likewise, additional built-in operators can be used as multiplicative
// operators for floating-point semirings (POW, ATAN2, HYPOT, ...) and many
// more semirings can be constructed from bitwise monoids and many integer
// binary (non-bitwise) multiplicative operators, but these are not
// pre-defined.
// In the names below, each semiring has a name of the form GxB_add_mult_T
// where add is the additive monoid, mult is the multiply operator, and T is
// the type. The type T is always the type of x and y for the z=mult(x,y)
// operator. The monoid's three types and the ztype of the mult operator are
// always the same. This is the type T for the first set, and Boolean for
// the second and third sets of semirings.
// 1553 = 1000 + 300 + 55 + 54 + 64 + 80 semirings are named below, but 35 = 30
// + 3 + 2 are identical to the corresponding any_pair semirings of the same
// type. For positional semirings, the multiply ops FIRSTJ and SECONDI are
// identical, as are FIRSTJ1 and SECONDI1. These semirings still appear as
// predefined, for convenience.
GB_PUBLIC GrB_Semiring
//------------------------------------------------------------------------------
// 1000 non-Boolean semirings where all types are the same, given by suffix _T
//------------------------------------------------------------------------------
// semirings with multiply op: z = FIRST (x,y), all types x,y,z the same:
GxB_MIN_FIRST_INT8 , GxB_MAX_FIRST_INT8 , GxB_PLUS_FIRST_INT8 , GxB_TIMES_FIRST_INT8 , GxB_ANY_FIRST_INT8 ,
GxB_MIN_FIRST_INT16 , GxB_MAX_FIRST_INT16 , GxB_PLUS_FIRST_INT16 , GxB_TIMES_FIRST_INT16 , GxB_ANY_FIRST_INT16 ,
GxB_MIN_FIRST_INT32 , GxB_MAX_FIRST_INT32 , GxB_PLUS_FIRST_INT32 , GxB_TIMES_FIRST_INT32 , GxB_ANY_FIRST_INT32 ,
GxB_MIN_FIRST_INT64 , GxB_MAX_FIRST_INT64 , GxB_PLUS_FIRST_INT64 , GxB_TIMES_FIRST_INT64 , GxB_ANY_FIRST_INT64 ,
GxB_MIN_FIRST_UINT8 , GxB_MAX_FIRST_UINT8 , GxB_PLUS_FIRST_UINT8 , GxB_TIMES_FIRST_UINT8 , GxB_ANY_FIRST_UINT8 ,
GxB_MIN_FIRST_UINT16 , GxB_MAX_FIRST_UINT16 , GxB_PLUS_FIRST_UINT16 , GxB_TIMES_FIRST_UINT16 , GxB_ANY_FIRST_UINT16 ,
GxB_MIN_FIRST_UINT32 , GxB_MAX_FIRST_UINT32 , GxB_PLUS_FIRST_UINT32 , GxB_TIMES_FIRST_UINT32 , GxB_ANY_FIRST_UINT32 ,
GxB_MIN_FIRST_UINT64 , GxB_MAX_FIRST_UINT64 , GxB_PLUS_FIRST_UINT64 , GxB_TIMES_FIRST_UINT64 , GxB_ANY_FIRST_UINT64 ,
GxB_MIN_FIRST_FP32 , GxB_MAX_FIRST_FP32 , GxB_PLUS_FIRST_FP32 , GxB_TIMES_FIRST_FP32 , GxB_ANY_FIRST_FP32 ,
GxB_MIN_FIRST_FP64 , GxB_MAX_FIRST_FP64 , GxB_PLUS_FIRST_FP64 , GxB_TIMES_FIRST_FP64 , GxB_ANY_FIRST_FP64 ,
// semirings with multiply op: z = SECOND (x,y), all types x,y,z the same:
GxB_MIN_SECOND_INT8 , GxB_MAX_SECOND_INT8 , GxB_PLUS_SECOND_INT8 , GxB_TIMES_SECOND_INT8 , GxB_ANY_SECOND_INT8 ,
GxB_MIN_SECOND_INT16 , GxB_MAX_SECOND_INT16 , GxB_PLUS_SECOND_INT16 , GxB_TIMES_SECOND_INT16 , GxB_ANY_SECOND_INT16 ,
GxB_MIN_SECOND_INT32 , GxB_MAX_SECOND_INT32 , GxB_PLUS_SECOND_INT32 , GxB_TIMES_SECOND_INT32 , GxB_ANY_SECOND_INT32 ,
GxB_MIN_SECOND_INT64 , GxB_MAX_SECOND_INT64 , GxB_PLUS_SECOND_INT64 , GxB_TIMES_SECOND_INT64 , GxB_ANY_SECOND_INT64 ,
GxB_MIN_SECOND_UINT8 , GxB_MAX_SECOND_UINT8 , GxB_PLUS_SECOND_UINT8 , GxB_TIMES_SECOND_UINT8 , GxB_ANY_SECOND_UINT8 ,
GxB_MIN_SECOND_UINT16 , GxB_MAX_SECOND_UINT16 , GxB_PLUS_SECOND_UINT16 , GxB_TIMES_SECOND_UINT16, GxB_ANY_SECOND_UINT16 ,
GxB_MIN_SECOND_UINT32 , GxB_MAX_SECOND_UINT32 , GxB_PLUS_SECOND_UINT32 , GxB_TIMES_SECOND_UINT32, GxB_ANY_SECOND_UINT32 ,
GxB_MIN_SECOND_UINT64 , GxB_MAX_SECOND_UINT64 , GxB_PLUS_SECOND_UINT64 , GxB_TIMES_SECOND_UINT64, GxB_ANY_SECOND_UINT64 ,
GxB_MIN_SECOND_FP32 , GxB_MAX_SECOND_FP32 , GxB_PLUS_SECOND_FP32 , GxB_TIMES_SECOND_FP32 , GxB_ANY_SECOND_FP32 ,
GxB_MIN_SECOND_FP64 , GxB_MAX_SECOND_FP64 , GxB_PLUS_SECOND_FP64 , GxB_TIMES_SECOND_FP64 , GxB_ANY_SECOND_FP64 ,
// semirings with multiply op: z = PAIR (x,y), all types x,y,z the same:
// (note that min_pair, max_pair, times_pair are all identical to any_pair, and are marked below)
GxB_MIN_PAIR_INT8 /**/, GxB_MAX_PAIR_INT8 /**/, GxB_PLUS_PAIR_INT8 , GxB_TIMES_PAIR_INT8 /**/, GxB_ANY_PAIR_INT8 ,
GxB_MIN_PAIR_INT16 /**/, GxB_MAX_PAIR_INT16 /**/, GxB_PLUS_PAIR_INT16 , GxB_TIMES_PAIR_INT16 /**/, GxB_ANY_PAIR_INT16 ,
GxB_MIN_PAIR_INT32 /**/, GxB_MAX_PAIR_INT32 /**/, GxB_PLUS_PAIR_INT32 , GxB_TIMES_PAIR_INT32 /**/, GxB_ANY_PAIR_INT32 ,
GxB_MIN_PAIR_INT64 /**/, GxB_MAX_PAIR_INT64 /**/, GxB_PLUS_PAIR_INT64 , GxB_TIMES_PAIR_INT64 /**/, GxB_ANY_PAIR_INT64 ,
GxB_MIN_PAIR_UINT8 /**/, GxB_MAX_PAIR_UINT8 /**/, GxB_PLUS_PAIR_UINT8 , GxB_TIMES_PAIR_UINT8 /**/, GxB_ANY_PAIR_UINT8 ,
GxB_MIN_PAIR_UINT16/**/, GxB_MAX_PAIR_UINT16/**/, GxB_PLUS_PAIR_UINT16 , GxB_TIMES_PAIR_UINT16/**/, GxB_ANY_PAIR_UINT16 ,
GxB_MIN_PAIR_UINT32/**/, GxB_MAX_PAIR_UINT32/**/, GxB_PLUS_PAIR_UINT32 , GxB_TIMES_PAIR_UINT32/**/, GxB_ANY_PAIR_UINT32 ,
GxB_MIN_PAIR_UINT64/**/, GxB_MAX_PAIR_UINT64/**/, GxB_PLUS_PAIR_UINT64 , GxB_TIMES_PAIR_UINT64/**/, GxB_ANY_PAIR_UINT64 ,
GxB_MIN_PAIR_FP32 /**/, GxB_MAX_PAIR_FP32 /**/, GxB_PLUS_PAIR_FP32 , GxB_TIMES_PAIR_FP32 /**/, GxB_ANY_PAIR_FP32 ,
GxB_MIN_PAIR_FP64 /**/, GxB_MAX_PAIR_FP64 /**/, GxB_PLUS_PAIR_FP64 , GxB_TIMES_PAIR_FP64 /**/, GxB_ANY_PAIR_FP64 ,
// semirings with multiply op: z = MIN (x,y), all types x,y,z the same:
GxB_MIN_MIN_INT8 , GxB_MAX_MIN_INT8 , GxB_PLUS_MIN_INT8 , GxB_TIMES_MIN_INT8 , GxB_ANY_MIN_INT8 ,
GxB_MIN_MIN_INT16 , GxB_MAX_MIN_INT16 , GxB_PLUS_MIN_INT16 , GxB_TIMES_MIN_INT16 , GxB_ANY_MIN_INT16 ,
GxB_MIN_MIN_INT32 , GxB_MAX_MIN_INT32 , GxB_PLUS_MIN_INT32 , GxB_TIMES_MIN_INT32 , GxB_ANY_MIN_INT32 ,
GxB_MIN_MIN_INT64 , GxB_MAX_MIN_INT64 , GxB_PLUS_MIN_INT64 , GxB_TIMES_MIN_INT64 , GxB_ANY_MIN_INT64 ,
GxB_MIN_MIN_UINT8 , GxB_MAX_MIN_UINT8 , GxB_PLUS_MIN_UINT8 , GxB_TIMES_MIN_UINT8 , GxB_ANY_MIN_UINT8 ,
GxB_MIN_MIN_UINT16 , GxB_MAX_MIN_UINT16 , GxB_PLUS_MIN_UINT16 , GxB_TIMES_MIN_UINT16 , GxB_ANY_MIN_UINT16 ,
GxB_MIN_MIN_UINT32 , GxB_MAX_MIN_UINT32 , GxB_PLUS_MIN_UINT32 , GxB_TIMES_MIN_UINT32 , GxB_ANY_MIN_UINT32 ,
GxB_MIN_MIN_UINT64 , GxB_MAX_MIN_UINT64 , GxB_PLUS_MIN_UINT64 , GxB_TIMES_MIN_UINT64 , GxB_ANY_MIN_UINT64 ,
GxB_MIN_MIN_FP32 , GxB_MAX_MIN_FP32 , GxB_PLUS_MIN_FP32 , GxB_TIMES_MIN_FP32 , GxB_ANY_MIN_FP32 ,
GxB_MIN_MIN_FP64 , GxB_MAX_MIN_FP64 , GxB_PLUS_MIN_FP64 , GxB_TIMES_MIN_FP64 , GxB_ANY_MIN_FP64 ,
// semirings with multiply op: z = MAX (x,y), all types x,y,z the same:
GxB_MIN_MAX_INT8 , GxB_MAX_MAX_INT8 , GxB_PLUS_MAX_INT8 , GxB_TIMES_MAX_INT8 , GxB_ANY_MAX_INT8 ,
GxB_MIN_MAX_INT16 , GxB_MAX_MAX_INT16 , GxB_PLUS_MAX_INT16 , GxB_TIMES_MAX_INT16 , GxB_ANY_MAX_INT16 ,
GxB_MIN_MAX_INT32 , GxB_MAX_MAX_INT32 , GxB_PLUS_MAX_INT32 , GxB_TIMES_MAX_INT32 , GxB_ANY_MAX_INT32 ,
GxB_MIN_MAX_INT64 , GxB_MAX_MAX_INT64 , GxB_PLUS_MAX_INT64 , GxB_TIMES_MAX_INT64 , GxB_ANY_MAX_INT64 ,
GxB_MIN_MAX_UINT8 , GxB_MAX_MAX_UINT8 , GxB_PLUS_MAX_UINT8 , GxB_TIMES_MAX_UINT8 , GxB_ANY_MAX_UINT8 ,
GxB_MIN_MAX_UINT16 , GxB_MAX_MAX_UINT16 , GxB_PLUS_MAX_UINT16 , GxB_TIMES_MAX_UINT16 , GxB_ANY_MAX_UINT16 ,
GxB_MIN_MAX_UINT32 , GxB_MAX_MAX_UINT32 , GxB_PLUS_MAX_UINT32 , GxB_TIMES_MAX_UINT32 , GxB_ANY_MAX_UINT32 ,
GxB_MIN_MAX_UINT64 , GxB_MAX_MAX_UINT64 , GxB_PLUS_MAX_UINT64 , GxB_TIMES_MAX_UINT64 , GxB_ANY_MAX_UINT64 ,
GxB_MIN_MAX_FP32 , GxB_MAX_MAX_FP32 , GxB_PLUS_MAX_FP32 , GxB_TIMES_MAX_FP32 , GxB_ANY_MAX_FP32 ,
GxB_MIN_MAX_FP64 , GxB_MAX_MAX_FP64 , GxB_PLUS_MAX_FP64 , GxB_TIMES_MAX_FP64 , GxB_ANY_MAX_FP64 ,
// semirings with multiply op: z = PLUS (x,y), all types x,y,z the same:
GxB_MIN_PLUS_INT8 , GxB_MAX_PLUS_INT8 , GxB_PLUS_PLUS_INT8 , GxB_TIMES_PLUS_INT8 , GxB_ANY_PLUS_INT8 ,
GxB_MIN_PLUS_INT16 , GxB_MAX_PLUS_INT16 , GxB_PLUS_PLUS_INT16 , GxB_TIMES_PLUS_INT16 , GxB_ANY_PLUS_INT16 ,
GxB_MIN_PLUS_INT32 , GxB_MAX_PLUS_INT32 , GxB_PLUS_PLUS_INT32 , GxB_TIMES_PLUS_INT32 , GxB_ANY_PLUS_INT32 ,
GxB_MIN_PLUS_INT64 , GxB_MAX_PLUS_INT64 , GxB_PLUS_PLUS_INT64 , GxB_TIMES_PLUS_INT64 , GxB_ANY_PLUS_INT64 ,
GxB_MIN_PLUS_UINT8 , GxB_MAX_PLUS_UINT8 , GxB_PLUS_PLUS_UINT8 , GxB_TIMES_PLUS_UINT8 , GxB_ANY_PLUS_UINT8 ,
GxB_MIN_PLUS_UINT16 , GxB_MAX_PLUS_UINT16 , GxB_PLUS_PLUS_UINT16 , GxB_TIMES_PLUS_UINT16 , GxB_ANY_PLUS_UINT16 ,
GxB_MIN_PLUS_UINT32 , GxB_MAX_PLUS_UINT32 , GxB_PLUS_PLUS_UINT32 , GxB_TIMES_PLUS_UINT32 , GxB_ANY_PLUS_UINT32 ,
GxB_MIN_PLUS_UINT64 , GxB_MAX_PLUS_UINT64 , GxB_PLUS_PLUS_UINT64 , GxB_TIMES_PLUS_UINT64 , GxB_ANY_PLUS_UINT64 ,
GxB_MIN_PLUS_FP32 , GxB_MAX_PLUS_FP32 , GxB_PLUS_PLUS_FP32 , GxB_TIMES_PLUS_FP32 , GxB_ANY_PLUS_FP32 ,
GxB_MIN_PLUS_FP64 , GxB_MAX_PLUS_FP64 , GxB_PLUS_PLUS_FP64 , GxB_TIMES_PLUS_FP64 , GxB_ANY_PLUS_FP64 ,
// semirings with multiply op: z = MINUS (x,y), all types x,y,z the same:
GxB_MIN_MINUS_INT8 , GxB_MAX_MINUS_INT8 , GxB_PLUS_MINUS_INT8 , GxB_TIMES_MINUS_INT8 , GxB_ANY_MINUS_INT8 ,
GxB_MIN_MINUS_INT16 , GxB_MAX_MINUS_INT16 , GxB_PLUS_MINUS_INT16 , GxB_TIMES_MINUS_INT16 , GxB_ANY_MINUS_INT16 ,
GxB_MIN_MINUS_INT32 , GxB_MAX_MINUS_INT32 , GxB_PLUS_MINUS_INT32 , GxB_TIMES_MINUS_INT32 , GxB_ANY_MINUS_INT32 ,
GxB_MIN_MINUS_INT64 , GxB_MAX_MINUS_INT64 , GxB_PLUS_MINUS_INT64 , GxB_TIMES_MINUS_INT64 , GxB_ANY_MINUS_INT64 ,
GxB_MIN_MINUS_UINT8 , GxB_MAX_MINUS_UINT8 , GxB_PLUS_MINUS_UINT8 , GxB_TIMES_MINUS_UINT8 , GxB_ANY_MINUS_UINT8 ,
GxB_MIN_MINUS_UINT16 , GxB_MAX_MINUS_UINT16 , GxB_PLUS_MINUS_UINT16 , GxB_TIMES_MINUS_UINT16 , GxB_ANY_MINUS_UINT16 ,
GxB_MIN_MINUS_UINT32 , GxB_MAX_MINUS_UINT32 , GxB_PLUS_MINUS_UINT32 , GxB_TIMES_MINUS_UINT32 , GxB_ANY_MINUS_UINT32 ,
GxB_MIN_MINUS_UINT64 , GxB_MAX_MINUS_UINT64 , GxB_PLUS_MINUS_UINT64 , GxB_TIMES_MINUS_UINT64 , GxB_ANY_MINUS_UINT64 ,
GxB_MIN_MINUS_FP32 , GxB_MAX_MINUS_FP32 , GxB_PLUS_MINUS_FP32 , GxB_TIMES_MINUS_FP32 , GxB_ANY_MINUS_FP32 ,
GxB_MIN_MINUS_FP64 , GxB_MAX_MINUS_FP64 , GxB_PLUS_MINUS_FP64 , GxB_TIMES_MINUS_FP64 , GxB_ANY_MINUS_FP64 ,
// semirings with multiply op: z = TIMES (x,y), all types x,y,z the same:
GxB_MIN_TIMES_INT8 , GxB_MAX_TIMES_INT8 , GxB_PLUS_TIMES_INT8 , GxB_TIMES_TIMES_INT8 , GxB_ANY_TIMES_INT8 ,
GxB_MIN_TIMES_INT16 , GxB_MAX_TIMES_INT16 , GxB_PLUS_TIMES_INT16 , GxB_TIMES_TIMES_INT16 , GxB_ANY_TIMES_INT16 ,
GxB_MIN_TIMES_INT32 , GxB_MAX_TIMES_INT32 , GxB_PLUS_TIMES_INT32 , GxB_TIMES_TIMES_INT32 , GxB_ANY_TIMES_INT32 ,
GxB_MIN_TIMES_INT64 , GxB_MAX_TIMES_INT64 , GxB_PLUS_TIMES_INT64 , GxB_TIMES_TIMES_INT64 , GxB_ANY_TIMES_INT64 ,
GxB_MIN_TIMES_UINT8 , GxB_MAX_TIMES_UINT8 , GxB_PLUS_TIMES_UINT8 , GxB_TIMES_TIMES_UINT8 , GxB_ANY_TIMES_UINT8 ,
GxB_MIN_TIMES_UINT16 , GxB_MAX_TIMES_UINT16 , GxB_PLUS_TIMES_UINT16 , GxB_TIMES_TIMES_UINT16 , GxB_ANY_TIMES_UINT16 ,
GxB_MIN_TIMES_UINT32 , GxB_MAX_TIMES_UINT32 , GxB_PLUS_TIMES_UINT32 , GxB_TIMES_TIMES_UINT32 , GxB_ANY_TIMES_UINT32 ,
GxB_MIN_TIMES_UINT64 , GxB_MAX_TIMES_UINT64 , GxB_PLUS_TIMES_UINT64 , GxB_TIMES_TIMES_UINT64 , GxB_ANY_TIMES_UINT64 ,
GxB_MIN_TIMES_FP32 , GxB_MAX_TIMES_FP32 , GxB_PLUS_TIMES_FP32 , GxB_TIMES_TIMES_FP32 , GxB_ANY_TIMES_FP32 ,
GxB_MIN_TIMES_FP64 , GxB_MAX_TIMES_FP64 , GxB_PLUS_TIMES_FP64 , GxB_TIMES_TIMES_FP64 , GxB_ANY_TIMES_FP64 ,
// semirings with multiply op: z = DIV (x,y), all types x,y,z the same:
GxB_MIN_DIV_INT8 , GxB_MAX_DIV_INT8 , GxB_PLUS_DIV_INT8 , GxB_TIMES_DIV_INT8 , GxB_ANY_DIV_INT8 ,
GxB_MIN_DIV_INT16 , GxB_MAX_DIV_INT16 , GxB_PLUS_DIV_INT16 , GxB_TIMES_DIV_INT16 , GxB_ANY_DIV_INT16 ,
GxB_MIN_DIV_INT32 , GxB_MAX_DIV_INT32 , GxB_PLUS_DIV_INT32 , GxB_TIMES_DIV_INT32 , GxB_ANY_DIV_INT32 ,
GxB_MIN_DIV_INT64 , GxB_MAX_DIV_INT64 , GxB_PLUS_DIV_INT64 , GxB_TIMES_DIV_INT64 , GxB_ANY_DIV_INT64 ,
GxB_MIN_DIV_UINT8 , GxB_MAX_DIV_UINT8 , GxB_PLUS_DIV_UINT8 , GxB_TIMES_DIV_UINT8 , GxB_ANY_DIV_UINT8 ,
GxB_MIN_DIV_UINT16 , GxB_MAX_DIV_UINT16 , GxB_PLUS_DIV_UINT16 , GxB_TIMES_DIV_UINT16 , GxB_ANY_DIV_UINT16 ,
GxB_MIN_DIV_UINT32 , GxB_MAX_DIV_UINT32 , GxB_PLUS_DIV_UINT32 , GxB_TIMES_DIV_UINT32 , GxB_ANY_DIV_UINT32 ,
GxB_MIN_DIV_UINT64 , GxB_MAX_DIV_UINT64 , GxB_PLUS_DIV_UINT64 , GxB_TIMES_DIV_UINT64 , GxB_ANY_DIV_UINT64 ,
GxB_MIN_DIV_FP32 , GxB_MAX_DIV_FP32 , GxB_PLUS_DIV_FP32 , GxB_TIMES_DIV_FP32 , GxB_ANY_DIV_FP32 ,
GxB_MIN_DIV_FP64 , GxB_MAX_DIV_FP64 , GxB_PLUS_DIV_FP64 , GxB_TIMES_DIV_FP64 , GxB_ANY_DIV_FP64 ,
// semirings with multiply op: z = RDIV (x,y), all types x,y,z the same:
GxB_MIN_RDIV_INT8 , GxB_MAX_RDIV_INT8 , GxB_PLUS_RDIV_INT8 , GxB_TIMES_RDIV_INT8 , GxB_ANY_RDIV_INT8 ,
GxB_MIN_RDIV_INT16 , GxB_MAX_RDIV_INT16 , GxB_PLUS_RDIV_INT16 , GxB_TIMES_RDIV_INT16 , GxB_ANY_RDIV_INT16 ,
GxB_MIN_RDIV_INT32 , GxB_MAX_RDIV_INT32 , GxB_PLUS_RDIV_INT32 , GxB_TIMES_RDIV_INT32 , GxB_ANY_RDIV_INT32 ,
GxB_MIN_RDIV_INT64 , GxB_MAX_RDIV_INT64 , GxB_PLUS_RDIV_INT64 , GxB_TIMES_RDIV_INT64 , GxB_ANY_RDIV_INT64 ,
GxB_MIN_RDIV_UINT8 , GxB_MAX_RDIV_UINT8 , GxB_PLUS_RDIV_UINT8 , GxB_TIMES_RDIV_UINT8 , GxB_ANY_RDIV_UINT8 ,
GxB_MIN_RDIV_UINT16 , GxB_MAX_RDIV_UINT16 , GxB_PLUS_RDIV_UINT16 , GxB_TIMES_RDIV_UINT16 , GxB_ANY_RDIV_UINT16 ,
GxB_MIN_RDIV_UINT32 , GxB_MAX_RDIV_UINT32 , GxB_PLUS_RDIV_UINT32 , GxB_TIMES_RDIV_UINT32 , GxB_ANY_RDIV_UINT32 ,
GxB_MIN_RDIV_UINT64 , GxB_MAX_RDIV_UINT64 , GxB_PLUS_RDIV_UINT64 , GxB_TIMES_RDIV_UINT64 , GxB_ANY_RDIV_UINT64 ,
GxB_MIN_RDIV_FP32 , GxB_MAX_RDIV_FP32 , GxB_PLUS_RDIV_FP32 , GxB_TIMES_RDIV_FP32 , GxB_ANY_RDIV_FP32 ,
GxB_MIN_RDIV_FP64 , GxB_MAX_RDIV_FP64 , GxB_PLUS_RDIV_FP64 , GxB_TIMES_RDIV_FP64 , GxB_ANY_RDIV_FP64 ,
// semirings with multiply op: z = RMINUS (x,y), all types x,y,z the same:
GxB_MIN_RMINUS_INT8 , GxB_MAX_RMINUS_INT8 , GxB_PLUS_RMINUS_INT8 , GxB_TIMES_RMINUS_INT8 , GxB_ANY_RMINUS_INT8 ,
GxB_MIN_RMINUS_INT16 , GxB_MAX_RMINUS_INT16 , GxB_PLUS_RMINUS_INT16 , GxB_TIMES_RMINUS_INT16 , GxB_ANY_RMINUS_INT16 ,
GxB_MIN_RMINUS_INT32 , GxB_MAX_RMINUS_INT32 , GxB_PLUS_RMINUS_INT32 , GxB_TIMES_RMINUS_INT32 , GxB_ANY_RMINUS_INT32 ,
GxB_MIN_RMINUS_INT64 , GxB_MAX_RMINUS_INT64 , GxB_PLUS_RMINUS_INT64 , GxB_TIMES_RMINUS_INT64 , GxB_ANY_RMINUS_INT64 ,
GxB_MIN_RMINUS_UINT8 , GxB_MAX_RMINUS_UINT8 , GxB_PLUS_RMINUS_UINT8 , GxB_TIMES_RMINUS_UINT8 , GxB_ANY_RMINUS_UINT8 ,
GxB_MIN_RMINUS_UINT16 , GxB_MAX_RMINUS_UINT16 , GxB_PLUS_RMINUS_UINT16 , GxB_TIMES_RMINUS_UINT16, GxB_ANY_RMINUS_UINT16 ,
GxB_MIN_RMINUS_UINT32 , GxB_MAX_RMINUS_UINT32 , GxB_PLUS_RMINUS_UINT32 , GxB_TIMES_RMINUS_UINT32, GxB_ANY_RMINUS_UINT32 ,
GxB_MIN_RMINUS_UINT64 , GxB_MAX_RMINUS_UINT64 , GxB_PLUS_RMINUS_UINT64 , GxB_TIMES_RMINUS_UINT64, GxB_ANY_RMINUS_UINT64 ,
GxB_MIN_RMINUS_FP32 , GxB_MAX_RMINUS_FP32 , GxB_PLUS_RMINUS_FP32 , GxB_TIMES_RMINUS_FP32 , GxB_ANY_RMINUS_FP32 ,
GxB_MIN_RMINUS_FP64 , GxB_MAX_RMINUS_FP64 , GxB_PLUS_RMINUS_FP64 , GxB_TIMES_RMINUS_FP64 , GxB_ANY_RMINUS_FP64 ,
// semirings with multiply op: z = ISEQ (x,y), all types x,y,z the same:
GxB_MIN_ISEQ_INT8 , GxB_MAX_ISEQ_INT8 , GxB_PLUS_ISEQ_INT8 , GxB_TIMES_ISEQ_INT8 , GxB_ANY_ISEQ_INT8 ,
GxB_MIN_ISEQ_INT16 , GxB_MAX_ISEQ_INT16 , GxB_PLUS_ISEQ_INT16 , GxB_TIMES_ISEQ_INT16 , GxB_ANY_ISEQ_INT16 ,
GxB_MIN_ISEQ_INT32 , GxB_MAX_ISEQ_INT32 , GxB_PLUS_ISEQ_INT32 , GxB_TIMES_ISEQ_INT32 , GxB_ANY_ISEQ_INT32 ,
GxB_MIN_ISEQ_INT64 , GxB_MAX_ISEQ_INT64 , GxB_PLUS_ISEQ_INT64 , GxB_TIMES_ISEQ_INT64 , GxB_ANY_ISEQ_INT64 ,
GxB_MIN_ISEQ_UINT8 , GxB_MAX_ISEQ_UINT8 , GxB_PLUS_ISEQ_UINT8 , GxB_TIMES_ISEQ_UINT8 , GxB_ANY_ISEQ_UINT8 ,
GxB_MIN_ISEQ_UINT16 , GxB_MAX_ISEQ_UINT16 , GxB_PLUS_ISEQ_UINT16 , GxB_TIMES_ISEQ_UINT16 , GxB_ANY_ISEQ_UINT16 ,
GxB_MIN_ISEQ_UINT32 , GxB_MAX_ISEQ_UINT32 , GxB_PLUS_ISEQ_UINT32 , GxB_TIMES_ISEQ_UINT32 , GxB_ANY_ISEQ_UINT32 ,
GxB_MIN_ISEQ_UINT64 , GxB_MAX_ISEQ_UINT64 , GxB_PLUS_ISEQ_UINT64 , GxB_TIMES_ISEQ_UINT64 , GxB_ANY_ISEQ_UINT64 ,
GxB_MIN_ISEQ_FP32 , GxB_MAX_ISEQ_FP32 , GxB_PLUS_ISEQ_FP32 , GxB_TIMES_ISEQ_FP32 , GxB_ANY_ISEQ_FP32 ,
GxB_MIN_ISEQ_FP64 , GxB_MAX_ISEQ_FP64 , GxB_PLUS_ISEQ_FP64 , GxB_TIMES_ISEQ_FP64 , GxB_ANY_ISEQ_FP64 ,
// semirings with multiply op: z = ISNE (x,y), all types x,y,z the same:
GxB_MIN_ISNE_INT8 , GxB_MAX_ISNE_INT8 , GxB_PLUS_ISNE_INT8 , GxB_TIMES_ISNE_INT8 , GxB_ANY_ISNE_INT8 ,
GxB_MIN_ISNE_INT16 , GxB_MAX_ISNE_INT16 , GxB_PLUS_ISNE_INT16 , GxB_TIMES_ISNE_INT16 , GxB_ANY_ISNE_INT16 ,
GxB_MIN_ISNE_INT32 , GxB_MAX_ISNE_INT32 , GxB_PLUS_ISNE_INT32 , GxB_TIMES_ISNE_INT32 , GxB_ANY_ISNE_INT32 ,
GxB_MIN_ISNE_INT64 , GxB_MAX_ISNE_INT64 , GxB_PLUS_ISNE_INT64 , GxB_TIMES_ISNE_INT64 , GxB_ANY_ISNE_INT64 ,
GxB_MIN_ISNE_UINT8 , GxB_MAX_ISNE_UINT8 , GxB_PLUS_ISNE_UINT8 , GxB_TIMES_ISNE_UINT8 , GxB_ANY_ISNE_UINT8 ,
GxB_MIN_ISNE_UINT16 , GxB_MAX_ISNE_UINT16 , GxB_PLUS_ISNE_UINT16 , GxB_TIMES_ISNE_UINT16 , GxB_ANY_ISNE_UINT16 ,
GxB_MIN_ISNE_UINT32 , GxB_MAX_ISNE_UINT32 , GxB_PLUS_ISNE_UINT32 , GxB_TIMES_ISNE_UINT32 , GxB_ANY_ISNE_UINT32 ,
GxB_MIN_ISNE_UINT64 , GxB_MAX_ISNE_UINT64 , GxB_PLUS_ISNE_UINT64 , GxB_TIMES_ISNE_UINT64 , GxB_ANY_ISNE_UINT64 ,
GxB_MIN_ISNE_FP32 , GxB_MAX_ISNE_FP32 , GxB_PLUS_ISNE_FP32 , GxB_TIMES_ISNE_FP32 , GxB_ANY_ISNE_FP32 ,
GxB_MIN_ISNE_FP64 , GxB_MAX_ISNE_FP64 , GxB_PLUS_ISNE_FP64 , GxB_TIMES_ISNE_FP64 , GxB_ANY_ISNE_FP64 ,
// semirings with multiply op: z = ISGT (x,y), all types x,y,z the same:
GxB_MIN_ISGT_INT8 , GxB_MAX_ISGT_INT8 , GxB_PLUS_ISGT_INT8 , GxB_TIMES_ISGT_INT8 , GxB_ANY_ISGT_INT8 ,
GxB_MIN_ISGT_INT16 , GxB_MAX_ISGT_INT16 , GxB_PLUS_ISGT_INT16 , GxB_TIMES_ISGT_INT16 , GxB_ANY_ISGT_INT16 ,
GxB_MIN_ISGT_INT32 , GxB_MAX_ISGT_INT32 , GxB_PLUS_ISGT_INT32 , GxB_TIMES_ISGT_INT32 , GxB_ANY_ISGT_INT32 ,
GxB_MIN_ISGT_INT64 , GxB_MAX_ISGT_INT64 , GxB_PLUS_ISGT_INT64 , GxB_TIMES_ISGT_INT64 , GxB_ANY_ISGT_INT64 ,
GxB_MIN_ISGT_UINT8 , GxB_MAX_ISGT_UINT8 , GxB_PLUS_ISGT_UINT8 , GxB_TIMES_ISGT_UINT8 , GxB_ANY_ISGT_UINT8 ,
GxB_MIN_ISGT_UINT16 , GxB_MAX_ISGT_UINT16 , GxB_PLUS_ISGT_UINT16 , GxB_TIMES_ISGT_UINT16 , GxB_ANY_ISGT_UINT16 ,
GxB_MIN_ISGT_UINT32 , GxB_MAX_ISGT_UINT32 , GxB_PLUS_ISGT_UINT32 , GxB_TIMES_ISGT_UINT32 , GxB_ANY_ISGT_UINT32 ,
GxB_MIN_ISGT_UINT64 , GxB_MAX_ISGT_UINT64 , GxB_PLUS_ISGT_UINT64 , GxB_TIMES_ISGT_UINT64 , GxB_ANY_ISGT_UINT64 ,
GxB_MIN_ISGT_FP32 , GxB_MAX_ISGT_FP32 , GxB_PLUS_ISGT_FP32 , GxB_TIMES_ISGT_FP32 , GxB_ANY_ISGT_FP32 ,
GxB_MIN_ISGT_FP64 , GxB_MAX_ISGT_FP64 , GxB_PLUS_ISGT_FP64 , GxB_TIMES_ISGT_FP64 , GxB_ANY_ISGT_FP64 ,
// semirings with multiply op: z = ISLT (x,y), all types x,y,z the same:
GxB_MIN_ISLT_INT8 , GxB_MAX_ISLT_INT8 , GxB_PLUS_ISLT_INT8 , GxB_TIMES_ISLT_INT8 , GxB_ANY_ISLT_INT8 ,
GxB_MIN_ISLT_INT16 , GxB_MAX_ISLT_INT16 , GxB_PLUS_ISLT_INT16 , GxB_TIMES_ISLT_INT16 , GxB_ANY_ISLT_INT16 ,
GxB_MIN_ISLT_INT32 , GxB_MAX_ISLT_INT32 , GxB_PLUS_ISLT_INT32 , GxB_TIMES_ISLT_INT32 , GxB_ANY_ISLT_INT32 ,
GxB_MIN_ISLT_INT64 , GxB_MAX_ISLT_INT64 , GxB_PLUS_ISLT_INT64 , GxB_TIMES_ISLT_INT64 , GxB_ANY_ISLT_INT64 ,
GxB_MIN_ISLT_UINT8 , GxB_MAX_ISLT_UINT8 , GxB_PLUS_ISLT_UINT8 , GxB_TIMES_ISLT_UINT8 , GxB_ANY_ISLT_UINT8 ,
GxB_MIN_ISLT_UINT16 , GxB_MAX_ISLT_UINT16 , GxB_PLUS_ISLT_UINT16 , GxB_TIMES_ISLT_UINT16 , GxB_ANY_ISLT_UINT16 ,
GxB_MIN_ISLT_UINT32 , GxB_MAX_ISLT_UINT32 , GxB_PLUS_ISLT_UINT32 , GxB_TIMES_ISLT_UINT32 , GxB_ANY_ISLT_UINT32 ,
GxB_MIN_ISLT_UINT64 , GxB_MAX_ISLT_UINT64 , GxB_PLUS_ISLT_UINT64 , GxB_TIMES_ISLT_UINT64 , GxB_ANY_ISLT_UINT64 ,
GxB_MIN_ISLT_FP32 , GxB_MAX_ISLT_FP32 , GxB_PLUS_ISLT_FP32 , GxB_TIMES_ISLT_FP32 , GxB_ANY_ISLT_FP32 ,
GxB_MIN_ISLT_FP64 , GxB_MAX_ISLT_FP64 , GxB_PLUS_ISLT_FP64 , GxB_TIMES_ISLT_FP64 , GxB_ANY_ISLT_FP64 ,
// semirings with multiply op: z = ISGE (x,y), all types x,y,z the same:
GxB_MIN_ISGE_INT8 , GxB_MAX_ISGE_INT8 , GxB_PLUS_ISGE_INT8 , GxB_TIMES_ISGE_INT8 , GxB_ANY_ISGE_INT8 ,
GxB_MIN_ISGE_INT16 , GxB_MAX_ISGE_INT16 , GxB_PLUS_ISGE_INT16 , GxB_TIMES_ISGE_INT16 , GxB_ANY_ISGE_INT16 ,
GxB_MIN_ISGE_INT32 , GxB_MAX_ISGE_INT32 , GxB_PLUS_ISGE_INT32 , GxB_TIMES_ISGE_INT32 , GxB_ANY_ISGE_INT32 ,
GxB_MIN_ISGE_INT64 , GxB_MAX_ISGE_INT64 , GxB_PLUS_ISGE_INT64 , GxB_TIMES_ISGE_INT64 , GxB_ANY_ISGE_INT64 ,
GxB_MIN_ISGE_UINT8 , GxB_MAX_ISGE_UINT8 , GxB_PLUS_ISGE_UINT8 , GxB_TIMES_ISGE_UINT8 , GxB_ANY_ISGE_UINT8 ,
GxB_MIN_ISGE_UINT16 , GxB_MAX_ISGE_UINT16 , GxB_PLUS_ISGE_UINT16 , GxB_TIMES_ISGE_UINT16 , GxB_ANY_ISGE_UINT16 ,
GxB_MIN_ISGE_UINT32 , GxB_MAX_ISGE_UINT32 , GxB_PLUS_ISGE_UINT32 , GxB_TIMES_ISGE_UINT32 , GxB_ANY_ISGE_UINT32 ,
GxB_MIN_ISGE_UINT64 , GxB_MAX_ISGE_UINT64 , GxB_PLUS_ISGE_UINT64 , GxB_TIMES_ISGE_UINT64 , GxB_ANY_ISGE_UINT64 ,
GxB_MIN_ISGE_FP32 , GxB_MAX_ISGE_FP32 , GxB_PLUS_ISGE_FP32 , GxB_TIMES_ISGE_FP32 , GxB_ANY_ISGE_FP32 ,
GxB_MIN_ISGE_FP64 , GxB_MAX_ISGE_FP64 , GxB_PLUS_ISGE_FP64 , GxB_TIMES_ISGE_FP64 , GxB_ANY_ISGE_FP64 ,
// semirings with multiply op: z = ISLE (x,y), all types x,y,z the same:
GxB_MIN_ISLE_INT8 , GxB_MAX_ISLE_INT8 , GxB_PLUS_ISLE_INT8 , GxB_TIMES_ISLE_INT8 , GxB_ANY_ISLE_INT8 ,
GxB_MIN_ISLE_INT16 , GxB_MAX_ISLE_INT16 , GxB_PLUS_ISLE_INT16 , GxB_TIMES_ISLE_INT16 , GxB_ANY_ISLE_INT16 ,
GxB_MIN_ISLE_INT32 , GxB_MAX_ISLE_INT32 , GxB_PLUS_ISLE_INT32 , GxB_TIMES_ISLE_INT32 , GxB_ANY_ISLE_INT32 ,
GxB_MIN_ISLE_INT64 , GxB_MAX_ISLE_INT64 , GxB_PLUS_ISLE_INT64 , GxB_TIMES_ISLE_INT64 , GxB_ANY_ISLE_INT64 ,
GxB_MIN_ISLE_UINT8 , GxB_MAX_ISLE_UINT8 , GxB_PLUS_ISLE_UINT8 , GxB_TIMES_ISLE_UINT8 , GxB_ANY_ISLE_UINT8 ,
GxB_MIN_ISLE_UINT16 , GxB_MAX_ISLE_UINT16 , GxB_PLUS_ISLE_UINT16 , GxB_TIMES_ISLE_UINT16 , GxB_ANY_ISLE_UINT16 ,
GxB_MIN_ISLE_UINT32 , GxB_MAX_ISLE_UINT32 , GxB_PLUS_ISLE_UINT32 , GxB_TIMES_ISLE_UINT32 , GxB_ANY_ISLE_UINT32 ,
GxB_MIN_ISLE_UINT64 , GxB_MAX_ISLE_UINT64 , GxB_PLUS_ISLE_UINT64 , GxB_TIMES_ISLE_UINT64 , GxB_ANY_ISLE_UINT64 ,
GxB_MIN_ISLE_FP32 , GxB_MAX_ISLE_FP32 , GxB_PLUS_ISLE_FP32 , GxB_TIMES_ISLE_FP32 , GxB_ANY_ISLE_FP32 ,
GxB_MIN_ISLE_FP64 , GxB_MAX_ISLE_FP64 , GxB_PLUS_ISLE_FP64 , GxB_TIMES_ISLE_FP64 , GxB_ANY_ISLE_FP64 ,
// semirings with multiply op: z = LOR (x,y), all types x,y,z the same:
GxB_MIN_LOR_INT8 , GxB_MAX_LOR_INT8 , GxB_PLUS_LOR_INT8 , GxB_TIMES_LOR_INT8 , GxB_ANY_LOR_INT8 ,
GxB_MIN_LOR_INT16 , GxB_MAX_LOR_INT16 , GxB_PLUS_LOR_INT16 , GxB_TIMES_LOR_INT16 , GxB_ANY_LOR_INT16 ,
GxB_MIN_LOR_INT32 , GxB_MAX_LOR_INT32 , GxB_PLUS_LOR_INT32 , GxB_TIMES_LOR_INT32 , GxB_ANY_LOR_INT32 ,
GxB_MIN_LOR_INT64 , GxB_MAX_LOR_INT64 , GxB_PLUS_LOR_INT64 , GxB_TIMES_LOR_INT64 , GxB_ANY_LOR_INT64 ,
GxB_MIN_LOR_UINT8 , GxB_MAX_LOR_UINT8 , GxB_PLUS_LOR_UINT8 , GxB_TIMES_LOR_UINT8 , GxB_ANY_LOR_UINT8 ,
GxB_MIN_LOR_UINT16 , GxB_MAX_LOR_UINT16 , GxB_PLUS_LOR_UINT16 , GxB_TIMES_LOR_UINT16 , GxB_ANY_LOR_UINT16 ,
GxB_MIN_LOR_UINT32 , GxB_MAX_LOR_UINT32 , GxB_PLUS_LOR_UINT32 , GxB_TIMES_LOR_UINT32 , GxB_ANY_LOR_UINT32 ,
GxB_MIN_LOR_UINT64 , GxB_MAX_LOR_UINT64 , GxB_PLUS_LOR_UINT64 , GxB_TIMES_LOR_UINT64 , GxB_ANY_LOR_UINT64 ,
GxB_MIN_LOR_FP32 , GxB_MAX_LOR_FP32 , GxB_PLUS_LOR_FP32 , GxB_TIMES_LOR_FP32 , GxB_ANY_LOR_FP32 ,
GxB_MIN_LOR_FP64 , GxB_MAX_LOR_FP64 , GxB_PLUS_LOR_FP64 , GxB_TIMES_LOR_FP64 , GxB_ANY_LOR_FP64 ,
// semirings with multiply op: z = LAND (x,y), all types x,y,z the same:
GxB_MIN_LAND_INT8 , GxB_MAX_LAND_INT8 , GxB_PLUS_LAND_INT8 , GxB_TIMES_LAND_INT8 , GxB_ANY_LAND_INT8 ,
GxB_MIN_LAND_INT16 , GxB_MAX_LAND_INT16 , GxB_PLUS_LAND_INT16 , GxB_TIMES_LAND_INT16 , GxB_ANY_LAND_INT16 ,
GxB_MIN_LAND_INT32 , GxB_MAX_LAND_INT32 , GxB_PLUS_LAND_INT32 , GxB_TIMES_LAND_INT32 , GxB_ANY_LAND_INT32 ,
GxB_MIN_LAND_INT64 , GxB_MAX_LAND_INT64 , GxB_PLUS_LAND_INT64 , GxB_TIMES_LAND_INT64 , GxB_ANY_LAND_INT64 ,
GxB_MIN_LAND_UINT8 , GxB_MAX_LAND_UINT8 , GxB_PLUS_LAND_UINT8 , GxB_TIMES_LAND_UINT8 , GxB_ANY_LAND_UINT8 ,
GxB_MIN_LAND_UINT16 , GxB_MAX_LAND_UINT16 , GxB_PLUS_LAND_UINT16 , GxB_TIMES_LAND_UINT16 , GxB_ANY_LAND_UINT16 ,
GxB_MIN_LAND_UINT32 , GxB_MAX_LAND_UINT32 , GxB_PLUS_LAND_UINT32 , GxB_TIMES_LAND_UINT32 , GxB_ANY_LAND_UINT32 ,
GxB_MIN_LAND_UINT64 , GxB_MAX_LAND_UINT64 , GxB_PLUS_LAND_UINT64 , GxB_TIMES_LAND_UINT64 , GxB_ANY_LAND_UINT64 ,
GxB_MIN_LAND_FP32 , GxB_MAX_LAND_FP32 , GxB_PLUS_LAND_FP32 , GxB_TIMES_LAND_FP32 , GxB_ANY_LAND_FP32 ,
GxB_MIN_LAND_FP64 , GxB_MAX_LAND_FP64 , GxB_PLUS_LAND_FP64 , GxB_TIMES_LAND_FP64 , GxB_ANY_LAND_FP64 ,
// semirings with multiply op: z = LXOR (x,y), all types x,y,z the same:
GxB_MIN_LXOR_INT8 , GxB_MAX_LXOR_INT8 , GxB_PLUS_LXOR_INT8 , GxB_TIMES_LXOR_INT8 , GxB_ANY_LXOR_INT8 ,
GxB_MIN_LXOR_INT16 , GxB_MAX_LXOR_INT16 , GxB_PLUS_LXOR_INT16 , GxB_TIMES_LXOR_INT16 , GxB_ANY_LXOR_INT16 ,
GxB_MIN_LXOR_INT32 , GxB_MAX_LXOR_INT32 , GxB_PLUS_LXOR_INT32 , GxB_TIMES_LXOR_INT32 , GxB_ANY_LXOR_INT32 ,
GxB_MIN_LXOR_INT64 , GxB_MAX_LXOR_INT64 , GxB_PLUS_LXOR_INT64 , GxB_TIMES_LXOR_INT64 , GxB_ANY_LXOR_INT64 ,
GxB_MIN_LXOR_UINT8 , GxB_MAX_LXOR_UINT8 , GxB_PLUS_LXOR_UINT8 , GxB_TIMES_LXOR_UINT8 , GxB_ANY_LXOR_UINT8 ,
GxB_MIN_LXOR_UINT16 , GxB_MAX_LXOR_UINT16 , GxB_PLUS_LXOR_UINT16 , GxB_TIMES_LXOR_UINT16 , GxB_ANY_LXOR_UINT16 ,
GxB_MIN_LXOR_UINT32 , GxB_MAX_LXOR_UINT32 , GxB_PLUS_LXOR_UINT32 , GxB_TIMES_LXOR_UINT32 , GxB_ANY_LXOR_UINT32 ,
GxB_MIN_LXOR_UINT64 , GxB_MAX_LXOR_UINT64 , GxB_PLUS_LXOR_UINT64 , GxB_TIMES_LXOR_UINT64 , GxB_ANY_LXOR_UINT64 ,
GxB_MIN_LXOR_FP32 , GxB_MAX_LXOR_FP32 , GxB_PLUS_LXOR_FP32 , GxB_TIMES_LXOR_FP32 , GxB_ANY_LXOR_FP32 ,
GxB_MIN_LXOR_FP64 , GxB_MAX_LXOR_FP64 , GxB_PLUS_LXOR_FP64 , GxB_TIMES_LXOR_FP64 , GxB_ANY_LXOR_FP64 ,
//------------------------------------------------------------------------------
// 300 semirings with a comparator TxT -> bool, where T is non-Boolean
//------------------------------------------------------------------------------
// In the 4th column, the GxB_EQ_*_* semirings could also be called
// GxB_LXNOR_*_*, since the EQ and LXNOR boolean operators are identical;
// however, those names are not included.
// semirings with multiply op: z = EQ (x,y), where z is boolean and x,y are given by the suffix:
GxB_LOR_EQ_INT8 , GxB_LAND_EQ_INT8 , GxB_LXOR_EQ_INT8 , GxB_EQ_EQ_INT8 , GxB_ANY_EQ_INT8 ,
GxB_LOR_EQ_INT16 , GxB_LAND_EQ_INT16 , GxB_LXOR_EQ_INT16 , GxB_EQ_EQ_INT16 , GxB_ANY_EQ_INT16 ,
GxB_LOR_EQ_INT32 , GxB_LAND_EQ_INT32 , GxB_LXOR_EQ_INT32 , GxB_EQ_EQ_INT32 , GxB_ANY_EQ_INT32 ,
GxB_LOR_EQ_INT64 , GxB_LAND_EQ_INT64 , GxB_LXOR_EQ_INT64 , GxB_EQ_EQ_INT64 , GxB_ANY_EQ_INT64 ,
GxB_LOR_EQ_UINT8 , GxB_LAND_EQ_UINT8 , GxB_LXOR_EQ_UINT8 , GxB_EQ_EQ_UINT8 , GxB_ANY_EQ_UINT8 ,
GxB_LOR_EQ_UINT16 , GxB_LAND_EQ_UINT16 , GxB_LXOR_EQ_UINT16 , GxB_EQ_EQ_UINT16 , GxB_ANY_EQ_UINT16 ,
GxB_LOR_EQ_UINT32 , GxB_LAND_EQ_UINT32 , GxB_LXOR_EQ_UINT32 , GxB_EQ_EQ_UINT32 , GxB_ANY_EQ_UINT32 ,
GxB_LOR_EQ_UINT64 , GxB_LAND_EQ_UINT64 , GxB_LXOR_EQ_UINT64 , GxB_EQ_EQ_UINT64 , GxB_ANY_EQ_UINT64 ,
GxB_LOR_EQ_FP32 , GxB_LAND_EQ_FP32 , GxB_LXOR_EQ_FP32 , GxB_EQ_EQ_FP32 , GxB_ANY_EQ_FP32 ,
GxB_LOR_EQ_FP64 , GxB_LAND_EQ_FP64 , GxB_LXOR_EQ_FP64 , GxB_EQ_EQ_FP64 , GxB_ANY_EQ_FP64 ,
// semirings with multiply op: z = NE (x,y), where z is boolean and x,y are given by the suffix:
GxB_LOR_NE_INT8 , GxB_LAND_NE_INT8 , GxB_LXOR_NE_INT8 , GxB_EQ_NE_INT8 , GxB_ANY_NE_INT8 ,
GxB_LOR_NE_INT16 , GxB_LAND_NE_INT16 , GxB_LXOR_NE_INT16 , GxB_EQ_NE_INT16 , GxB_ANY_NE_INT16 ,
GxB_LOR_NE_INT32 , GxB_LAND_NE_INT32 , GxB_LXOR_NE_INT32 , GxB_EQ_NE_INT32 , GxB_ANY_NE_INT32 ,
GxB_LOR_NE_INT64 , GxB_LAND_NE_INT64 , GxB_LXOR_NE_INT64 , GxB_EQ_NE_INT64 , GxB_ANY_NE_INT64 ,
GxB_LOR_NE_UINT8 , GxB_LAND_NE_UINT8 , GxB_LXOR_NE_UINT8 , GxB_EQ_NE_UINT8 , GxB_ANY_NE_UINT8 ,
GxB_LOR_NE_UINT16 , GxB_LAND_NE_UINT16 , GxB_LXOR_NE_UINT16 , GxB_EQ_NE_UINT16 , GxB_ANY_NE_UINT16 ,
GxB_LOR_NE_UINT32 , GxB_LAND_NE_UINT32 , GxB_LXOR_NE_UINT32 , GxB_EQ_NE_UINT32 , GxB_ANY_NE_UINT32 ,
GxB_LOR_NE_UINT64 , GxB_LAND_NE_UINT64 , GxB_LXOR_NE_UINT64 , GxB_EQ_NE_UINT64 , GxB_ANY_NE_UINT64 ,
GxB_LOR_NE_FP32 , GxB_LAND_NE_FP32 , GxB_LXOR_NE_FP32 , GxB_EQ_NE_FP32 , GxB_ANY_NE_FP32 ,
GxB_LOR_NE_FP64 , GxB_LAND_NE_FP64 , GxB_LXOR_NE_FP64 , GxB_EQ_NE_FP64 , GxB_ANY_NE_FP64 ,
// semirings with multiply op: z = GT (x,y), where z is boolean and x,y are given by the suffix:
GxB_LOR_GT_INT8 , GxB_LAND_GT_INT8 , GxB_LXOR_GT_INT8 , GxB_EQ_GT_INT8 , GxB_ANY_GT_INT8 ,
GxB_LOR_GT_INT16 , GxB_LAND_GT_INT16 , GxB_LXOR_GT_INT16 , GxB_EQ_GT_INT16 , GxB_ANY_GT_INT16 ,
GxB_LOR_GT_INT32 , GxB_LAND_GT_INT32 , GxB_LXOR_GT_INT32 , GxB_EQ_GT_INT32 , GxB_ANY_GT_INT32 ,
GxB_LOR_GT_INT64 , GxB_LAND_GT_INT64 , GxB_LXOR_GT_INT64 , GxB_EQ_GT_INT64 , GxB_ANY_GT_INT64 ,
GxB_LOR_GT_UINT8 , GxB_LAND_GT_UINT8 , GxB_LXOR_GT_UINT8 , GxB_EQ_GT_UINT8 , GxB_ANY_GT_UINT8 ,
GxB_LOR_GT_UINT16 , GxB_LAND_GT_UINT16 , GxB_LXOR_GT_UINT16 , GxB_EQ_GT_UINT16 , GxB_ANY_GT_UINT16 ,
GxB_LOR_GT_UINT32 , GxB_LAND_GT_UINT32 , GxB_LXOR_GT_UINT32 , GxB_EQ_GT_UINT32 , GxB_ANY_GT_UINT32 ,
GxB_LOR_GT_UINT64 , GxB_LAND_GT_UINT64 , GxB_LXOR_GT_UINT64 , GxB_EQ_GT_UINT64 , GxB_ANY_GT_UINT64 ,
GxB_LOR_GT_FP32 , GxB_LAND_GT_FP32 , GxB_LXOR_GT_FP32 , GxB_EQ_GT_FP32 , GxB_ANY_GT_FP32 ,
GxB_LOR_GT_FP64 , GxB_LAND_GT_FP64 , GxB_LXOR_GT_FP64 , GxB_EQ_GT_FP64 , GxB_ANY_GT_FP64 ,
// semirings with multiply op: z = LT (x,y), where z is boolean and x,y are given by the suffix:
GxB_LOR_LT_INT8 , GxB_LAND_LT_INT8 , GxB_LXOR_LT_INT8 , GxB_EQ_LT_INT8 , GxB_ANY_LT_INT8 ,
GxB_LOR_LT_INT16 , GxB_LAND_LT_INT16 , GxB_LXOR_LT_INT16 , GxB_EQ_LT_INT16 , GxB_ANY_LT_INT16 ,
GxB_LOR_LT_INT32 , GxB_LAND_LT_INT32 , GxB_LXOR_LT_INT32 , GxB_EQ_LT_INT32 , GxB_ANY_LT_INT32 ,
GxB_LOR_LT_INT64 , GxB_LAND_LT_INT64 , GxB_LXOR_LT_INT64 , GxB_EQ_LT_INT64 , GxB_ANY_LT_INT64 ,
GxB_LOR_LT_UINT8 , GxB_LAND_LT_UINT8 , GxB_LXOR_LT_UINT8 , GxB_EQ_LT_UINT8 , GxB_ANY_LT_UINT8 ,
GxB_LOR_LT_UINT16 , GxB_LAND_LT_UINT16 , GxB_LXOR_LT_UINT16 , GxB_EQ_LT_UINT16 , GxB_ANY_LT_UINT16 ,
GxB_LOR_LT_UINT32 , GxB_LAND_LT_UINT32 , GxB_LXOR_LT_UINT32 , GxB_EQ_LT_UINT32 , GxB_ANY_LT_UINT32 ,
GxB_LOR_LT_UINT64 , GxB_LAND_LT_UINT64 , GxB_LXOR_LT_UINT64 , GxB_EQ_LT_UINT64 , GxB_ANY_LT_UINT64 ,
GxB_LOR_LT_FP32 , GxB_LAND_LT_FP32 , GxB_LXOR_LT_FP32 , GxB_EQ_LT_FP32 , GxB_ANY_LT_FP32 ,
GxB_LOR_LT_FP64 , GxB_LAND_LT_FP64 , GxB_LXOR_LT_FP64 , GxB_EQ_LT_FP64 , GxB_ANY_LT_FP64 ,
// semirings with multiply op: z = GE (x,y), where z is boolean and x,y are given by the suffix:
GxB_LOR_GE_INT8 , GxB_LAND_GE_INT8 , GxB_LXOR_GE_INT8 , GxB_EQ_GE_INT8 , GxB_ANY_GE_INT8 ,
GxB_LOR_GE_INT16 , GxB_LAND_GE_INT16 , GxB_LXOR_GE_INT16 , GxB_EQ_GE_INT16 , GxB_ANY_GE_INT16 ,
GxB_LOR_GE_INT32 , GxB_LAND_GE_INT32 , GxB_LXOR_GE_INT32 , GxB_EQ_GE_INT32 , GxB_ANY_GE_INT32 ,
GxB_LOR_GE_INT64 , GxB_LAND_GE_INT64 , GxB_LXOR_GE_INT64 , GxB_EQ_GE_INT64 , GxB_ANY_GE_INT64 ,
GxB_LOR_GE_UINT8 , GxB_LAND_GE_UINT8 , GxB_LXOR_GE_UINT8 , GxB_EQ_GE_UINT8 , GxB_ANY_GE_UINT8 ,
GxB_LOR_GE_UINT16 , GxB_LAND_GE_UINT16 , GxB_LXOR_GE_UINT16 , GxB_EQ_GE_UINT16 , GxB_ANY_GE_UINT16 ,
GxB_LOR_GE_UINT32 , GxB_LAND_GE_UINT32 , GxB_LXOR_GE_UINT32 , GxB_EQ_GE_UINT32 , GxB_ANY_GE_UINT32 ,
GxB_LOR_GE_UINT64 , GxB_LAND_GE_UINT64 , GxB_LXOR_GE_UINT64 , GxB_EQ_GE_UINT64 , GxB_ANY_GE_UINT64 ,
GxB_LOR_GE_FP32 , GxB_LAND_GE_FP32 , GxB_LXOR_GE_FP32 , GxB_EQ_GE_FP32 , GxB_ANY_GE_FP32 ,
GxB_LOR_GE_FP64 , GxB_LAND_GE_FP64 , GxB_LXOR_GE_FP64 , GxB_EQ_GE_FP64 , GxB_ANY_GE_FP64 ,
// semirings with multiply op: z = LE (x,y), where z is boolean and x,y are given by the suffix:
GxB_LOR_LE_INT8 , GxB_LAND_LE_INT8 , GxB_LXOR_LE_INT8 , GxB_EQ_LE_INT8 , GxB_ANY_LE_INT8 ,
GxB_LOR_LE_INT16 , GxB_LAND_LE_INT16 , GxB_LXOR_LE_INT16 , GxB_EQ_LE_INT16 , GxB_ANY_LE_INT16 ,
GxB_LOR_LE_INT32 , GxB_LAND_LE_INT32 , GxB_LXOR_LE_INT32 , GxB_EQ_LE_INT32 , GxB_ANY_LE_INT32 ,
GxB_LOR_LE_INT64 , GxB_LAND_LE_INT64 , GxB_LXOR_LE_INT64 , GxB_EQ_LE_INT64 , GxB_ANY_LE_INT64 ,
GxB_LOR_LE_UINT8 , GxB_LAND_LE_UINT8 , GxB_LXOR_LE_UINT8 , GxB_EQ_LE_UINT8 , GxB_ANY_LE_UINT8 ,
GxB_LOR_LE_UINT16 , GxB_LAND_LE_UINT16 , GxB_LXOR_LE_UINT16 , GxB_EQ_LE_UINT16 , GxB_ANY_LE_UINT16 ,
GxB_LOR_LE_UINT32 , GxB_LAND_LE_UINT32 , GxB_LXOR_LE_UINT32 , GxB_EQ_LE_UINT32 , GxB_ANY_LE_UINT32 ,
GxB_LOR_LE_UINT64 , GxB_LAND_LE_UINT64 , GxB_LXOR_LE_UINT64 , GxB_EQ_LE_UINT64 , GxB_ANY_LE_UINT64 ,
GxB_LOR_LE_FP32 , GxB_LAND_LE_FP32 , GxB_LXOR_LE_FP32 , GxB_EQ_LE_FP32 , GxB_ANY_LE_FP32 ,
GxB_LOR_LE_FP64 , GxB_LAND_LE_FP64 , GxB_LXOR_LE_FP64 , GxB_EQ_LE_FP64 , GxB_ANY_LE_FP64 ,
//------------------------------------------------------------------------------
// 55 semirings with purely Boolean types, bool x bool -> bool
//------------------------------------------------------------------------------
// Note that lor_pair, land_pair, and eq_pair are all identical to any_pair.
// These 3 are marked below. GxB_EQ_*_BOOL could be called
// GxB_LXNOR_*_BOOL, and GxB_*_EQ_BOOL could be called GxB_*_LXNOR_BOOL,
// but those names are not included.
// purely boolean semirings in the form GxB_(add monoid)_(multiply operator)_BOOL:
GxB_LOR_FIRST_BOOL , GxB_LAND_FIRST_BOOL , GxB_LXOR_FIRST_BOOL , GxB_EQ_FIRST_BOOL , GxB_ANY_FIRST_BOOL ,
GxB_LOR_SECOND_BOOL , GxB_LAND_SECOND_BOOL , GxB_LXOR_SECOND_BOOL , GxB_EQ_SECOND_BOOL , GxB_ANY_SECOND_BOOL ,
GxB_LOR_PAIR_BOOL/**/ , GxB_LAND_PAIR_BOOL/**/ , GxB_LXOR_PAIR_BOOL , GxB_EQ_PAIR_BOOL/**/ , GxB_ANY_PAIR_BOOL ,
GxB_LOR_LOR_BOOL , GxB_LAND_LOR_BOOL , GxB_LXOR_LOR_BOOL , GxB_EQ_LOR_BOOL , GxB_ANY_LOR_BOOL ,
GxB_LOR_LAND_BOOL , GxB_LAND_LAND_BOOL , GxB_LXOR_LAND_BOOL , GxB_EQ_LAND_BOOL , GxB_ANY_LAND_BOOL ,
GxB_LOR_LXOR_BOOL , GxB_LAND_LXOR_BOOL , GxB_LXOR_LXOR_BOOL , GxB_EQ_LXOR_BOOL , GxB_ANY_LXOR_BOOL ,
GxB_LOR_EQ_BOOL , GxB_LAND_EQ_BOOL , GxB_LXOR_EQ_BOOL , GxB_EQ_EQ_BOOL , GxB_ANY_EQ_BOOL ,
GxB_LOR_GT_BOOL , GxB_LAND_GT_BOOL , GxB_LXOR_GT_BOOL , GxB_EQ_GT_BOOL , GxB_ANY_GT_BOOL ,
GxB_LOR_LT_BOOL , GxB_LAND_LT_BOOL , GxB_LXOR_LT_BOOL , GxB_EQ_LT_BOOL , GxB_ANY_LT_BOOL ,
GxB_LOR_GE_BOOL , GxB_LAND_GE_BOOL , GxB_LXOR_GE_BOOL , GxB_EQ_GE_BOOL , GxB_ANY_GE_BOOL ,
GxB_LOR_LE_BOOL , GxB_LAND_LE_BOOL , GxB_LXOR_LE_BOOL , GxB_EQ_LE_BOOL , GxB_ANY_LE_BOOL ,
//------------------------------------------------------------------------------
// 54 complex semirings
//------------------------------------------------------------------------------
// 3 monoids (plus, times, any), 2 types (FC32 and FC64), and 9
// multiplicative operators.
// Note that times_pair is identical to any_pair.
// These 2 are marked below.
GxB_PLUS_FIRST_FC32 , GxB_TIMES_FIRST_FC32 , GxB_ANY_FIRST_FC32 ,
GxB_PLUS_FIRST_FC64 , GxB_TIMES_FIRST_FC64 , GxB_ANY_FIRST_FC64 ,
GxB_PLUS_SECOND_FC32 , GxB_TIMES_SECOND_FC32 , GxB_ANY_SECOND_FC32 ,
GxB_PLUS_SECOND_FC64 , GxB_TIMES_SECOND_FC64 , GxB_ANY_SECOND_FC64 ,
GxB_PLUS_PAIR_FC32 , GxB_TIMES_PAIR_FC32/**/, GxB_ANY_PAIR_FC32 ,
GxB_PLUS_PAIR_FC64 , GxB_TIMES_PAIR_FC64/**/, GxB_ANY_PAIR_FC64 ,
GxB_PLUS_PLUS_FC32 , GxB_TIMES_PLUS_FC32 , GxB_ANY_PLUS_FC32 ,
GxB_PLUS_PLUS_FC64 , GxB_TIMES_PLUS_FC64 , GxB_ANY_PLUS_FC64 ,
GxB_PLUS_MINUS_FC32 , GxB_TIMES_MINUS_FC32 , GxB_ANY_MINUS_FC32 ,
GxB_PLUS_MINUS_FC64 , GxB_TIMES_MINUS_FC64 , GxB_ANY_MINUS_FC64 ,
GxB_PLUS_TIMES_FC32 , GxB_TIMES_TIMES_FC32 , GxB_ANY_TIMES_FC32 ,
GxB_PLUS_TIMES_FC64 , GxB_TIMES_TIMES_FC64 , GxB_ANY_TIMES_FC64 ,
GxB_PLUS_DIV_FC32 , GxB_TIMES_DIV_FC32 , GxB_ANY_DIV_FC32 ,
GxB_PLUS_DIV_FC64 , GxB_TIMES_DIV_FC64 , GxB_ANY_DIV_FC64 ,
GxB_PLUS_RDIV_FC32 , GxB_TIMES_RDIV_FC32 , GxB_ANY_RDIV_FC32 ,
GxB_PLUS_RDIV_FC64 , GxB_TIMES_RDIV_FC64 , GxB_ANY_RDIV_FC64 ,
GxB_PLUS_RMINUS_FC32 , GxB_TIMES_RMINUS_FC32 , GxB_ANY_RMINUS_FC32 ,
GxB_PLUS_RMINUS_FC64 , GxB_TIMES_RMINUS_FC64 , GxB_ANY_RMINUS_FC64 ,
//------------------------------------------------------------------------------
// 64 bitwise semirings
//------------------------------------------------------------------------------
// monoids: (BOR, BAND, BXOR, BXNOR) x
// mult: (BOR, BAND, BXOR, BXNOR) x
// types: (UINT8, UINT16, UINT32, UINT64)
GxB_BOR_BOR_UINT8 , GxB_BOR_BOR_UINT16 , GxB_BOR_BOR_UINT32 , GxB_BOR_BOR_UINT64 ,
GxB_BOR_BAND_UINT8 , GxB_BOR_BAND_UINT16 , GxB_BOR_BAND_UINT32 , GxB_BOR_BAND_UINT64 ,
GxB_BOR_BXOR_UINT8 , GxB_BOR_BXOR_UINT16 , GxB_BOR_BXOR_UINT32 , GxB_BOR_BXOR_UINT64 ,
GxB_BOR_BXNOR_UINT8 , GxB_BOR_BXNOR_UINT16 , GxB_BOR_BXNOR_UINT32 , GxB_BOR_BXNOR_UINT64 ,
GxB_BAND_BOR_UINT8 , GxB_BAND_BOR_UINT16 , GxB_BAND_BOR_UINT32 , GxB_BAND_BOR_UINT64 ,
GxB_BAND_BAND_UINT8 , GxB_BAND_BAND_UINT16 , GxB_BAND_BAND_UINT32 , GxB_BAND_BAND_UINT64 ,
GxB_BAND_BXOR_UINT8 , GxB_BAND_BXOR_UINT16 , GxB_BAND_BXOR_UINT32 , GxB_BAND_BXOR_UINT64 ,
GxB_BAND_BXNOR_UINT8 , GxB_BAND_BXNOR_UINT16 , GxB_BAND_BXNOR_UINT32 , GxB_BAND_BXNOR_UINT64 ,
GxB_BXOR_BOR_UINT8 , GxB_BXOR_BOR_UINT16 , GxB_BXOR_BOR_UINT32 , GxB_BXOR_BOR_UINT64 ,
GxB_BXOR_BAND_UINT8 , GxB_BXOR_BAND_UINT16 , GxB_BXOR_BAND_UINT32 , GxB_BXOR_BAND_UINT64 ,
GxB_BXOR_BXOR_UINT8 , GxB_BXOR_BXOR_UINT16 , GxB_BXOR_BXOR_UINT32 , GxB_BXOR_BXOR_UINT64 ,
GxB_BXOR_BXNOR_UINT8 , GxB_BXOR_BXNOR_UINT16 , GxB_BXOR_BXNOR_UINT32 , GxB_BXOR_BXNOR_UINT64 ,
GxB_BXNOR_BOR_UINT8 , GxB_BXNOR_BOR_UINT16 , GxB_BXNOR_BOR_UINT32 , GxB_BXNOR_BOR_UINT64 ,
GxB_BXNOR_BAND_UINT8 , GxB_BXNOR_BAND_UINT16 , GxB_BXNOR_BAND_UINT32 , GxB_BXNOR_BAND_UINT64 ,
GxB_BXNOR_BXOR_UINT8 , GxB_BXNOR_BXOR_UINT16 , GxB_BXNOR_BXOR_UINT32 , GxB_BXNOR_BXOR_UINT64 ,
GxB_BXNOR_BXNOR_UINT8 , GxB_BXNOR_BXNOR_UINT16 , GxB_BXNOR_BXNOR_UINT32 , GxB_BXNOR_BXNOR_UINT64 ,
//------------------------------------------------------------------------------
// 80 positional semirings
//------------------------------------------------------------------------------
// monoids: (MIN, MAX, ANY, PLUS, TIMES) x
// mult: (FIRSTI, FIRSTI1, FIRSTJ, FIRSTJ1, SECONDI, SECONDI1, SECONDJ, SECONDJ1)
// types: (INT32, INT64)
GxB_MIN_FIRSTI_INT32, GxB_MIN_FIRSTI_INT64,
GxB_MAX_FIRSTI_INT32, GxB_MAX_FIRSTI_INT64,
GxB_ANY_FIRSTI_INT32, GxB_ANY_FIRSTI_INT64,
GxB_PLUS_FIRSTI_INT32, GxB_PLUS_FIRSTI_INT64,
GxB_TIMES_FIRSTI_INT32, GxB_TIMES_FIRSTI_INT64,
GxB_MIN_FIRSTI1_INT32, GxB_MIN_FIRSTI1_INT64,
GxB_MAX_FIRSTI1_INT32, GxB_MAX_FIRSTI1_INT64,
GxB_ANY_FIRSTI1_INT32, GxB_ANY_FIRSTI1_INT64,
GxB_PLUS_FIRSTI1_INT32, GxB_PLUS_FIRSTI1_INT64,
GxB_TIMES_FIRSTI1_INT32, GxB_TIMES_FIRSTI1_INT64,
GxB_MIN_FIRSTJ_INT32, GxB_MIN_FIRSTJ_INT64,
GxB_MAX_FIRSTJ_INT32, GxB_MAX_FIRSTJ_INT64,
GxB_ANY_FIRSTJ_INT32, GxB_ANY_FIRSTJ_INT64,
GxB_PLUS_FIRSTJ_INT32, GxB_PLUS_FIRSTJ_INT64,
GxB_TIMES_FIRSTJ_INT32, GxB_TIMES_FIRSTJ_INT64,
GxB_MIN_FIRSTJ1_INT32, GxB_MIN_FIRSTJ1_INT64,
GxB_MAX_FIRSTJ1_INT32, GxB_MAX_FIRSTJ1_INT64,
GxB_ANY_FIRSTJ1_INT32, GxB_ANY_FIRSTJ1_INT64,
GxB_PLUS_FIRSTJ1_INT32, GxB_PLUS_FIRSTJ1_INT64,
GxB_TIMES_FIRSTJ1_INT32, GxB_TIMES_FIRSTJ1_INT64,
GxB_MIN_SECONDI_INT32, GxB_MIN_SECONDI_INT64,
GxB_MAX_SECONDI_INT32, GxB_MAX_SECONDI_INT64,
GxB_ANY_SECONDI_INT32, GxB_ANY_SECONDI_INT64,
GxB_PLUS_SECONDI_INT32, GxB_PLUS_SECONDI_INT64,
GxB_TIMES_SECONDI_INT32, GxB_TIMES_SECONDI_INT64,
GxB_MIN_SECONDI1_INT32, GxB_MIN_SECONDI1_INT64,
GxB_MAX_SECONDI1_INT32, GxB_MAX_SECONDI1_INT64,
GxB_ANY_SECONDI1_INT32, GxB_ANY_SECONDI1_INT64,
GxB_PLUS_SECONDI1_INT32, GxB_PLUS_SECONDI1_INT64,
GxB_TIMES_SECONDI1_INT32, GxB_TIMES_SECONDI1_INT64,
GxB_MIN_SECONDJ_INT32, GxB_MIN_SECONDJ_INT64,
GxB_MAX_SECONDJ_INT32, GxB_MAX_SECONDJ_INT64,
GxB_ANY_SECONDJ_INT32, GxB_ANY_SECONDJ_INT64,
GxB_PLUS_SECONDJ_INT32, GxB_PLUS_SECONDJ_INT64,
GxB_TIMES_SECONDJ_INT32, GxB_TIMES_SECONDJ_INT64,
GxB_MIN_SECONDJ1_INT32, GxB_MIN_SECONDJ1_INT64,
GxB_MAX_SECONDJ1_INT32, GxB_MAX_SECONDJ1_INT64,
GxB_ANY_SECONDJ1_INT32, GxB_ANY_SECONDJ1_INT64,
GxB_PLUS_SECONDJ1_INT32, GxB_PLUS_SECONDJ1_INT64,
GxB_TIMES_SECONDJ1_INT32, GxB_TIMES_SECONDJ1_INT64 ;
//------------------------------------------------------------------------------
// GrB_* semirings
//------------------------------------------------------------------------------
// The v1.3 C API for GraphBLAS adds the following 124 predefined semirings,
// with GrB_* names. They are identical to 124 GxB_* semirings defined above,
// with the same name, except that GrB_LXNOR_LOR_SEMIRING_BOOL is identical to
// GxB_EQ_LOR_BOOL (since GrB_EQ_BOOL == GrB_LXNOR). The old names are listed
// below alongside each new name; the new GrB_* names are preferred.
// 12 kinds of GrB_* semirings are available for all 10 real non-boolean types:
// PLUS_TIMES, PLUS_MIN,
// MIN_PLUS, MIN_TIMES, MIN_FIRST, MIN_SECOND, MIN_MAX,
// MAX_PLUS, MAX_TIMES, MAX_FIRST, MAX_SECOND, MAX_MIN
// and 4 semirings for boolean only:
// LOR_LAND, LAND_LOR, LXOR_LAND, LXNOR_LOR.
// GxB_* semirings corresponding to the equivalent GrB_* semiring are
// historical.
GB_PUBLIC GrB_Semiring
//--------------------------------------------------------------------------
// 20 semirings with PLUS monoids
//--------------------------------------------------------------------------
// PLUS_TIMES semirings for all 10 real, non-boolean types:
GrB_PLUS_TIMES_SEMIRING_INT8, // GxB_PLUS_TIMES_INT8
GrB_PLUS_TIMES_SEMIRING_INT16, // GxB_PLUS_TIMES_INT16
GrB_PLUS_TIMES_SEMIRING_INT32, // GxB_PLUS_TIMES_INT32
GrB_PLUS_TIMES_SEMIRING_INT64, // GxB_PLUS_TIMES_INT64
GrB_PLUS_TIMES_SEMIRING_UINT8, // GxB_PLUS_TIMES_UINT8
GrB_PLUS_TIMES_SEMIRING_UINT16, // GxB_PLUS_TIMES_UINT16
GrB_PLUS_TIMES_SEMIRING_UINT32, // GxB_PLUS_TIMES_UINT32
GrB_PLUS_TIMES_SEMIRING_UINT64, // GxB_PLUS_TIMES_UINT64
GrB_PLUS_TIMES_SEMIRING_FP32, // GxB_PLUS_TIMES_FP32
GrB_PLUS_TIMES_SEMIRING_FP64, // GxB_PLUS_TIMES_FP64
// PLUS_MIN semirings for all 10 real, non-boolean types:
GrB_PLUS_MIN_SEMIRING_INT8, // GxB_PLUS_MIN_INT8
GrB_PLUS_MIN_SEMIRING_INT16, // GxB_PLUS_MIN_INT16
GrB_PLUS_MIN_SEMIRING_INT32, // GxB_PLUS_MIN_INT32
GrB_PLUS_MIN_SEMIRING_INT64, // GxB_PLUS_MIN_INT64
GrB_PLUS_MIN_SEMIRING_UINT8, // GxB_PLUS_MIN_UINT8
GrB_PLUS_MIN_SEMIRING_UINT16, // GxB_PLUS_MIN_UINT16
GrB_PLUS_MIN_SEMIRING_UINT32, // GxB_PLUS_MIN_UINT32
GrB_PLUS_MIN_SEMIRING_UINT64, // GxB_PLUS_MIN_UINT64
GrB_PLUS_MIN_SEMIRING_FP32, // GxB_PLUS_MIN_FP32
GrB_PLUS_MIN_SEMIRING_FP64, // GxB_PLUS_MIN_FP64
//--------------------------------------------------------------------------
// 50 semirings with MIN monoids
//--------------------------------------------------------------------------
// MIN_PLUS semirings for all 10 real, non-boolean types:
GrB_MIN_PLUS_SEMIRING_INT8, // GxB_MIN_PLUS_INT8
GrB_MIN_PLUS_SEMIRING_INT16, // GxB_MIN_PLUS_INT16
GrB_MIN_PLUS_SEMIRING_INT32, // GxB_MIN_PLUS_INT32
GrB_MIN_PLUS_SEMIRING_INT64, // GxB_MIN_PLUS_INT64
GrB_MIN_PLUS_SEMIRING_UINT8, // GxB_MIN_PLUS_UINT8
GrB_MIN_PLUS_SEMIRING_UINT16, // GxB_MIN_PLUS_UINT16
GrB_MIN_PLUS_SEMIRING_UINT32, // GxB_MIN_PLUS_UINT32
GrB_MIN_PLUS_SEMIRING_UINT64, // GxB_MIN_PLUS_UINT64
GrB_MIN_PLUS_SEMIRING_FP32, // GxB_MIN_PLUS_FP32
GrB_MIN_PLUS_SEMIRING_FP64, // GxB_MIN_PLUS_FP64
// MIN_TIMES semirings for all 10 real, non-boolean types:
GrB_MIN_TIMES_SEMIRING_INT8, // GxB_MIN_TIMES_INT8
GrB_MIN_TIMES_SEMIRING_INT16, // GxB_MIN_TIMES_INT16
GrB_MIN_TIMES_SEMIRING_INT32, // GxB_MIN_TIMES_INT32
GrB_MIN_TIMES_SEMIRING_INT64, // GxB_MIN_TIMES_INT64
GrB_MIN_TIMES_SEMIRING_UINT8, // GxB_MIN_TIMES_UINT8
GrB_MIN_TIMES_SEMIRING_UINT16, // GxB_MIN_TIMES_UINT16
GrB_MIN_TIMES_SEMIRING_UINT32, // GxB_MIN_TIMES_UINT32
GrB_MIN_TIMES_SEMIRING_UINT64, // GxB_MIN_TIMES_UINT64
GrB_MIN_TIMES_SEMIRING_FP32, // GxB_MIN_TIMES_FP32
GrB_MIN_TIMES_SEMIRING_FP64, // GxB_MIN_TIMES_FP64
// MIN_FIRST semirings for all 10 real, non-boolean types:
GrB_MIN_FIRST_SEMIRING_INT8, // GxB_MIN_FIRST_INT8
GrB_MIN_FIRST_SEMIRING_INT16, // GxB_MIN_FIRST_INT16
GrB_MIN_FIRST_SEMIRING_INT32, // GxB_MIN_FIRST_INT32
GrB_MIN_FIRST_SEMIRING_INT64, // GxB_MIN_FIRST_INT64
GrB_MIN_FIRST_SEMIRING_UINT8, // GxB_MIN_FIRST_UINT8
GrB_MIN_FIRST_SEMIRING_UINT16, // GxB_MIN_FIRST_UINT16
GrB_MIN_FIRST_SEMIRING_UINT32, // GxB_MIN_FIRST_UINT32
GrB_MIN_FIRST_SEMIRING_UINT64, // GxB_MIN_FIRST_UINT64
GrB_MIN_FIRST_SEMIRING_FP32, // GxB_MIN_FIRST_FP32
GrB_MIN_FIRST_SEMIRING_FP64, // GxB_MIN_FIRST_FP64
// MIN_SECOND semirings for all 10 real, non-boolean types:
GrB_MIN_SECOND_SEMIRING_INT8, // GxB_MIN_SECOND_INT8
GrB_MIN_SECOND_SEMIRING_INT16, // GxB_MIN_SECOND_INT16
GrB_MIN_SECOND_SEMIRING_INT32, // GxB_MIN_SECOND_INT32
GrB_MIN_SECOND_SEMIRING_INT64, // GxB_MIN_SECOND_INT64
GrB_MIN_SECOND_SEMIRING_UINT8, // GxB_MIN_SECOND_UINT8
GrB_MIN_SECOND_SEMIRING_UINT16, // GxB_MIN_SECOND_UINT16
GrB_MIN_SECOND_SEMIRING_UINT32, // GxB_MIN_SECOND_UINT32
GrB_MIN_SECOND_SEMIRING_UINT64, // GxB_MIN_SECOND_UINT64
GrB_MIN_SECOND_SEMIRING_FP32, // GxB_MIN_SECOND_FP32
GrB_MIN_SECOND_SEMIRING_FP64, // GxB_MIN_SECOND_FP64
// MIN_MAX semirings for all 10 real, non-boolean types:
GrB_MIN_MAX_SEMIRING_INT8, // GxB_MIN_MAX_INT8
GrB_MIN_MAX_SEMIRING_INT16, // GxB_MIN_MAX_INT16
GrB_MIN_MAX_SEMIRING_INT32, // GxB_MIN_MAX_INT32
GrB_MIN_MAX_SEMIRING_INT64, // GxB_MIN_MAX_INT64
GrB_MIN_MAX_SEMIRING_UINT8, // GxB_MIN_MAX_UINT8
GrB_MIN_MAX_SEMIRING_UINT16, // GxB_MIN_MAX_UINT16
GrB_MIN_MAX_SEMIRING_UINT32, // GxB_MIN_MAX_UINT32
GrB_MIN_MAX_SEMIRING_UINT64, // GxB_MIN_MAX_UINT64
GrB_MIN_MAX_SEMIRING_FP32, // GxB_MIN_MAX_FP32
GrB_MIN_MAX_SEMIRING_FP64, // GxB_MIN_MAX_FP64
//--------------------------------------------------------------------------
// 50 semirings with MAX monoids
//--------------------------------------------------------------------------
// MAX_PLUS semirings for all 10 real, non-boolean types
GrB_MAX_PLUS_SEMIRING_INT8, // GxB_MAX_PLUS_INT8
GrB_MAX_PLUS_SEMIRING_INT16, // GxB_MAX_PLUS_INT16
GrB_MAX_PLUS_SEMIRING_INT32, // GxB_MAX_PLUS_INT32
GrB_MAX_PLUS_SEMIRING_INT64, // GxB_MAX_PLUS_INT64
GrB_MAX_PLUS_SEMIRING_UINT8, // GxB_MAX_PLUS_UINT8
GrB_MAX_PLUS_SEMIRING_UINT16, // GxB_MAX_PLUS_UINT16
GrB_MAX_PLUS_SEMIRING_UINT32, // GxB_MAX_PLUS_UINT32
GrB_MAX_PLUS_SEMIRING_UINT64, // GxB_MAX_PLUS_UINT64
GrB_MAX_PLUS_SEMIRING_FP32, // GxB_MAX_PLUS_FP32
GrB_MAX_PLUS_SEMIRING_FP64, // GxB_MAX_PLUS_FP64
// MAX_TIMES semirings for all 10 real, non-boolean types:
GrB_MAX_TIMES_SEMIRING_INT8, // GxB_MAX_TIMES_INT8
GrB_MAX_TIMES_SEMIRING_INT16, // GxB_MAX_TIMES_INT16
GrB_MAX_TIMES_SEMIRING_INT32, // GxB_MAX_TIMES_INT32
GrB_MAX_TIMES_SEMIRING_INT64, // GxB_MAX_TIMES_INT64
GrB_MAX_TIMES_SEMIRING_UINT8, // GxB_MAX_TIMES_UINT8
GrB_MAX_TIMES_SEMIRING_UINT16, // GxB_MAX_TIMES_UINT16
GrB_MAX_TIMES_SEMIRING_UINT32, // GxB_MAX_TIMES_UINT32
GrB_MAX_TIMES_SEMIRING_UINT64, // GxB_MAX_TIMES_UINT64
GrB_MAX_TIMES_SEMIRING_FP32, // GxB_MAX_TIMES_FP32
GrB_MAX_TIMES_SEMIRING_FP64, // GxB_MAX_TIMES_FP64
// MAX_FIRST semirings for all 10 real, non-boolean types:
GrB_MAX_FIRST_SEMIRING_INT8, // GxB_MAX_FIRST_INT8
GrB_MAX_FIRST_SEMIRING_INT16, // GxB_MAX_FIRST_INT16
GrB_MAX_FIRST_SEMIRING_INT32, // GxB_MAX_FIRST_INT32
GrB_MAX_FIRST_SEMIRING_INT64, // GxB_MAX_FIRST_INT64
GrB_MAX_FIRST_SEMIRING_UINT8, // GxB_MAX_FIRST_UINT8
GrB_MAX_FIRST_SEMIRING_UINT16, // GxB_MAX_FIRST_UINT16
GrB_MAX_FIRST_SEMIRING_UINT32, // GxB_MAX_FIRST_UINT32
GrB_MAX_FIRST_SEMIRING_UINT64, // GxB_MAX_FIRST_UINT64
GrB_MAX_FIRST_SEMIRING_FP32, // GxB_MAX_FIRST_FP32
GrB_MAX_FIRST_SEMIRING_FP64, // GxB_MAX_FIRST_FP64
// MAX_SECOND semirings for all 10 real, non-boolean types:
GrB_MAX_SECOND_SEMIRING_INT8, // GxB_MAX_SECOND_INT8
GrB_MAX_SECOND_SEMIRING_INT16, // GxB_MAX_SECOND_INT16
GrB_MAX_SECOND_SEMIRING_INT32, // GxB_MAX_SECOND_INT32
GrB_MAX_SECOND_SEMIRING_INT64, // GxB_MAX_SECOND_INT64
GrB_MAX_SECOND_SEMIRING_UINT8, // GxB_MAX_SECOND_UINT8
GrB_MAX_SECOND_SEMIRING_UINT16, // GxB_MAX_SECOND_UINT16
GrB_MAX_SECOND_SEMIRING_UINT32, // GxB_MAX_SECOND_UINT32
GrB_MAX_SECOND_SEMIRING_UINT64, // GxB_MAX_SECOND_UINT64
GrB_MAX_SECOND_SEMIRING_FP32, // GxB_MAX_SECOND_FP32
GrB_MAX_SECOND_SEMIRING_FP64, // GxB_MAX_SECOND_FP64
// MAX_MIN semirings for all 10 real, non-boolean types:
GrB_MAX_MIN_SEMIRING_INT8, // GxB_MAX_MIN_INT8
GrB_MAX_MIN_SEMIRING_INT16, // GxB_MAX_MIN_INT16
GrB_MAX_MIN_SEMIRING_INT32, // GxB_MAX_MIN_INT32
GrB_MAX_MIN_SEMIRING_INT64, // GxB_MAX_MIN_INT64
GrB_MAX_MIN_SEMIRING_UINT8, // GxB_MAX_MIN_UINT8
GrB_MAX_MIN_SEMIRING_UINT16, // GxB_MAX_MIN_UINT16
GrB_MAX_MIN_SEMIRING_UINT32, // GxB_MAX_MIN_UINT32
GrB_MAX_MIN_SEMIRING_UINT64, // GxB_MAX_MIN_UINT64
GrB_MAX_MIN_SEMIRING_FP32, // GxB_MAX_MIN_FP32
GrB_MAX_MIN_SEMIRING_FP64, // GxB_MAX_MIN_FP64
//--------------------------------------------------------------------------
// 4 boolean semirings:
//--------------------------------------------------------------------------
GrB_LOR_LAND_SEMIRING_BOOL, // GxB_LOR_LAND_BOOL
GrB_LAND_LOR_SEMIRING_BOOL, // GxB_LAND_LOR_BOOL
GrB_LXOR_LAND_SEMIRING_BOOL, // GxB_LXOR_LAND_BOOL
GrB_LXNOR_LOR_SEMIRING_BOOL ; // GxB_EQ_LOR_BOOL (note EQ == LXNOR)
//==============================================================================
// GrB_*_resize: change the size of a matrix or vector
//==============================================================================
// If the dimensions decrease, entries that fall outside the resized matrix or
// vector are deleted.
GB_PUBLIC
GrB_Info GrB_Matrix_resize // change the size of a matrix
(
GrB_Matrix C, // matrix to modify
GrB_Index nrows_new, // new number of rows in matrix
GrB_Index ncols_new // new number of columns in matrix
) ;
GB_PUBLIC
GrB_Info GrB_Vector_resize // change the size of a vector
(
GrB_Vector w, // vector to modify
GrB_Index nrows_new // new number of rows in vector
) ;
// GxB_*_resize are identical to the GrB_*resize methods above
GB_PUBLIC
GrB_Info GxB_Matrix_resize // change the size of a matrix (historical)
(
GrB_Matrix C, // matrix to modify
GrB_Index nrows_new, // new number of rows in matrix
GrB_Index ncols_new // new number of columns in matrix
) ;
GB_PUBLIC
GrB_Info GxB_Vector_resize // change the size of a vector (historical)
(
GrB_Vector w, // vector to modify
GrB_Index nrows_new // new number of rows in vector
) ;
// GxB_resize is a generic function for resizing a matrix or vector:
// GrB_Vector_resize (u,nrows_new)
// GrB_Matrix_resize (A,nrows_new,ncols_new)
#if GxB_STDC_VERSION >= 201112L
// The C11 _Generic keyword selects the type-specific resize function from the
// static type of the first argument; requires a C11-compliant compiler.
#define GxB_resize(arg1,...) \
_Generic \
( \
(arg1), \
GrB_Vector : GrB_Vector_resize , \
GrB_Matrix : GrB_Matrix_resize \
) \
(arg1, __VA_ARGS__)
#endif
//==============================================================================
// GxB_fprint and GxB_print: print the contents of a GraphBLAS object
//==============================================================================
// GxB_fprint (object, GxB_Print_Level pr, FILE *f) prints the contents of any
// of the 9 GraphBLAS objects to the file f, and also does an extensive test on
// the object to determine if it is valid. It returns one of the following
// error conditions:
//
// GrB_SUCCESS object is valid
// GrB_UNINITIALIZED_OBJECT object is not initialized
// GrB_INVALID_OBJECT object is not valid
// GrB_NULL_POINTER object is a NULL pointer
// GrB_INVALID_VALUE fprintf returned an I/O error; see the ANSI C
// errno or GrB_error( ) for details.
//
// GxB_fprint does not modify the status of any object. If a matrix or vector
// has not been completed, the pending computations are guaranteed to *not* be
// performed by GxB_fprint. The reason is simple. It is possible for a bug in
// the user application (such as accessing memory outside the bounds of an
// array) to mangle the internal content of a GraphBLAS object, and GxB_fprint
// can be a helpful tool to track down this bug. If GxB_fprint attempted to
// complete any computations prior to printing or checking the contents of the
// matrix or vector, then further errors could occur, including a segfault.
//
// The type-specific functions include an additional argument, the name string.
// The name is printed at the beginning of the display (assuming pr is not
// GxB_SILENT) so that the object can be more easily identified in the output.
// For the type-generic methods GxB_fprint and GxB_print, the name string is
// the variable name of the object itself.
//
// If f is NULL, stdout is used; this is not an error condition. If pr is
// outside the bounds 0 to 3, negative values are treated as GxB_SILENT, and
// values > 3 are treated as GxB_COMPLETE. If name is NULL, it is treated as
// the empty string.
//
// GxB_print (object, GxB_Print_Level pr) is the same as GxB_fprint, except
// that it prints the contents with printf instead of fprintf to a file f.
//
// The exact content and format of what is printed is implementation-dependent,
// and will change from version to version of SuiteSparse:GraphBLAS. Do not
// attempt to rely on the exact content or format by trying to parse the
// resulting output via another program. The intent of these functions is to
// produce a report of the object for visual inspection.
// GxB_Print_Level: amount of detail reported by GxB_fprint and GxB_print
// (out-of-range values are handled as described in the notes above).
typedef enum
{
GxB_SILENT = 0, // nothing is printed, just check the object
GxB_SUMMARY = 1, // print a terse summary
GxB_SHORT = 2, // short description, about 30 entries of a matrix
GxB_COMPLETE = 3, // print the entire contents of the object
GxB_SHORT_VERBOSE = 4, // GxB_SHORT but with "%.15g" for doubles
GxB_COMPLETE_VERBOSE = 5 // GxB_COMPLETE but with "%.15g" for doubles
}
GxB_Print_Level ;
GB_PUBLIC
GrB_Info GxB_Type_fprint // print and check a GrB_Type
(
GrB_Type type, // object to print and check
const char *name, // name of the object
GxB_Print_Level pr, // print level
FILE *f // file for output
) ;
GB_PUBLIC
GrB_Info GxB_UnaryOp_fprint // print and check a GrB_UnaryOp
(
GrB_UnaryOp unaryop, // object to print and check
const char *name, // name of the object
GxB_Print_Level pr, // print level
FILE *f // file for output
) ;
GB_PUBLIC
GrB_Info GxB_BinaryOp_fprint // print and check a GrB_BinaryOp
(
GrB_BinaryOp binaryop, // object to print and check
const char *name, // name of the object
GxB_Print_Level pr, // print level
FILE *f // file for output
) ;
GB_PUBLIC
GrB_Info GxB_IndexUnaryOp_fprint // print and check a GrB_IndexUnaryOp
(
GrB_IndexUnaryOp op, // object to print and check
const char *name, // name of the object
GxB_Print_Level pr, // print level
FILE *f // file for output
) ;
GB_PUBLIC
GrB_Info GxB_SelectOp_fprint // print and check a GxB_SelectOp
(
GxB_SelectOp selectop, // object to print and check
const char *name, // name of the object
GxB_Print_Level pr, // print level
FILE *f // file for output
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_fprint // print and check a GrB_Monoid
(
GrB_Monoid monoid, // object to print and check
const char *name, // name of the object
GxB_Print_Level pr, // print level
FILE *f // file for output
) ;
GB_PUBLIC
GrB_Info GxB_Semiring_fprint // print and check a GrB_Semiring
(
GrB_Semiring semiring, // object to print and check
const char *name, // name of the object
GxB_Print_Level pr, // print level
FILE *f // file for output
) ;
GB_PUBLIC
GrB_Info GxB_Descriptor_fprint // print and check a GrB_Descriptor
(
GrB_Descriptor descriptor, // object to print and check
const char *name, // name of the object
GxB_Print_Level pr, // print level
FILE *f // file for output
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_fprint // print and check a GrB_Matrix
(
GrB_Matrix A, // object to print and check
const char *name, // name of the object
GxB_Print_Level pr, // print level
FILE *f // file for output
) ;
GB_PUBLIC
GrB_Info GxB_Vector_fprint // print and check a GrB_Vector
(
GrB_Vector v, // object to print and check
const char *name, // name of the object
GxB_Print_Level pr, // print level
FILE *f // file for output
) ;
GB_PUBLIC
GrB_Info GxB_Scalar_fprint // print and check a GrB_Scalar
(
GrB_Scalar s, // object to print and check
const char *name, // name of the object
GxB_Print_Level pr, // print level
FILE *f // file for output
) ;
#if GxB_STDC_VERSION >= 201112L
// GxB_fprint (object, pr, f): type-generic print.  C11 _Generic dispatches on
// the static type of object (both const-qualified and unqualified handles are
// accepted), and GB_STR(object) stringifies the variable name so it serves as
// the name argument of the type-specific *_fprint function.
#define GxB_fprint(object,pr,f) \
_Generic \
( \
(object), \
const GrB_Type : GxB_Type_fprint , \
GrB_Type : GxB_Type_fprint , \
const GrB_UnaryOp : GxB_UnaryOp_fprint , \
GrB_UnaryOp : GxB_UnaryOp_fprint , \
const GrB_BinaryOp : GxB_BinaryOp_fprint , \
GrB_BinaryOp : GxB_BinaryOp_fprint , \
const GrB_IndexUnaryOp : GxB_IndexUnaryOp_fprint , \
GrB_IndexUnaryOp : GxB_IndexUnaryOp_fprint , \
const GxB_SelectOp : GxB_SelectOp_fprint , \
GxB_SelectOp : GxB_SelectOp_fprint , \
const GrB_Monoid : GxB_Monoid_fprint , \
GrB_Monoid : GxB_Monoid_fprint , \
const GrB_Semiring : GxB_Semiring_fprint , \
GrB_Semiring : GxB_Semiring_fprint , \
const GrB_Scalar : GxB_Scalar_fprint , \
GrB_Scalar : GxB_Scalar_fprint , \
const GrB_Vector : GxB_Vector_fprint , \
GrB_Vector : GxB_Vector_fprint , \
const GrB_Matrix : GxB_Matrix_fprint , \
GrB_Matrix : GxB_Matrix_fprint , \
const GrB_Descriptor : GxB_Descriptor_fprint , \
GrB_Descriptor : GxB_Descriptor_fprint \
) \
(object, GB_STR(object), pr, f)
// GxB_print (object, pr): same as GxB_fprint with f of NULL (prints to stdout)
#define GxB_print(object,pr) GxB_fprint(object,pr,NULL)
#endif
//==============================================================================
// Matrix and vector import/export/pack/unpack
//==============================================================================
// The import/export/pack/unpack functions allow the user application to create
// a GrB_Matrix or GrB_Vector object, and to extract its contents, faster and
// with less memory overhead than the GrB_*_build and GrB_*_extractTuples
// functions.
// The semantics of import/export/pack/unpack are the same as the "move
// constructor" in C++. On import, the user provides a set of arrays that have
// been previously allocated via the ANSI C malloc function. The arrays define
// the content of the matrix or vector. Unlike GrB_*_build, the GraphBLAS
// library then takes ownership of the user's input arrays and may either (a)
// incorporate them into its internal data structure for the new GrB_Matrix or
// GrB_Vector, potentially creating the GrB_Matrix or GrB_Vector in constant
// time with no memory copying performed, or (b) if the library does not
// support the import format directly, then it may convert the input to its
// internal format, and then free the user's input arrays. GraphBLAS may also
// choose to use a mix of the two strategies. In either case, the input arrays
// are no longer "owned" by the user application. If A is a GrB_Matrix created
// by an import/pack, the user input arrays are freed no later than GrB_free
// (&A), and may be freed earlier, at the discretion of the GraphBLAS library.
// The data structure of the GrB_Matrix and GrB_Vector remain opaque.
// The export/unpack of a GrB_Matrix or GrB_Vector is symmetric with the import
// operation. The export is destructive, where the GrB_Matrix or GrB_Vector no
// longer exists when the export completes. The GrB_Matrix or GrB_Vector
// exists after an unpack operation, just with no entries. In both export and
// unpack, the user is returned several arrays that contain the matrix or
// vector in the requested format. Ownership of these arrays is given to the
// user application, which is then responsible for freeing them via the ANSI C
// free function. If the output format is supported by the GraphBLAS library,
// then these arrays may be returned to the user application in O(1) time and
// with no memory copying performed. Otherwise, the GraphBLAS library will
// create the output arrays for the user (via the ANSI C malloc function), fill
// them with the GrB_Matrix or GrB_Vector data, and then return the newly
// allocated arrays to the user.
// Eight different formats are provided for import/export. For each format,
// the Ax array has a C-type <type> corresponding to one of the 13 built-in
// types in GraphBLAS (bool, int*_t, uint*_t, float, double, float complex, or
// double complex), or a user-defined type.
// On import/pack, the required user arrays Ah, Ap, Ab, Ai, Aj, and/or Ax must
// be non-NULL pointers to memory space allocated by the ANSI C malloc (or
// calloc, or realloc), unless nzmax is zero (in which case the Ab, Ai, Aj, Ax,
// vb, vi, and vx arrays may all be NULL). For the import, A (or GrB_Vector v)
// is undefined on input, just like the output GrB_Matrix of GrB_*_new. If the import
// is successful, the GrB_Matrix A or GrB_Vector v is created, and the pointers
// to the user input arrays have been set to NULL. These user arrays have
// either been incorporated directly into the GrB_Matrix A or GrB_Vector v, in
// which case the user input arrays will eventually be freed by GrB_free (&A),
// or their contents have been copied and the arrays freed. This decision is
// made by the GraphBLAS library itself, and the user application has no
// control over this decision.
// If any of the arrays Ab, Aj, Ai, Ax, vb, vi, or vx have zero size (with
// nzmax of zero), they are allowed to be NULL pointers on input.
// A matrix or vector may be "iso", where all entries present in the pattern
// have the same value. In this case, the boolean iso flag is true, and the
// corresponding numerical array (Ax for matrices, vx for vectors, below) need
// be only large enough to hold a single value.
// No error checking is performed on the content of the user input arrays. If
// the user input arrays do not conform to the precise specifications above,
// results are undefined. No typecasting of the values of the matrix or vector
// entries is performed on import or export.
// SuiteSparse:GraphBLAS supports all eight formats natively (CSR, CSC,
// HyperCSR, HyperCSC, BitmapR, BitmapC, FullR, and FullC). For vectors, only
// CSC, BitmapC, and FullC formats are used. On import, all eight formats
// take O(1) time and memory to import. On export, if the GrB_Matrix or
// GrB_Vector is already in this particular format, then the export takes O(1)
// time and no memory copying is performed.
// If the import is not successful, the GxB_Matrix_import_* functions return A
// as NULL, GxB_Vector_import returns v as NULL, and the user input arrays are
// neither modified nor freed. They are still owned by the user application.
// If the input data is untrusted, use the following descriptor setting for
// GxB_Matrix_import* and GxB_Matrix_pack*. The import/pack will be slower,
// but secure. GrB_Matrix_import uses the slow, secure method, since it has
// no descriptor input.
//
// GxB_set (desc, GxB_IMPORT, GxB_SECURE_IMPORT) ;
// As of v5.2.0, GxB_*import* and GxB_*export* are declared historical. Use
// GxB_*pack* and GxB_*unpack* instead. The GxB import/export will be kept
// but only documented here, not in the User Guide.
//------------------------------------------------------------------------------
// GxB_Matrix_pack_CSR: pack a CSR matrix
//------------------------------------------------------------------------------
GB_PUBLIC
GrB_Info GxB_Matrix_import_CSR // historical: use GxB_Matrix_pack_CSR
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
GrB_Index **Ap, // row "pointers", Ap_size >= (nrows+1)* sizeof(int64_t)
GrB_Index **Aj, // column indices, Aj_size >= nvals(A) * sizeof(int64_t)
void **Ax, // values, Ax_size >= nvals(A) * (type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ap_size, // size of Ap in bytes
GrB_Index Aj_size, // size of Aj in bytes
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
bool jumbled, // if true, indices in each row may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_pack_CSR // pack a CSR matrix
(
GrB_Matrix A, // matrix to create (type, nrows, ncols unchanged)
GrB_Index **Ap, // row "pointers", Ap_size >= (nrows+1)* sizeof(int64_t)
GrB_Index **Aj, // column indices, Aj_size >= nvals(A) * sizeof(int64_t)
void **Ax, // values, Ax_size >= nvals(A) * (type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ap_size, // size of Ap in bytes
GrB_Index Aj_size, // size of Aj in bytes
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
bool jumbled, // if true, indices in each row may be unsorted
const GrB_Descriptor desc
) ;
// CSR: an nrows-by-ncols matrix with nvals entries in CSR format consists
// of 3 arrays, where nvals = Ap [nrows]:
//
// GrB_Index Ap [nrows+1], Aj [nvals] ; <type> Ax [nvals] ;
//
// The column indices of entries in the ith row of the matrix are held
// in Aj [Ap [i] ... Ap[i+1]], and the corresponding values are held
// in the same positions in Ax. Column indices must be in the range 0
// to ncols-1. If jumbled is false, the column indices must appear in
// sorted order within each row. No duplicate column indices may
// appear in any row. Ap [0] must equal zero, and Ap [nrows] must
// equal nvals. The Ap array must be of size nrows+1 (or larger), and
// the Aj and Ax arrays must have size at least nvals. If nvals is
// zero, then the Aj and Ax arrays need not be present and can be
// NULL.
//------------------------------------------------------------------------------
// GxB_Matrix_pack_CSC: pack a CSC matrix
//------------------------------------------------------------------------------
GB_PUBLIC
GrB_Info GxB_Matrix_import_CSC // historical: use GxB_Matrix_pack_CSC
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
GrB_Index **Ap, // col "pointers", Ap_size >= (ncols+1)*sizeof(int64_t)
GrB_Index **Ai, // row indices, Ai_size >= nvals(A)*sizeof(int64_t)
void **Ax, // values, Ax_size >= nvals(A) * (type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ap_size, // size of Ap in bytes
GrB_Index Ai_size, // size of Ai in bytes
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
bool jumbled, // if true, indices in each column may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_pack_CSC // pack a CSC matrix
(
GrB_Matrix A, // matrix to create (type, nrows, ncols unchanged)
GrB_Index **Ap, // col "pointers", Ap_size >= (ncols+1)*sizeof(int64_t)
GrB_Index **Ai, // row indices, Ai_size >= nvals(A)*sizeof(int64_t)
void **Ax, // values, Ax_size >= nvals(A) * (type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ap_size, // size of Ap in bytes
GrB_Index Ai_size, // size of Ai in bytes
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
bool jumbled, // if true, indices in each column may be unsorted
const GrB_Descriptor desc
) ;
// CSC: an nrows-by-ncols matrix with nvals entries in CSC format consists
// of 3 arrays, where nvals = Ap [ncols]:
//
// GrB_Index Ap [ncols+1], Ai [nvals] ; <type> Ax [nvals] ;
//
// The row indices of entries in the jth column of the matrix are held
// in Ai [Ap [j] ... Ap[j+1]], and the corresponding values are held
// in the same positions in Ax. Row indices must be in the range 0 to
// nrows-1. If jumbled is false, the row indices must appear in
// sorted order within each column. No duplicate row indices may
// appear in any column. Ap [0] must equal zero, and Ap [ncols] must
// equal nvals. The Ap array must be of size ncols+1 (or larger), and
// the Ai and Ax arrays must have size at least nvals. If nvals is
// zero, then the Ai and Ax arrays need not be present and can be
// NULL.
//------------------------------------------------------------------------------
// GxB_Matrix_pack_HyperCSR: pack a hypersparse CSR matrix
//------------------------------------------------------------------------------
GB_PUBLIC
GrB_Info GxB_Matrix_import_HyperCSR // historical: use GxB_Matrix_pack_HyperCSR
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
GrB_Index **Ap, // row "pointers", Ap_size >= (nvec+1)*sizeof(int64_t)
GrB_Index **Ah, // row indices, Ah_size >= nvec*sizeof(int64_t)
GrB_Index **Aj, // column indices, Aj_size >= nvals(A)*sizeof(int64_t)
void **Ax, // values, Ax_size >= nvals(A) * (type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ap_size, // size of Ap in bytes
GrB_Index Ah_size, // size of Ah in bytes
GrB_Index Aj_size, // size of Aj in bytes
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
GrB_Index nvec, // number of rows that appear in Ah
bool jumbled, // if true, indices in each row may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_pack_HyperCSR // pack a hypersparse CSR matrix
(
GrB_Matrix A, // matrix to create (type, nrows, ncols unchanged)
GrB_Index **Ap, // row "pointers", Ap_size >= (nvec+1)*sizeof(int64_t)
GrB_Index **Ah, // row indices, Ah_size >= nvec*sizeof(int64_t)
GrB_Index **Aj, // column indices, Aj_size >= nvals(A)*sizeof(int64_t)
void **Ax, // values, Ax_size >= nvals(A) * (type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ap_size, // size of Ap in bytes
GrB_Index Ah_size, // size of Ah in bytes
GrB_Index Aj_size, // size of Aj in bytes
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
GrB_Index nvec, // number of rows that appear in Ah
bool jumbled, // if true, indices in each row may be unsorted
const GrB_Descriptor desc
) ;
// HyperCSR: an nrows-by-ncols matrix with nvals entries and nvec
// rows that may have entries in HyperCSR format consists of 4 arrays,
// where nvals = Ap [nvec]:
//
// GrB_Index Ah [nvec], Ap [nvec+1], Aj [nvals] ;
// <type> Ax [nvals] ;
//
// The Aj and Ax arrays are the same for a matrix in CSR or HyperCSR
// format. Only Ap and Ah differ.
//
// The Ah array is a list of the row indices of rows that appear in
// the matrix. It
// must appear in sorted order, and no duplicates may appear. If i =
// Ah [k] is the kth row, then the column indices of the ith
// row appear in Aj [Ap [k] ... Ap [k+1]], and the corresponding
// values appear in the same locations in Ax. Column indices must be
// in the range 0 to ncols-1, and must appear in sorted order within
// each row. No duplicate column indices may appear in any row. nvec
// may be zero, to denote an array with no entries. The Ah array must
// be of size at least nvec, Ap must be of size at least nvec+1, and
// Aj and Ax must be at least of size nvals. If nvals is zero, then
// the Aj and Ax arrays need not be present and can be NULL.
//------------------------------------------------------------------------------
// GxB_Matrix_pack_HyperCSC: pack a hypersparse CSC matrix
//------------------------------------------------------------------------------
GB_PUBLIC
GrB_Info GxB_Matrix_import_HyperCSC // historical: use GxB_Matrix_pack_HyperCSC
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
GrB_Index **Ap, // col "pointers", Ap_size >= (nvec+1)*sizeof(int64_t)
GrB_Index **Ah, // column indices, Ah_size >= nvec*sizeof(int64_t)
GrB_Index **Ai, // row indices, Ai_size >= nvals(A)*sizeof(int64_t)
void **Ax, // values, Ax_size >= nvals(A)*(type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ap_size, // size of Ap in bytes
GrB_Index Ah_size, // size of Ah in bytes
GrB_Index Ai_size, // size of Ai in bytes
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
GrB_Index nvec, // number of columns that appear in Ah
bool jumbled, // if true, indices in each column may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_pack_HyperCSC // pack a hypersparse CSC matrix
(
GrB_Matrix A, // matrix to create (type, nrows, ncols unchanged)
GrB_Index **Ap, // col "pointers", Ap_size >= (nvec+1)*sizeof(int64_t)
GrB_Index **Ah, // column indices, Ah_size >= nvec*sizeof(int64_t)
GrB_Index **Ai, // row indices, Ai_size >= nvals(A)*sizeof(int64_t)
void **Ax, // values, Ax_size >= nvals(A)*(type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ap_size, // size of Ap in bytes
GrB_Index Ah_size, // size of Ah in bytes
GrB_Index Ai_size, // size of Ai in bytes
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
GrB_Index nvec, // number of columns that appear in Ah
bool jumbled, // if true, indices in each column may be unsorted
const GrB_Descriptor desc
) ;
// HyperCSC: an nrows-by-ncols matrix with nvals entries and nvec
// columns that may have entries in HyperCSC format consists of 4 arrays,
// where nvals = Ap [nvec]:
//
//
// GrB_Index Ah [nvec], Ap [nvec+1], Ai [nvals] ;
// <type> Ax [nvals] ;
//
// The Ai and Ax arrays are the same for a matrix in CSC or HyperCSC
// format. Only Ap and Ah differ.
//
// The Ah array is a list of the column indices of non-empty columns.
// It must appear in sorted order, and no duplicates may appear. If j
// = Ah [k] is the kth non-empty column, then the row indices of the
// jth column appear in Ai [Ap [k] ... Ap [k+1]], and the
// corresponding values appear in the same locations in Ax. Row
// indices must be in the range 0 to nrows-1, and must appear in
// sorted order within each column. No duplicate row indices may
// appear in any column. nvec may be zero, to denote an array with no
// entries. The Ah array must be of size at least nvec, Ap must be of
// size at least nvec+1, and Ai and Ax must be at least of size nvals.
// If nvals is zero, then the Ai and Ax arrays need not be present and
// can be NULL.
//------------------------------------------------------------------------------
// GxB_Matrix_pack_BitmapR: pack a bitmap matrix, held by row
//------------------------------------------------------------------------------
GB_PUBLIC
GrB_Info GxB_Matrix_import_BitmapR // historical: use GxB_Matrix_pack_BitmapR
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
int8_t **Ab, // bitmap, Ab_size >= nrows*ncols
void **Ax, // values, Ax_size >= nrows*ncols * (type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ab_size, // size of Ab in bytes
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
GrB_Index nvals, // # of entries in bitmap
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_pack_BitmapR // pack a bitmap matrix, held by row
(
GrB_Matrix A, // matrix to create (type, nrows, ncols unchanged)
int8_t **Ab, // bitmap, Ab_size >= nrows*ncols
void **Ax, // values, Ax_size >= nrows*ncols * (type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ab_size, // size of Ab in bytes
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
GrB_Index nvals, // # of entries in bitmap
const GrB_Descriptor desc
) ;
// BitmapR: a dense format, but able to represent sparsity structure of A.
//
// int8_t Ab [nrows*ncols] ;
// <type> Ax [nrows*ncols] ;
//
// Ab and Ax are both of size nrows*ncols. Ab [i*ncols+j] = 1 if the
// A(i,j) entry is present with value Ax [i*ncols+j], or 0 if A(i,j)
// is not present. nvals must equal the number of 1's in the Ab
// array.
//------------------------------------------------------------------------------
// GxB_Matrix_pack_BitmapC: pack a bitmap matrix, held by column
//------------------------------------------------------------------------------
GB_PUBLIC
GrB_Info GxB_Matrix_import_BitmapC // historical: use GxB_Matrix_pack_BitmapC
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
int8_t **Ab, // bitmap, Ab_size >= nrows*ncols
void **Ax, // values, Ax_size >= nrows*ncols * (type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ab_size, // size of Ab in bytes
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
GrB_Index nvals, // # of entries in bitmap
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_pack_BitmapC // pack a bitmap matrix, held by column
(
GrB_Matrix A, // matrix to create (type, nrows, ncols unchanged)
int8_t **Ab, // bitmap, Ab_size >= nrows*ncols
void **Ax, // values, Ax_size >= nrows*ncols * (type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ab_size, // size of Ab in bytes
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
GrB_Index nvals, // # of entries in bitmap
const GrB_Descriptor desc
) ;
// BitmapC: a dense format, but able to represent sparsity structure of A.
//
// int8_t Ab [nrows*ncols] ;
// <type> Ax [nrows*ncols] ;
//
// Ab and Ax are both of size nrows*ncols. Ab [i+j*nrows] = 1 if the
// A(i,j) entry is present with value Ax [i+j*nrows], or 0 if A(i,j)
// is not present. nvals must equal the number of 1's in the Ab
// array.
//------------------------------------------------------------------------------
// GxB_Matrix_pack_FullR: pack a full matrix, held by row
//------------------------------------------------------------------------------
GB_PUBLIC
GrB_Info GxB_Matrix_import_FullR // historical: use GxB_Matrix_pack_FullR
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
void **Ax, // values, Ax_size >= nrows*ncols * (type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_pack_FullR // pack a full matrix, held by row
(
GrB_Matrix A, // matrix to create (type, nrows, ncols unchanged)
void **Ax, // values, Ax_size >= nrows*ncols * (type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
const GrB_Descriptor desc
) ;
// FullR: an nrows-by-ncols full matrix held in row-major order:
//
// <type> Ax [nrows*ncols] ;
//
// Ax is an array of size nrows*ncols, where A(i,j) is held in
// Ax [i*ncols+j]. All entries in A are present.
//------------------------------------------------------------------------------
// GxB_Matrix_pack_FullC: pack a full matrix, held by column
//------------------------------------------------------------------------------
GB_PUBLIC
GrB_Info GxB_Matrix_import_FullC // historical: use GxB_Matrix_pack_FullC
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
void **Ax, // values, Ax_size >= nrows*ncols * (type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_pack_FullC // pack a full matrix, held by column
(
GrB_Matrix A, // matrix to create (type, nrows, ncols unchanged)
void **Ax, // values, Ax_size >= nrows*ncols * (type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
const GrB_Descriptor desc
) ;
// FullC: an nrows-by-ncols full matrix held in column-major order:
//
// <type> Ax [nrows*ncols] ;
//
// Ax is an array of size nrows*ncols, where A(i,j) is held in
// Ax [i+j*nrows]. All entries in A are present.
//------------------------------------------------------------------------------
// GxB_Vector_pack_CSC: import/pack a vector in CSC format
//------------------------------------------------------------------------------
GB_PUBLIC
GrB_Info GxB_Vector_import_CSC // historical: use GxB_Vector_pack_CSC
(
GrB_Vector *v, // handle of vector to create
GrB_Type type, // type of vector to create
GrB_Index n, // vector length
GrB_Index **vi, // indices, vi_size >= nvals(v) * sizeof(int64_t)
void **vx, // values, vx_size >= nvals(v) * (type size)
// or vx_size >= (type size), if iso is true
GrB_Index vi_size, // size of vi in bytes
GrB_Index vx_size, // size of vx in bytes
bool iso, // if true, v is iso
GrB_Index nvals, // # of entries in vector
bool jumbled, // if true, indices may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Vector_pack_CSC // pack a vector in CSC format
(
GrB_Vector v, // vector to create (type and length unchanged)
GrB_Index **vi, // indices, vi_size >= nvals(v) * sizeof(int64_t)
void **vx, // values, vx_size >= nvals(v) * (type size)
// or vx_size >= (type size), if iso is true
GrB_Index vi_size, // size of vi in bytes
GrB_Index vx_size, // size of vx in bytes
bool iso, // if true, v is iso
GrB_Index nvals, // # of entries in vector
bool jumbled, // if true, indices may be unsorted
const GrB_Descriptor desc
) ;
// The GrB_Vector is treated as if it was a single column of an n-by-1
// matrix in CSC format, except that no vp array is required. If nvals is
// zero, then the vi and vx arrays need not be present and can be NULL.
//------------------------------------------------------------------------------
// GxB_Vector_pack_Bitmap: pack a vector in bitmap format
//------------------------------------------------------------------------------
GB_PUBLIC
GrB_Info GxB_Vector_import_Bitmap // historical: use GxB_Vector_pack_Bitmap
(
GrB_Vector *v, // handle of vector to create
GrB_Type type, // type of vector to create
GrB_Index n, // vector length
int8_t **vb, // bitmap, vb_size >= n
void **vx, // values, vx_size >= n * (type size)
// or vx_size >= (type size), if iso is true
GrB_Index vb_size, // size of vb in bytes
GrB_Index vx_size, // size of vx in bytes
bool iso, // if true, v is iso
GrB_Index nvals, // # of entries in bitmap
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Vector_pack_Bitmap // pack a bitmap vector
(
GrB_Vector v, // vector to create (type and length unchanged)
int8_t **vb, // bitmap, vb_size >= n
void **vx, // values, vx_size >= n * (type size)
// or vx_size >= (type size), if iso is true
GrB_Index vb_size, // size of vb in bytes
GrB_Index vx_size, // size of vx in bytes
bool iso, // if true, v is iso
GrB_Index nvals, // # of entries in bitmap
const GrB_Descriptor desc
) ;
// The GrB_Vector is treated as if it was a single column of an n-by-1
// matrix in BitmapC format.
//------------------------------------------------------------------------------
// GxB_Vector_pack_Full: pack a vector in full format
//------------------------------------------------------------------------------
GB_PUBLIC
GrB_Info GxB_Vector_import_Full // historical: use GxB_Vector_pack_Full
(
GrB_Vector *v, // handle of vector to create
GrB_Type type, // type of vector to create
GrB_Index n, // vector length
void **vx, // values, vx_size >= nvals(v) * (type size)
// or vx_size >= (type size), if iso is true
GrB_Index vx_size, // size of vx in bytes
bool iso, // if true, v is iso
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Vector_pack_Full // pack a full vector
(
GrB_Vector v, // vector to create (type and length unchanged)
void **vx, // values, vx_size >= nvals(v) * (type size)
// or vx_size >= (type size), if iso is true
GrB_Index vx_size, // size of vx in bytes
bool iso, // if true, v is iso
const GrB_Descriptor desc
) ;
// The GrB_Vector is treated as if it was a single column of an n-by-1
// matrix in FullC format.
//------------------------------------------------------------------------------
// GxB* export/unpack
//------------------------------------------------------------------------------
// The GxB_*_export/unpack functions are symmetric with the GxB_*_import/pack
// functions. The export/unpack functions force completion of any pending
// operations, prior to the export, except if the only pending operation is to
// unjumble the matrix.
//
// If there are no entries in the matrix or vector, then the index arrays (Ai,
// Aj, or vi) and value arrays (Ax or vx) are returned as NULL. This is not an
// error condition.
//
// A GrB_Matrix may be exported/unpacked in any one of four different formats.
// On successful export, the input GrB_Matrix A is freed, and the output arrays
// Ah, Ap, Ai, Aj, and/or Ax are returned to the user application as arrays
// allocated by the ANSI C malloc function. The four formats are the same as
// the import formats for GxB_Matrix_import/pack.
//
// If jumbled is NULL on input, this indicates to GxB_*export/unpack* that the
// exported/unpacked matrix cannot be returned in a jumbled format. In this
// case, if the matrix is jumbled, it is sorted before exporting it to the
// caller.
//
// If iso is NULL on input, this indicates to the export/unpack methods that
// the exported/unpacked matrix cannot be returned in an iso format, with an Ax
// array with just one entry. In this case, if the matrix is iso, it is
// expanded before exporting/unpacking it to the caller.
//
// For the export/unpack*Full* methods, all entries in the matrix or vector
// must be present. That is, GrB_*_nvals must report nvals equal to nrows*ncols
// for a matrix, or equal to the vector length for a vector. If this condition
// does not hold, the matrix/vector is not exported, and GrB_INVALID_VALUE is
// returned.
//
// If the export/unpack is not successful, the export/unpack functions do not
// modify matrix or vector and the user arrays are returned as NULL.
GB_PUBLIC
GrB_Info GxB_Matrix_export_CSR // historical: use GxB_Matrix_unpack_CSR
(
GrB_Matrix *A, // handle of matrix to export and free
GrB_Type *type, // type of matrix exported
GrB_Index *nrows, // number of rows of the matrix
GrB_Index *ncols, // number of columns of the matrix
GrB_Index **Ap, // row "pointers"
GrB_Index **Aj, // column indices
void **Ax, // values
GrB_Index *Ap_size, // size of Ap in bytes
GrB_Index *Aj_size, // size of Aj in bytes
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
bool *jumbled, // if true, indices in each row may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_unpack_CSR // unpack a CSR matrix
(
GrB_Matrix A, // matrix to unpack (type, nrows, ncols unchanged)
GrB_Index **Ap, // row "pointers"
GrB_Index **Aj, // column indices
void **Ax, // values
GrB_Index *Ap_size, // size of Ap in bytes
GrB_Index *Aj_size, // size of Aj in bytes
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
bool *jumbled, // if true, indices in each row may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_export_CSC // historical: use GxB_Matrix_unpack_CSC
(
GrB_Matrix *A, // handle of matrix to export and free
GrB_Type *type, // type of matrix exported
GrB_Index *nrows, // number of rows of the matrix
GrB_Index *ncols, // number of columns of the matrix
GrB_Index **Ap, // column "pointers"
GrB_Index **Ai, // row indices
void **Ax, // values
GrB_Index *Ap_size, // size of Ap in bytes
GrB_Index *Ai_size, // size of Ai in bytes
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
bool *jumbled, // if true, indices in each column may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_unpack_CSC // unpack a CSC matrix
(
GrB_Matrix A, // matrix to unpack (type, nrows, ncols unchanged)
GrB_Index **Ap, // column "pointers"
GrB_Index **Ai, // row indices
void **Ax, // values
GrB_Index *Ap_size, // size of Ap in bytes
GrB_Index *Ai_size, // size of Ai in bytes
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
bool *jumbled, // if true, indices in each column may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_export_HyperCSR // historical: use GxB_Matrix_unpack_HyperCSR
(
GrB_Matrix *A, // handle of matrix to export and free
GrB_Type *type, // type of matrix exported
GrB_Index *nrows, // number of rows of the matrix
GrB_Index *ncols, // number of columns of the matrix
GrB_Index **Ap, // row "pointers"
GrB_Index **Ah, // row indices
GrB_Index **Aj, // column indices
void **Ax, // values
GrB_Index *Ap_size, // size of Ap in bytes
GrB_Index *Ah_size, // size of Ah in bytes
GrB_Index *Aj_size, // size of Aj in bytes
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
GrB_Index *nvec, // number of rows that appear in Ah
bool *jumbled, // if true, indices in each row may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_unpack_HyperCSR // unpack a hypersparse CSR matrix
(
GrB_Matrix A, // matrix to unpack (type, nrows, ncols unchanged)
GrB_Index **Ap, // row "pointers"
GrB_Index **Ah, // row indices
GrB_Index **Aj, // column indices
void **Ax, // values
GrB_Index *Ap_size, // size of Ap in bytes
GrB_Index *Ah_size, // size of Ah in bytes
GrB_Index *Aj_size, // size of Aj in bytes
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
GrB_Index *nvec, // number of rows that appear in Ah
bool *jumbled, // if true, indices in each row may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_export_HyperCSC // historical: use GxB_Matrix_unpack_HyperCSC
(
GrB_Matrix *A, // handle of matrix to export and free
GrB_Type *type, // type of matrix exported
GrB_Index *nrows, // number of rows of the matrix
GrB_Index *ncols, // number of columns of the matrix
GrB_Index **Ap, // column "pointers"
GrB_Index **Ah, // column indices
GrB_Index **Ai, // row indices
void **Ax, // values
GrB_Index *Ap_size, // size of Ap in bytes
GrB_Index *Ah_size, // size of Ah in bytes
GrB_Index *Ai_size, // size of Ai in bytes
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
GrB_Index *nvec, // number of columns that appear in Ah
bool *jumbled, // if true, indices in each column may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_unpack_HyperCSC // unpack a hypersparse CSC matrix
(
GrB_Matrix A, // matrix to unpack (type, nrows, ncols unchanged)
GrB_Index **Ap, // column "pointers"
GrB_Index **Ah, // column indices
GrB_Index **Ai, // row indices
void **Ax, // values
GrB_Index *Ap_size, // size of Ap in bytes
GrB_Index *Ah_size, // size of Ah in bytes
GrB_Index *Ai_size, // size of Ai in bytes
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
GrB_Index *nvec, // number of columns that appear in Ah
bool *jumbled, // if true, indices in each column may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_export_BitmapR // historical: use GxB_Matrix_unpack_BitmapR
(
GrB_Matrix *A, // handle of matrix to export and free
GrB_Type *type, // type of matrix exported
GrB_Index *nrows, // number of rows of the matrix
GrB_Index *ncols, // number of columns of the matrix
int8_t **Ab, // bitmap
void **Ax, // values
GrB_Index *Ab_size, // size of Ab in bytes
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
GrB_Index *nvals, // # of entries in bitmap
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_unpack_BitmapR // unpack a bitmap matrix, by row
(
GrB_Matrix A, // matrix to unpack (type, nrows, ncols unchanged)
int8_t **Ab, // bitmap
void **Ax, // values
GrB_Index *Ab_size, // size of Ab in bytes
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
GrB_Index *nvals, // # of entries in bitmap
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_export_BitmapC // historical: use GxB_Matrix_unpack_BitmapC
(
GrB_Matrix *A, // handle of matrix to export and free
GrB_Type *type, // type of matrix exported
GrB_Index *nrows, // number of rows of the matrix
GrB_Index *ncols, // number of columns of the matrix
int8_t **Ab, // bitmap
void **Ax, // values
GrB_Index *Ab_size, // size of Ab in bytes
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
GrB_Index *nvals, // # of entries in bitmap
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_unpack_BitmapC // unpack a bitmap matrix, by col
(
GrB_Matrix A, // matrix to unpack (type, nrows, ncols unchanged)
int8_t **Ab, // bitmap
void **Ax, // values
GrB_Index *Ab_size, // size of Ab in bytes
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
GrB_Index *nvals, // # of entries in bitmap
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_export_FullR // historical: use GxB_Matrix_unpack_FullR
(
GrB_Matrix *A, // handle of matrix to export and free
GrB_Type *type, // type of matrix exported
GrB_Index *nrows, // number of rows of the matrix
GrB_Index *ncols, // number of columns of the matrix
void **Ax, // values
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_unpack_FullR // unpack a full matrix, by row
(
GrB_Matrix A, // matrix to unpack (type, nrows, ncols unchanged)
void **Ax, // values
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_export_FullC // historical: use GxB_Matrix_unpack_FullC
(
GrB_Matrix *A, // handle of matrix to export and free
GrB_Type *type, // type of matrix exported
GrB_Index *nrows, // number of rows of the matrix
GrB_Index *ncols, // number of columns of the matrix
void **Ax, // values
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_unpack_FullC // unpack a full matrix, by column
(
GrB_Matrix A, // matrix to unpack (type, nrows, ncols unchanged)
void **Ax, // values
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Vector_export_CSC // historical: use GxB_Vector_unpack_CSC
(
GrB_Vector *v, // handle of vector to export and free
GrB_Type *type, // type of vector exported
GrB_Index *n, // length of the vector
GrB_Index **vi, // indices
void **vx, // values
GrB_Index *vi_size, // size of vi in bytes
GrB_Index *vx_size, // size of vx in bytes
bool *iso, // if true, v is iso
GrB_Index *nvals, // # of entries in vector
bool *jumbled, // if true, indices may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Vector_unpack_CSC // unpack a CSC vector
(
GrB_Vector v, // vector to unpack (type and length unchanged)
GrB_Index **vi, // indices
void **vx, // values
GrB_Index *vi_size, // size of vi in bytes
GrB_Index *vx_size, // size of vx in bytes
bool *iso, // if true, v is iso
GrB_Index *nvals, // # of entries in vector
bool *jumbled, // if true, indices may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Vector_export_Bitmap // historical: use GxB_Vector_unpack_Bitmap
(
GrB_Vector *v, // handle of vector to export and free
GrB_Type *type, // type of vector exported
GrB_Index *n, // length of the vector
int8_t **vb, // bitmap
void **vx, // values
GrB_Index *vb_size, // size of vb in bytes
GrB_Index *vx_size, // size of vx in bytes
bool *iso, // if true, v is iso
GrB_Index *nvals, // # of entries in bitmap
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Vector_unpack_Bitmap // unpack a bitmap vector
(
GrB_Vector v, // vector to unpack (type and length unchanged)
int8_t **vb, // bitmap
void **vx, // values
GrB_Index *vb_size, // size of vb in bytes
GrB_Index *vx_size, // size of vx in bytes
bool *iso, // if true, v is iso
GrB_Index *nvals, // # of entries in bitmap
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Vector_export_Full // historical: use GxB_Vector_unpack_Full
(
GrB_Vector *v, // handle of vector to export and free
GrB_Type *type, // type of vector exported
GrB_Index *n, // length of the vector
void **vx, // values
GrB_Index *vx_size, // size of vx in bytes
bool *iso, // if true, v is iso
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Vector_unpack_Full // unpack a full vector
(
GrB_Vector v, // vector to unpack (type and length unchanged)
void **vx, // values
GrB_Index *vx_size, // size of vx in bytes
bool *iso, // if true, v is iso
const GrB_Descriptor desc
) ;
//==============================================================================
// GrB import/export
//==============================================================================
// The GrB_Matrix_import method copies from user-provided arrays into an
// opaque GrB_Matrix and GrB_Matrix_export copies data out, from an opaque
// GrB_Matrix into user-provided arrays. Unlike the GxB pack/unpack methods,
// memory is not handed off between the user application and GraphBLAS.
// These methods are much slower than the GxB pack/unpack methods, since they
// require a copy of the data to be made. GrB_Matrix_import also must assume
// its input data cannot be trusted, and so it does extensive checks. The GxB
// pack takes O(1) time in all cases (unless it is told the input data is
// untrusted, via the descriptor). GxB unpack takes O(1) time unless the
// matrix is exported in a different format than it currently has.
// No typecasting of the values is done on import or export.
// The GrB C API specification supports 3 formats:
typedef enum    // format codes for the GrB_Matrix_import/export methods below
{
GrB_CSR_FORMAT = 0, // CSR format (equiv to GxB_SPARSE with GxB_BY_ROW)
GrB_CSC_FORMAT = 1, // CSC format (equiv to GxB_SPARSE with GxB_BY_COL)
GrB_COO_FORMAT = 2 // triplet format (like input to GrB*build)
}
GrB_Format ;
GB_PUBLIC
GrB_Info GrB_Matrix_import_BOOL // import a GrB_BOOL matrix
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create (must be GrB_BOOL)
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
const GrB_Index *Ai, // row indices for CSR, CSC
const bool *Ax, // values
GrB_Index Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format // import format
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_import_INT8 // import a GrB_INT8 matrix
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create (must be GrB_INT8)
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
const GrB_Index *Ai, // row indices for CSR, CSC
const int8_t *Ax, // values
GrB_Index Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format // import format
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_import_INT16 // import a GrB_INT16 matrix
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create (must be GrB_INT16)
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
const GrB_Index *Ai, // row indices for CSR, CSC
const int16_t *Ax, // values
GrB_Index Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format // import format
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_import_INT32 // import a GrB_INT32 matrix
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create (must be GrB_INT32)
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
const GrB_Index *Ai, // row indices for CSR, CSC
const int32_t *Ax, // values
GrB_Index Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format // import format
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_import_INT64 // import a GrB_INT64 matrix
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create (must be GrB_INT64)
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
const GrB_Index *Ai, // row indices for CSR, CSC
const int64_t *Ax, // values
GrB_Index Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format // import format
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_import_UINT8 // import a GrB_UINT8 matrix
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create (must be GrB_UINT8)
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
const GrB_Index *Ai, // row indices for CSR, CSC
const uint8_t *Ax, // values
GrB_Index Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format // import format
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_import_UINT16 // import a GrB_UINT16 matrix
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create (must be GrB_UINT16)
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
const GrB_Index *Ai, // row indices for CSR, CSC
const uint16_t *Ax, // values
GrB_Index Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format // import format
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_import_UINT32 // import a GrB_UINT32 matrix
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create (must be GrB_UINT32)
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
const GrB_Index *Ai, // row indices for CSR, CSC
const uint32_t *Ax, // values
GrB_Index Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format // import format
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_import_UINT64 // import a GrB_UINT64 matrix
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create (must be GrB_UINT64)
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
const GrB_Index *Ai, // row indices for CSR, CSC
const uint64_t *Ax, // values
GrB_Index Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format // import format
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_import_FP32 // import a GrB_FP32 matrix
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create (must be GrB_FP32)
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
const GrB_Index *Ai, // row indices for CSR, CSC
const float *Ax, // values
GrB_Index Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format // import format
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_import_FP64 // import a GrB_FP64 matrix
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create (must be GrB_FP64)
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
const GrB_Index *Ai, // row indices for CSR, CSC
const double *Ax, // values
GrB_Index Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format // import format
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_import_FC32 // import a GxB_FC32 matrix
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create (must be GxB_FC32)
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
const GrB_Index *Ai, // row indices for CSR, CSC
const GxB_FC32_t *Ax, // values
GrB_Index Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format // import format
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_import_FC64 // import a GxB_FC64 matrix
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create (must be GxB_FC64)
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
const GrB_Index *Ai, // row indices for CSR, CSC
const GxB_FC64_t *Ax, // values
GrB_Index Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format // import format
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_import_UDT // import a matrix with a user-defined type
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
const GrB_Index *Ai, // row indices for CSR, CSC
const void *Ax, // values (must match the type parameter)
GrB_Index Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format // import format
) ;
#if GxB_STDC_VERSION >= 201112L
// Type-generic macro: dispatches on the type of the Ax values array to select
// the matching GrB_Matrix_import_* method (requires C11 _Generic).
#define GrB_Matrix_import(A,type,nrows,ncols,Ap,Ai,Ax,Ap_len,Ai_len,Ax_len,fmt)\
_Generic \
( \
(Ax), \
GB_CASES (*, GrB, Matrix_import) \
) \
(A, type, nrows, ncols, Ap, Ai, Ax, Ap_len, Ai_len, Ax_len, fmt)
#endif
// For GrB_Matrix_export_T: on input, Ap_len, Ai_len, and Ax_len are
// the size of the 3 arrays Ap, Ai, and Ax, in terms of the # of entries.
// On output, these 3 values are modified to be the # of entries copied
// into those 3 arrays.
GB_PUBLIC
GrB_Info GrB_Matrix_export_BOOL // export a GrB_BOOL matrix
(
GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC
bool *Ax, // values (must match the type of A)
GrB_Index *Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index *Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index *Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export (must be of type GrB_BOOL)
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_export_INT8 // export a GrB_INT8 matrix
(
GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC
int8_t *Ax, // values (must match the type of A)
GrB_Index *Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index *Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index *Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export (must be of type GrB_INT8)
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_export_INT16 // export a GrB_INT16 matrix
(
GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC
int16_t *Ax, // values (must match the type of A)
GrB_Index *Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index *Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index *Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export (must be of type GrB_INT16)
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_export_INT32 // export a GrB_INT32 matrix
(
GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC
int32_t *Ax, // values (must match the type of A)
GrB_Index *Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index *Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index *Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export (must be of type GrB_INT32)
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_export_INT64 // export a GrB_INT64 matrix
(
GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC
int64_t *Ax, // values (must match the type of A)
GrB_Index *Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index *Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index *Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export (must be of type GrB_INT64)
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_export_UINT8 // export a GrB_UINT8 matrix
(
GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC
uint8_t *Ax, // values (must match the type of A)
GrB_Index *Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index *Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index *Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export (must be of type GrB_UINT8)
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_export_UINT16 // export a GrB_UINT16 matrix
(
GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC
uint16_t *Ax, // values (must match the type of A)
GrB_Index *Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index *Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index *Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export (must be of type GrB_UINT16)
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_export_UINT32 // export a GrB_UINT32 matrix
(
GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC
uint32_t *Ax, // values (must match the type of A)
GrB_Index *Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index *Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index *Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export (must be of type GrB_UINT32)
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_export_UINT64 // export a GrB_UINT64 matrix
(
GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC
uint64_t *Ax, // values (must match the type of A)
GrB_Index *Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index *Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index *Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export (must be of type GrB_UINT64)
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_export_FP32 // export a GrB_FP32 matrix
(
GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC
float *Ax, // values (must match the type of A)
GrB_Index *Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index *Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index *Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export (must be of type GrB_FP32)
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_export_FP64 // export a GrB_FP64 matrix
(
GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC
double *Ax, // values (must match the type of A)
GrB_Index *Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index *Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index *Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export (must be of type GrB_FP64)
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_export_FC32 // export a GrB_FC32 matrix
(
GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC
GxB_FC32_t *Ax, // values (must match the type of A)
GrB_Index *Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index *Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index *Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export (must be of type GrB_FC32)
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_export_FC64 // export a GrB_FC64 matrix
(
GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC
GxB_FC64_t *Ax, // values (must match the type of A)
GrB_Index *Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index *Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index *Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export (must be of type GrB_FC64)
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_export_UDT // export a matrix with a user-defined type
(
GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC
void *Ax, // values (must match the type of A)
GrB_Index *Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index *Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index *Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export
) ;
#if GxB_STDC_VERSION >= 201112L
#define GrB_Matrix_export(Ap,Ai,Ax,Ap_len,Ai_len,Ax_len,fmt,A) \
_Generic \
( \
(Ax), \
GB_CASES (*, GrB, Matrix_export) \
) \
(Ap, Ai, Ax, Ap_len, Ai_len, Ax_len, fmt, A)
#endif
GB_PUBLIC
GrB_Info GrB_Matrix_exportSize // determine sizes of user arrays for export
(
GrB_Index *Ap_len, // # of entries required for Ap (not # of bytes)
GrB_Index *Ai_len, // # of entries required for Ai (not # of bytes)
GrB_Index *Ax_len, // # of entries required for Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_exportHint // suggest the best export format
(
GrB_Format *format, // export format
GrB_Matrix A // matrix to export
) ;
//==============================================================================
// serialize/deserialize
//==============================================================================
// GxB_Matrix_serialize copies the contents of a GrB_Matrix into a single array
// of bytes (the "blob"). The contents of the blob are implementation
// dependent. The blob can be saved to a file, or sent across a communication
// channel, and then a GrB_Matrix can be reconstructed from the blob, even on
// another process or another machine, using the same version of
// SuiteSparse:GraphBLAS (v5.2.0 or later). The goal is that future versions
// of SuiteSparse:GraphBLAS should be able to read in the blob as well, and
// reconstruct a matrix. The matrix can be reconstructed from the blob using
// GxB_Matrix_deserialize. The blob is compressed, by default, and
// uncompressed by GxB_Matrix_deserialize.
// GrB_Matrix_serialize/deserialize are slightly different from their GxB*
// counterparts. The blob is allocated by GxB_Matrix_serialize, and must be
// freed by GxB_serialize_free (which calls the ANSI C11 free if GrB_init was
// used). By contrast, the GrB* methods require the user application to pass
// in a preallocated blob to GrB_Matrix_serialize, whose size can be given by
// GrB_Matrix_serializeSize (as a loose upper bound).
// The GrB* and GxB* methods can be mixed. GrB_Matrix_serialize and
// GxB_Matrix_serialize construct the same blob (assuming they are given the
// same # of threads to do the work). Both GrB_Matrix_deserialize and
// GxB_Matrix_deserialize can deserialize a blob coming from either
// GrB_Matrix_serialize or GxB_Matrix_serialize.
// Deserialization of untrusted data is a common security problem; see
// https://cwe.mitre.org/data/definitions/502.html. The deserialization methods
// below do a few basic checks so that no out-of-bounds access occurs during
// deserialization, but the output matrix itself may still be corrupted. If
// the data is untrusted, use this to check the matrix:
// GxB_Matrix_fprint (A, "A deserialized", GrB_SILENT, NULL)
// Example usage:
/*
//--------------------------------------------------------------------------
// using GxB serialize/deserialize
//--------------------------------------------------------------------------
// Given a GrB_Matrix A: assuming a user-defined type:
void *blob ;
GrB_Index blob_size ;
GxB_Matrix_serialize (&blob, &blob_size, A, NULL) ;
FILE *f = fopen ("myblob", "w") ;
fwrite (&blob_size, sizeof (size_t), 1, f) ;
fwrite (blob, sizeof (uint8_t), blob_size, f) ;
fclose (f) ;
GrB_Matrix_free (&A) ;
// B is a copy of A
GxB_Matrix_deserialize (&B, MyQtype, blob, blob_size, NULL) ;
GrB_Matrix_free (&B) ;
free (blob) ;
GrB_finalize ( ) ;
// --- in another process, to recreate the GrB_Matrix A:
GrB_init (GrB_NONBLOCKING) ;
FILE *f = fopen ("myblob", "r") ;
fread (&blob_size, sizeof (size_t), 1, f) ;
blob = malloc (blob_size) ;
fread (blob, sizeof (uint8_t), blob_size, f) ;
char type_name [GxB_MAX_NAME_LEN] ;
GxB_deserialize_type_name (type_name, blob, blob_size) ;
printf ("blob type is: %s\n", type_name) ;
GrB_Type user_type = NULL ;
if (strncmp (type_name, "myquaternion", GxB_MAX_NAME_LEN) == 0)
user_type = MyQtype ;
GxB_Matrix_deserialize (&A, user_type, blob, blob_size, NULL) ;
free (blob) ; // note, freed by the user, not GraphBLAS
//--------------------------------------------------------------------------
// using GrB serialize/deserialize
//--------------------------------------------------------------------------
// Given a GrB_Matrix A: assuming a user-defined type, MyQType:
void *blob = NULL ;
GrB_Index blob_size = 0 ;
GrB_Matrix A, B = NULL ;
// construct a matrix A, then serialize it:
GrB_Matrix_serializeSize (&blob_size, A) ; // loose upper bound
blob = malloc (blob_size) ;
GrB_Matrix_serialize (blob, &blob_size, A) ; // returns actual size
blob = realloc (blob, blob_size) ; // user can shrink the blob
FILE *f = fopen ("myblob", "w") ;
fwrite (&blob_size, sizeof (size_t), 1, f) ;
fwrite (blob, sizeof (uint8_t), blob_size, f) ;
fclose (f) ;
GrB_Matrix_free (&A) ;
// B is a copy of A:
GrB_Matrix_deserialize (&B, MyQtype, blob, blob_size) ;
GrB_Matrix_free (&B) ;
free (blob) ;
GrB_finalize ( ) ;
// --- in another process, to recreate the GrB_Matrix A:
GrB_init (GrB_NONBLOCKING) ;
FILE *f = fopen ("myblob", "r") ;
fread (&blob_size, sizeof (size_t), 1, f) ;
blob = malloc (blob_size) ;
fread (blob, sizeof (uint8_t), blob_size, f) ;
// the user must know the type of A is MyQType
GrB_Matrix_deserialize (&A, MyQtype, blob, blob_size) ;
free (blob) ;
*/
// Three methods are currently implemented: no compression, LZ4, and LZ4HC
#define GxB_COMPRESSION_NONE -1 // no compression
#define GxB_COMPRESSION_DEFAULT 0 // LZ4
#define GxB_COMPRESSION_LZ4 1000 // LZ4
#define GxB_COMPRESSION_LZ4HC 2000 // LZ4HC, with default level 9
// possible future methods that could be added:
// #define GxB_COMPRESSION_ZLIB 3000 // ZLIB, with default level 6
// #define GxB_COMPRESSION_LZO 4000 // LZO, with default level 2
// #define GxB_COMPRESSION_BZIP2 5000 // BZIP2, with default level 9
// #define GxB_COMPRESSION_LZSS 6000 // LZSS
// using the Intel IPP versions, if available (not yet supported);
#define GxB_COMPRESSION_INTEL 1000000
// Most of the above methods have a level parameter that controls the tradeoff
// between run time and the amount of compression obtained. Higher levels
// result in a more compact result, at the cost of higher run time:
// LZ4 no level setting
// LZ4HC 1: fast, 9: default, 9: max
// these methods are not yet supported but may be added in the future:
// ZLIB 1: fast, 6: default, 9: max
// LZO 1: fast (X1ST), 2: default (XST)
// BZIP2 1: fast, 9: default, 9: max
// LZSS no level setting
// For all methods, a level of zero results in the default level setting.
// These settings can be added, so to use LZ4HC at level 5, use method =
// GxB_COMPRESSION_LZ4HC + 5.
// If the Intel IPPS compression methods are available, they can be selected
// by adding GxB_COMPRESSION_INTEL. For example, to use the Intel IPPS
// implementation of LZ4HC at level 9, use method = GxB_COMPRESSION_INTEL +
// GxB_COMPRESSION_LZ4HC + 9 = 1,002,009. If the Intel methods are requested
// but not available, this setting is ignored and the non-Intel methods are
// used instead.
// If the level setting is out of range, the default is used for that method.
// If the method is negative, no compression is performed. If the method is
// positive but unrecognized, the default is used (GxB_COMPRESSION_LZ4, with no
// level setting, and the non-Intel version).
// If a method is not implemented, LZ4 is used instead, and the level setting
// is ignored.
GB_PUBLIC
GrB_Info GxB_Matrix_serialize // serialize a GrB_Matrix to a blob
(
// output:
void **blob_handle, // the blob, allocated on output
GrB_Index *blob_size_handle, // size of the blob on output
// input:
GrB_Matrix A, // matrix to serialize
const GrB_Descriptor desc // descriptor to select compression method
// and to control # of threads used
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_serialize // serialize a GrB_Matrix to a blob
(
// output:
void *blob, // the blob, already allocated in input
// input/output:
GrB_Index *blob_size_handle, // size of the blob on input. On output,
// the # of bytes used in the blob.
// input:
GrB_Matrix A // matrix to serialize
) ;
GB_PUBLIC
GrB_Info GxB_Vector_serialize // serialize a GrB_Vector to a blob
(
// output:
void **blob_handle, // the blob, allocated on output
GrB_Index *blob_size_handle, // size of the blob on output
// input:
GrB_Vector u, // vector to serialize
const GrB_Descriptor desc // descriptor to select compression method
// and to control # of threads used
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_serializeSize // estimate the size of a blob
(
// output:
GrB_Index *blob_size_handle, // upper bound on the required size of the
// blob on output.
// input:
GrB_Matrix A // matrix to serialize
) ;
// The GrB* and GxB* deserialize methods are nearly identical. The GxB*
// deserialize methods simply add the descriptor, which allows for optional
// control of the # of threads used to deserialize the blob.
GB_PUBLIC
GrB_Info GxB_Matrix_deserialize // deserialize blob into a GrB_Matrix
(
// output:
GrB_Matrix *C, // output matrix created from the blob
// input:
GrB_Type type, // type of the matrix C. Required if the blob holds a
// matrix of user-defined type. May be NULL if blob
// holds a built-in type; otherwise must match the
// type of C.
const void *blob, // the blob
GrB_Index blob_size, // size of the blob
const GrB_Descriptor desc // to control # of threads used
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_deserialize // deserialize blob into a GrB_Matrix
(
// output:
GrB_Matrix *C, // output matrix created from the blob
// input:
GrB_Type type, // type of the matrix C. Required if the blob holds a
// matrix of user-defined type. May be NULL if blob
// holds a built-in type; otherwise must match the
// type of C.
const void *blob, // the blob
GrB_Index blob_size // size of the blob
) ;
GB_PUBLIC
GrB_Info GxB_Vector_deserialize // deserialize blob into a GrB_Vector
(
// output:
GrB_Vector *w, // output vector created from the blob
// input:
GrB_Type type, // type of the vector w. Required if the blob holds a
// vector of user-defined type. May be NULL if blob
// holds a built-in type; otherwise must match the
// type of w.
const void *blob, // the blob
GrB_Index blob_size, // size of the blob
const GrB_Descriptor desc // to control # of threads used
) ;
// GxB_deserialize_type_name extracts the type_name of the GrB_Type of the
// GrB_Matrix or GrB_Vector held in a serialized blob. On input, type_name
// must point to a user-owned char array of size at least GxB_MAX_NAME_LEN (it
// must not point into the blob itself). On output, type_name will contain a
// null-terminated string with the corresponding C type name. If the blob
// holds a matrix of a built-in type, the name is returned as "bool" for
// GrB_BOOL, "uint8_t" for GrB_UINT8, "float complex" for GxB_FC32, etc.
// See GxB_Type_name to convert this name into a GrB_Type.
GB_PUBLIC
GrB_Info GxB_deserialize_type_name // return the type name of a blob
(
// output:
char *type_name, // name of the type (char array of size at least
// GxB_MAX_NAME_LEN, owned by the user application).
// input, not modified:
const void *blob, // the blob
GrB_Index blob_size // size of the blob
) ;
//==============================================================================
// GxB_Vector_sort and GxB_Matrix_sort: sort a matrix or vector
//==============================================================================
GB_PUBLIC
GrB_Info GxB_Vector_sort
(
// output:
GrB_Vector w, // vector of sorted values
GrB_Vector p, // vector containing the permutation
// input
GrB_BinaryOp op, // comparator op
GrB_Vector u, // vector to sort
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_sort
(
// output:
GrB_Matrix C, // matrix of sorted values
GrB_Matrix P, // matrix containing the permutations
// input
GrB_BinaryOp op, // comparator op
GrB_Matrix A, // matrix to sort
const GrB_Descriptor desc
) ;
#define GxB_sort(arg1,...) \
_Generic \
( \
(arg1), \
GrB_Vector : GxB_Vector_sort , \
GrB_Matrix : GxB_Matrix_sort \
) \
(arg1, __VA_ARGS__)
//==============================================================================
// GxB_Iterator: an object that iterates over the entries of a matrix or vector
//==============================================================================
/* Example usage:
single thread iteration of a whole matrix, one row at a time (in the
outer loop), and one entry at a time within the row (in the inner loop):
// create an iterator
GxB_Iterator iterator ;
GxB_Iterator_new (&iterator) ;
// attach it to the matrix A, known to be type GrB_FP64
GrB_Info info = GxB_rowIterator_attach (iterator, A, NULL) ;
if (info < 0) { handle the failure ... }
// seek to A(0,:)
info = GxB_rowIterator_seekRow (iterator, 0) ;
while (info != GxB_EXHAUSTED)
{
// iterate over entries in A(i,:)
GrB_Index i = GxB_rowIterator_getRowIndex (iterator) ;
while (info == GrB_SUCCESS)
{
// get the entry A(i,j)
GrB_Index j = GxB_rowIterator_getColIndex (iterator) ;
double aij = GxB_Iterator_get_FP64 (iterator) ;
// move to the next entry in A(i,:)
info = GxB_rowIterator_nextCol (iterator) ;
}
// move to the next row, A(i+1,:)
info = GxB_rowIterator_nextRow (iterator) ;
}
GrB_free (&iterator) ;
parallel iteration using 4 threads (work may be imbalanced however):
GrB_Index nrows ;
GrB_wait (A, GrB_MATERIALIZE) ; // this is essential
GrB_Matrix_nrows (&nrows, A) ;
#pragma omp parallel for num_threads(4)
for (int tid = 0 ; tid < 4 ; tid++)
{
// thread tid operates on A(row1:row2-1,:)
GrB_Index row1 = tid * (nrows / 4) ;
GrB_Index row2 = (tid == 3) ? nrows : ((tid+1) * (nrows / 4)) ;
GxB_Iterator iterator ;
GxB_Iterator_new (&iterator) ;
GrB_Info info = GxB_rowIterator_attach (iterator, A, NULL) ;
if (info < 0) { handle the failure ... }
// seek to A(row1,:)
info = GxB_rowIterator_seekRow (iterator, row1) ;
while (info != GxB_EXHAUSTED)
{
// iterate over entries in A(i,:)
GrB_Index i = GxB_rowIterator_getRowIndex (iterator) ;
if (i >= row2) break ;
while (info == GrB_SUCCESS)
{
// get the entry A(i,j)
GrB_Index j = GxB_rowIterator_getColIndex (iterator) ;
double aij = GxB_Iterator_get_FP64 (iterator) ;
// move to the next entry in A(i,:)
info = GxB_rowIterator_nextCol (iterator) ;
}
// move to the next row, A(i+1,:)
info = GxB_rowIterator_nextRow (iterator) ;
}
GrB_free (&iterator) ;
}
In the parallel example above, a more balanced work distribution can be
obtained by first computing the row degree via GrB_mxv (see LAGraph), and
then compute the cumulative sum (ideally in parallel). Next, partition the
cumulative sum into one part per thread via binary search, and divide the
rows into parts accordingly.
*/
//------------------------------------------------------------------------------
// GxB_Iterator: definition and new/free methods
//------------------------------------------------------------------------------
// The contents of an iterator must not be directly accessed by the user
// application. Only the functions and macros provided here may access
// "iterator->..." contents. The iterator is defined here only so that macros
// can be used to speed up the use of the iterator methods. User applications
// must not use "iterator->..." directly.
// Opaque iterator state.  The fields are grouped by how often they change:
// per-entry motion (pstart/pend/p/k), creation-time (header_size), and
// attach-time (everything else, a read-only snapshot of the matrix A).
struct GB_Iterator_opaque
{
    // these components change as the iterator moves (via seek or next):
    int64_t pstart ;        // the start of the current vector
    int64_t pend ;          // the end of the current vector
    int64_t p ;             // position of the current entry
    int64_t k ;             // the current vector

    // only changes when the iterator is created:
    size_t header_size ;    // size of this iterator object

    // these components only change when the iterator is attached:
    int64_t pmax ;          // avlen*avdim for bitmap; nvals(A) otherwise
    int64_t avlen ;         // length of each vector in the matrix
    int64_t avdim ;         // number of vectors in the matrix dimension
    int64_t anvec ;         // # of vectors present in the matrix
    const int64_t *GB_restrict Ap ; // pointers for sparse and hypersparse
    const int64_t *GB_restrict Ah ; // vector names for hypersparse
    const int8_t *GB_restrict Ab ;  // bitmap
    const int64_t *GB_restrict Ai ; // indices for sparse and hypersparse
                                    // (NULL for bitmap/full; see
                                    // GB_Iterator_rc_geti, which tests this)
    const void *GB_restrict Ax ;    // values for all 4 data structures
    size_t type_size ;      // size of the type of A
    int A_sparsity ;        // sparse, hyper, bitmap, or full
    bool iso ;              // true if A is iso-valued, false otherwise
    bool by_col ;           // true if A is held by column, false if by row
} ;

typedef struct GB_Iterator_opaque *GxB_Iterator ;
// GxB_Iterator_new: create a new iterator, not attached to any matrix/vector
GB_PUBLIC GrB_Info GxB_Iterator_new (GxB_Iterator *iterator) ;
// GxB_Iterator_free: free an iterator
GB_PUBLIC GrB_Info GxB_Iterator_free (GxB_Iterator *iterator) ;
//==============================================================================
// GB_Iterator_*: implements user-callable GxB_*Iterator_* methods
//==============================================================================
// GB_* methods are not user-callable. These methods appear here so that the
// iterator methods can be done via macros.
//------------------------------------------------------------------------------
// GB_Iterator_attach: attach a row/col/entry iterator to a matrix
//------------------------------------------------------------------------------
GB_PUBLIC GrB_Info GB_Iterator_attach
(
GxB_Iterator iterator, // iterator to attach to the matrix A
GrB_Matrix A, // matrix to attach
GxB_Format_Value format, // by row, by col, or by entry (GxB_NO_FORMAT)
GrB_Descriptor desc
) ;
//------------------------------------------------------------------------------
// GB_Iterator_rc_seek: seek a row/col iterator to a particular vector
//------------------------------------------------------------------------------
GB_PUBLIC GrB_Info GB_Iterator_rc_seek
(
GxB_Iterator iterator,
GrB_Index j,
bool jth_vector
) ;
//------------------------------------------------------------------------------
// GB_Iterator_rc_bitmap_next: move a row/col iterator to next entry in bitmap
//------------------------------------------------------------------------------
GB_PUBLIC GrB_Info GB_Iterator_rc_bitmap_next (GxB_Iterator iterator) ;
//------------------------------------------------------------------------------
// GB_Iterator_rc_knext: move a row/col iterator to the next vector
//------------------------------------------------------------------------------
#define GB_Iterator_rc_knext(iterator) \
( \
/* move to the next vector, and check if iterator is exhausted */ \
(++(iterator->k) >= iterator->anvec) ? \
( \
/* iterator is at the end of the matrix */ \
iterator->pstart = 0, \
iterator->pend = 0, \
iterator->p = 0, \
iterator->k = iterator->anvec, \
GxB_EXHAUSTED \
) \
: \
( \
/* find first entry in vector, and pstart/pend for this vector */ \
(iterator->A_sparsity <= GxB_SPARSE) ? \
( \
/* matrix is sparse or hypersparse */ \
iterator->pstart = iterator->Ap [iterator->k], \
iterator->pend = iterator->Ap [iterator->k+1], \
iterator->p = iterator->pstart, \
((iterator->p >= iterator->pend) ? GrB_NO_VALUE : GrB_SUCCESS) \
) \
: \
( \
/* matrix is bitmap or full */ \
iterator->pstart += iterator->avlen, \
iterator->pend += iterator->avlen, \
iterator->p = iterator->pstart, \
(iterator->A_sparsity <= GxB_BITMAP) ? \
( \
/* matrix is bitmap */ \
GB_Iterator_rc_bitmap_next (iterator) \
) \
: \
( \
/* matrix is full */ \
((iterator->p >= iterator->pend) ? GrB_NO_VALUE : GrB_SUCCESS) \
) \
) \
) \
)
//------------------------------------------------------------------------------
// GB_Iterator_rc_inext: move a row/col iterator the next entry in the vector
//------------------------------------------------------------------------------
#define GB_Iterator_rc_inext(iterator) \
( \
/* move to the next entry in the vector */ \
(++(iterator->p) >= iterator->pend) ? \
( \
/* no more entries in the current vector */ \
GrB_NO_VALUE \
) \
: \
( \
(iterator->A_sparsity == GxB_BITMAP) ? \
( \
/* the matrix is in bitmap form */ \
GB_Iterator_rc_bitmap_next (iterator) \
) \
: \
( \
GrB_SUCCESS \
) \
) \
)
//------------------------------------------------------------------------------
// GB_Iterator_rc_getj: get index of current vector for row/col iterator
//------------------------------------------------------------------------------

// Returns avdim (one past the last valid vector index) if the iterator is
// exhausted.  For hypersparse matrices the vector index is Ah [k]; otherwise
// the kth vector has index k.

#define GB_Iterator_rc_getj(iterator)                                         \
(                                                                             \
    (iterator->k >= iterator->anvec) ?                                        \
    (                                                                         \
        /* iterator is past the end of the matrix */                          \
        iterator->avdim                                                       \
    )                                                                         \
    :                                                                         \
    (                                                                         \
        (iterator->A_sparsity == GxB_HYPERSPARSE) ?                           \
        (                                                                     \
            /* return the name of kth vector: j = Ah [k] if it appears */     \
            iterator->Ah [iterator->k]                                        \
        )                                                                     \
        :                                                                     \
        (                                                                     \
            /* return the kth vector: j = k */                                \
            iterator->k                                                       \
        )                                                                     \
    )                                                                         \
)
//------------------------------------------------------------------------------
// GB_Iterator_rc_geti: return index of current entry for row/col iterator
//------------------------------------------------------------------------------
#define GB_Iterator_rc_geti(iterator) \
( \
(iterator->Ai != NULL) ? \
( \
iterator->Ai [iterator->p] \
) \
: \
( \
(iterator->p - iterator->pstart) \
) \
)
//==============================================================================
// GxB_rowIterator_*: iterate over the rows of a matrix
//==============================================================================
#undef GxB_rowIterator_attach
#undef GxB_rowIterator_kount
#undef GxB_rowIterator_seekRow
#undef GxB_rowIterator_kseek
#undef GxB_rowIterator_nextRow
#undef GxB_rowIterator_nextCol
#undef GxB_rowIterator_getRowIndex
#undef GxB_rowIterator_getColIndex
//------------------------------------------------------------------------------
// GxB_rowIterator_attach: attach a row iterator to a matrix
//------------------------------------------------------------------------------
// On input, the iterator must already exist, having been created by
// GxB_Iterator_new.
// GxB_rowIterator_attach attaches a row iterator to a matrix. If the iterator
// is already attached to a matrix, it is detached and then attached to the
// given matrix A.
// The following error conditions are returned:
// GrB_NULL_POINTER: if the iterator or A are NULL.
// GrB_INVALID_OBJECT: if the matrix A is invalid.
// GrB_NOT_IMPLEMENTED: if the matrix A cannot be iterated by row.
// GrB_OUT_OF_MEMORY: if the method runs out of memory.
// If successful, the row iterator is attached to the matrix, but not to any
// specific row. Use GxB_rowIterator_*seek* to move the iterator to a row.
GB_PUBLIC
GrB_Info GxB_rowIterator_attach
(
GxB_Iterator iterator,
GrB_Matrix A,
GrB_Descriptor desc
) ;
#define GxB_rowIterator_attach(iterator, A, desc) \
( \
GB_Iterator_attach (iterator, A, GxB_BY_ROW, desc) \
)
//------------------------------------------------------------------------------
// GxB_rowIterator_kount: upper bound on the # of nonempty rows of a matrix
//------------------------------------------------------------------------------
// On input, the row iterator must be attached to a matrix, but need not be at
// any specific row; results are undefined if this condition is not met.
// GxB_rowIterator_kount returns an upper bound on the # of non-empty rows of a
// matrix. A GraphBLAS library may always return this as simply nrows(A), but
// in some libraries, it may be a value between the # of rows with at least one
// entry, and nrows(A), inclusive. Any value in this range is a valid return
// value from this function.
// For SuiteSparse:GraphBLAS: If A is m-by-n, and sparse, bitmap, or full, then
// kount == m. If A is hypersparse, kount is the # of vectors held in the data
// structure for the matrix, some of which may be empty, and kount <= m.
GB_PUBLIC
GrB_Index GxB_rowIterator_kount (GxB_Iterator iterator) ;
#define GxB_rowIterator_kount(iterator) \
( \
(iterator)->anvec \
)
//------------------------------------------------------------------------------
// GxB_rowIterator_seekRow: move a row iterator to a different row of a matrix
//------------------------------------------------------------------------------
// On input, the row iterator must be attached to a matrix, but need not be at
// any specific row; results are undefined if this condition is not met.
// GxB_rowIterator_seekRow moves a row iterator to the first entry of A(row,:).
// If A(row,:) has no entries, the iterator may move to the first entry of next
// nonempty row i for some i > row. The row index can be determined by
// GxB_rowIterator_getRowIndex.
// For SuiteSparse:GraphBLAS: If the matrix is hypersparse, and the row
// does not appear in the hyperlist, then the iterator is moved to the first
// row after the given row that does appear in the hyperlist.
// The method is always successful; the following conditions are returned:
// GxB_EXHAUSTED: if the row index is >= nrows(A); the row iterator is
// exhausted, but is still attached to the matrix.
// GrB_NO_VALUE: if the row index is valid but A(row,:) has no entries; the
// row iterator is positioned at A(row,:).
// GrB_SUCCESS: if the row index is valid and A(row,:) has at least one
// entry. The row iterator is positioned at A(row,:).
// GxB_rowIterator_get* can be used to return the indices of
// the first entry in A(row,:), and GxB_Iterator_get* can
// return its value.
GB_PUBLIC
GrB_Info GxB_rowIterator_seekRow (GxB_Iterator iterator, GrB_Index row) ;
#define GxB_rowIterator_seekRow(iterator, row) \
( \
GB_Iterator_rc_seek (iterator, row, false) \
)
//------------------------------------------------------------------------------
// GxB_rowIterator_kseek: move a row iterator to a different row of a matrix
//------------------------------------------------------------------------------
// On input, the row iterator must be attached to a matrix, but need not be at
// any specific row; results are undefined if this condition is not met.
// GxB_rowIterator_kseek is identical to GxB_rowIterator_seekRow, except for
// how the row index is specified. The row is the kth non-empty row of A.
// More precisely, k is in the range 0 to kount-1, where kount is the value
// returned by GxB_rowIterator_kount.
GB_PUBLIC
GrB_Info GxB_rowIterator_kseek (GxB_Iterator iterator, GrB_Index k) ;
#define GxB_rowIterator_kseek(iterator, k) \
( \
GB_Iterator_rc_seek (iterator, k, true) \
)
//------------------------------------------------------------------------------
// GxB_rowIterator_nextRow: move a row iterator to the next row of a matrix
//------------------------------------------------------------------------------
// On input, the row iterator must already be attached to a matrix via a prior
// call to GxB_rowIterator_attach, and the iterator must be at a specific row,
// via a prior call to GxB_rowIterator_*seek* or GxB_rowIterator_nextRow;
// results are undefined if this condition is not met.
// If the row iterator is currently at A(row,:), it is moved to A(row+1,:),
// or to the first non-empty row after A(row,:), at the discretion of this
// method. That is, empty rows may be skipped.
// The method is always successful, and the return conditions are identical to
// the return conditions of GxB_rowIterator_seekRow.
GB_PUBLIC
GrB_Info GxB_rowIterator_nextRow (GxB_Iterator iterator) ;
#define GxB_rowIterator_nextRow(iterator) \
( \
GB_Iterator_rc_knext (iterator) \
)
//------------------------------------------------------------------------------
// GxB_rowIterator_nextCol: move a row iterator to the next entry in A(row,:)
//------------------------------------------------------------------------------
// On input, the row iterator must already be attached to a matrix via a prior
// call to GxB_rowIterator_attach, and the iterator must be at a specific row,
// via a prior call to GxB_rowIterator_*seek* or GxB_rowIterator_nextRow;
// results are undefined if this condition is not met.
// The method is always successful, and returns the following conditions:
// GrB_NO_VALUE: If the iterator is already exhausted, or if there is no
// entry in the current A(row,:).
// GrB_SUCCESS: If the row iterator has been moved to the next entry in
// A(row,:).
GB_PUBLIC
GrB_Info GxB_rowIterator_nextCol (GxB_Iterator iterator) ;
#define GxB_rowIterator_nextCol(iterator) \
( \
GB_Iterator_rc_inext ((iterator)) \
)
//------------------------------------------------------------------------------
// GxB_rowIterator_getRowIndex: get current row index of a row iterator
//------------------------------------------------------------------------------
// On input, the iterator must be already successfully attached to matrix as a
// row iterator; results are undefined if this condition is not met.
// The method returns nrows(A) if the iterator is exhausted, or the current
// row index otherwise. There need not be any entry in the current row.
// Zero is returned if the iterator is attached to the matrix but
// GxB_rowIterator_*seek* has not been called, but this does not mean the
// iterator is positioned at row zero.
GB_PUBLIC
GrB_Index GxB_rowIterator_getRowIndex (GxB_Iterator iterator) ;
#define GxB_rowIterator_getRowIndex(iterator) \
( \
GB_Iterator_rc_getj ((iterator)) \
)
//------------------------------------------------------------------------------
// GxB_rowIterator_getColIndex: get current column index of a row iterator
//------------------------------------------------------------------------------
// On input, the iterator must be already successfully attached to matrix as a
// row iterator, and in addition, the row iterator must be positioned at a
// valid entry present in the matrix. That is, the last call to
// GxB_rowIterator_*seek* or GxB_rowIterator_*next*, must have returned
// GrB_SUCCESS. Results are undefined if this condition is not met.
GB_PUBLIC
GrB_Index GxB_rowIterator_getColIndex (GxB_Iterator iterator) ;
// Macro form of the prototype above; expands to the "i" accessor that is
// shared with GxB_colIterator_getRowIndex.
#define GxB_rowIterator_getColIndex(iterator) \
( \
GB_Iterator_rc_geti ((iterator)) \
)
//==============================================================================
// GxB_colIterator_*: iterate over columns of a matrix
//==============================================================================
// The column iterator is analogous to the row iterator.
#undef GxB_colIterator_attach
#undef GxB_colIterator_kount
#undef GxB_colIterator_seekCol
#undef GxB_colIterator_kseek
#undef GxB_colIterator_nextCol
#undef GxB_colIterator_nextRow
#undef GxB_colIterator_getColIndex
#undef GxB_colIterator_getRowIndex
// GxB_colIterator_attach: attach a column iterator to a matrix
// Macro arguments are parenthesized throughout this group so that expression
// arguments (e.g. a conditional, or a dereferenced pointer) expand with the
// intended precedence (CERT PRE01-C).
GB_PUBLIC
GrB_Info GxB_colIterator_attach
(
    GxB_Iterator iterator,
    GrB_Matrix A,
    GrB_Descriptor desc
) ;
#define GxB_colIterator_attach(iterator, A, desc) \
( \
    GB_Iterator_attach ((iterator), (A), GxB_BY_COL, (desc)) \
)
// GxB_colIterator_kount: return # of nonempty columns of the matrix
GB_PUBLIC
GrB_Index GxB_colIterator_kount (GxB_Iterator iterator) ;
#define GxB_colIterator_kount(iterator) \
( \
    (iterator)->anvec \
)
// GxB_colIterator_seekCol: move a column iterator to A(:,col)
GB_PUBLIC
GrB_Info GxB_colIterator_seekCol (GxB_Iterator iterator, GrB_Index col) ;
#define GxB_colIterator_seekCol(iterator, col) \
( \
    GB_Iterator_rc_seek ((iterator), (col), false) \
)
// GxB_colIterator_kseek: move a column iterator to kth non-empty column of A
GB_PUBLIC
GrB_Info GxB_colIterator_kseek (GxB_Iterator iterator, GrB_Index k) ;
#define GxB_colIterator_kseek(iterator, k) \
( \
    GB_Iterator_rc_seek ((iterator), (k), true) \
)
// GxB_colIterator_nextCol: move a column iterator to first entry of next column
GB_PUBLIC
GrB_Info GxB_colIterator_nextCol (GxB_Iterator iterator) ;
#define GxB_colIterator_nextCol(iterator) \
( \
    GB_Iterator_rc_knext ((iterator)) \
)
// GxB_colIterator_nextRow: move a column iterator to next entry in column
GB_PUBLIC
GrB_Info GxB_colIterator_nextRow (GxB_Iterator iterator) ;
#define GxB_colIterator_nextRow(iterator) \
( \
    GB_Iterator_rc_inext ((iterator)) \
)
// GxB_colIterator_getColIndex: return the column index of current entry
GB_PUBLIC
GrB_Index GxB_colIterator_getColIndex (GxB_Iterator iterator) ;
#define GxB_colIterator_getColIndex(iterator) \
( \
    GB_Iterator_rc_getj ((iterator)) \
)
// GxB_colIterator_getRowIndex: return the row index of current entry
GB_PUBLIC
GrB_Index GxB_colIterator_getRowIndex (GxB_Iterator iterator) ;
#define GxB_colIterator_getRowIndex(iterator) \
( \
    GB_Iterator_rc_geti ((iterator)) \
)
//==============================================================================
// GxB_Matrix_Iterator_*: iterate over the entries of a matrix
//==============================================================================
// Example usage:
// single thread iteration of a whole matrix, one entry at a time
/*
// create an iterator
GxB_Iterator iterator ;
GxB_Iterator_new (&iterator) ;
// attach it to the matrix A, known to be type GrB_FP64
GrB_Info info = GxB_Matrix_Iterator_attach (iterator, A, NULL) ;
if (info < 0) { handle the failure ... }
// seek to the first entry
info = GxB_Matrix_Iterator_seek (iterator, 0) ;
while (info != GxB_EXHAUSTED)
{
// get the entry A(i,j)
GrB_Index i, j ;
GxB_Matrix_Iterator_getIndex (iterator, &i, &j) ;
double aij = GxB_Iterator_get_FP64 (iterator) ;
// move to the next entry in A
info = GxB_Matrix_Iterator_next (iterator) ;
}
GrB_free (&iterator) ;
*/
//------------------------------------------------------------------------------
// GxB_Matrix_Iterator_attach: attach an entry iterator to a matrix
//------------------------------------------------------------------------------
// On input, the iterator must already exist, having been created by
// GxB_Iterator_new.
// GxB_Matrix_Iterator_attach attaches an entry iterator to a matrix. If the
// iterator is already attached to a matrix, it is detached and then attached
// to the given matrix A.
// The following error conditions are returned:
// GrB_NULL_POINTER: if the iterator or A are NULL.
// GrB_INVALID_OBJECT: if the matrix A is invalid.
// GrB_OUT_OF_MEMORY: if the method runs out of memory.
// If successful, the entry iterator is attached to the matrix, but not to any
// specific entry. Use GxB_Matrix_Iterator_*seek* to move the iterator to a
// particular entry.
GB_PUBLIC
GrB_Info GxB_Matrix_Iterator_attach
(
GxB_Iterator iterator,
GrB_Matrix A,
GrB_Descriptor desc
) ;
//------------------------------------------------------------------------------
// GxB_Matrix_Iterator_getpmax: return the range of the iterator
//------------------------------------------------------------------------------
// On input, the entry iterator must be already attached to a matrix via
// GxB_Matrix_Iterator_attach; results are undefined if this condition is not
// met.
// Entries in a matrix are given an index p, ranging from 0 to pmax-1, where
// pmax >= nvals(A). For sparse, hypersparse, and full matrices, pmax is equal
// to nvals(A). For an m-by-n bitmap matrix, pmax=m*n, or pmax=0 if the
// matrix has no entries.
GB_PUBLIC
GrB_Index GxB_Matrix_Iterator_getpmax (GxB_Iterator iterator) ;
//------------------------------------------------------------------------------
// GxB_Matrix_Iterator_seek: seek to a specific entry
//------------------------------------------------------------------------------
// On input, the entry iterator must be already attached to a matrix via
// GxB_Matrix_Iterator_attach; results are undefined if this condition is not
// met.
// The input p is in range 0 to pmax-1, which points to an entry in the matrix,
// or p >= pmax if the iterator is exhausted, where pmax is the return value
// from GxB_Matrix_Iterator_getpmax.
// Returns GrB_SUCCESS if the iterator is at an entry that exists in the
// matrix, or GxB_EXHAUSTED if the iterator is exhausted.
GB_PUBLIC
GrB_Info GxB_Matrix_Iterator_seek (GxB_Iterator iterator, GrB_Index p) ;
//------------------------------------------------------------------------------
// GxB_Matrix_Iterator_next: move to the next entry of a matrix
//------------------------------------------------------------------------------
// On input, the entry iterator must be already attached to a matrix via
// GxB_Matrix_Iterator_attach, and the position of the iterator must also have
// been defined by a prior call to GxB_Matrix_Iterator_seek or
// GxB_Matrix_Iterator_next. Results are undefined if these conditions are not
// met.
// Returns GrB_SUCCESS if the iterator is at an entry that exists in the
// matrix, or GxB_EXHAUSTED if the iterator is exhausted.
GB_PUBLIC
GrB_Info GxB_Matrix_Iterator_next (GxB_Iterator iterator) ;
//------------------------------------------------------------------------------
// GxB_Matrix_Iterator_getp: get the current position of a matrix iterator
//------------------------------------------------------------------------------
// On input, the entry iterator must be already attached to a matrix via
// GxB_Matrix_Iterator_attach, and the position of the iterator must also have
// been defined by a prior call to GxB_Matrix_Iterator_seek or
// GxB_Matrix_Iterator_next. Results are undefined if these conditions are not
// met.
GB_PUBLIC
GrB_Index GxB_Matrix_Iterator_getp (GxB_Iterator iterator) ;
//------------------------------------------------------------------------------
// GxB_Matrix_Iterator_getIndex: get the row and column index of a matrix entry
//------------------------------------------------------------------------------
// On input, the entry iterator must be already attached to a matrix via
// GxB_Matrix_Iterator_attach, and the position of the iterator must also have
// been defined by a prior call to GxB_Matrix_Iterator_seek or
// GxB_Matrix_Iterator_next, with a return value of GrB_SUCCESS. Results are
// undefined if these conditions are not met.
GB_PUBLIC
void GxB_Matrix_Iterator_getIndex
(
GxB_Iterator iterator,
GrB_Index *row,
GrB_Index *col
) ;
//==============================================================================
// GxB_Vector_Iterator_*: iterate over the entries of a vector
//==============================================================================
/* Example usage:
single thread iteration of a whole vector, one entry at a time
// create an iterator
GxB_Iterator iterator ;
GxB_Iterator_new (&iterator) ;
// attach it to the vector v, known to be type GrB_FP64
GrB_Info info = GxB_Vector_Iterator_attach (iterator, v, NULL) ;
if (info < 0) { handle the failure ... }
// seek to the first entry
info = GxB_Vector_Iterator_seek (iterator, 0) ;
while (info != GxB_EXHAUSTED)
{
// get the entry v(i)
GrB_Index i = GxB_Vector_Iterator_getIndex (iterator) ;
double vi = GxB_Iterator_get_FP64 (iterator) ;
// move to the next entry in v
info = GxB_Vector_Iterator_next (iterator) ;
}
GrB_free (&iterator) ;
*/
#undef GxB_Vector_Iterator_getpmax
#undef GxB_Vector_Iterator_seek
#undef GxB_Vector_Iterator_next
#undef GxB_Vector_Iterator_getp
#undef GxB_Vector_Iterator_getIndex
//------------------------------------------------------------------------------
// GxB_Vector_Iterator_attach: attach an iterator to a vector
//------------------------------------------------------------------------------
// On input, the iterator must already exist, having been created by
// GxB_Iterator_new.
// GxB_Vector_Iterator_attach attaches an iterator to a vector. If the
// iterator is already attached to a vector or matrix, it is detached and then
// attached to the given vector v.
// The following error conditions are returned:
// GrB_NULL_POINTER: if the iterator or v are NULL.
// GrB_INVALID_OBJECT: if the vector v is invalid.
// GrB_OUT_OF_MEMORY: if the method runs out of memory.
// If successful, the iterator is attached to the vector, but not to any
// specific entry. Use GxB_Vector_Iterator_seek to move the iterator to a
// particular entry.
GB_PUBLIC GrB_Info GxB_Vector_Iterator_attach
(
GxB_Iterator iterator,
GrB_Vector v,
GrB_Descriptor desc
) ;
//------------------------------------------------------------------------------
// GxB_Vector_Iterator_getpmax: return the range of the vector iterator
//------------------------------------------------------------------------------
// On input, the iterator must be already attached to a vector via
// GxB_Vector_Iterator_attach; results are undefined if this condition is not
// met.
// Entries in a vector are given an index p, ranging from 0 to pmax-1, where
// pmax >= nvals(v). For sparse and full vectors, pmax is equal to nvals(v).
// For a size-m bitmap vector, pmax=m, or pmax=0 if the vector has no entries.
GB_PUBLIC
GrB_Index GxB_Vector_Iterator_getpmax (GxB_Iterator iterator) ;
// The macro argument is parenthesized so that expression arguments such as
// (*pp) or (flag ? it1 : it2) expand with the intended precedence; the
// original (iterator->pmax) miscompiles for any non-identifier argument.
#define GxB_Vector_Iterator_getpmax(iterator) \
( \
    ((iterator)->pmax) \
)
//------------------------------------------------------------------------------
// GxB_Vector_Iterator_seek: seek to a specific entry in the vector
//------------------------------------------------------------------------------
// On input, the iterator must be already attached to a vector via
// GxB_Vector_Iterator_attach; results are undefined if this condition is not
// met.
// The input p is in range 0 to pmax-1, which points to an entry in the vector,
// or p >= pmax if the iterator is exhausted, where pmax is the return value
// from GxB_Vector_Iterator_getpmax.
// Returns GrB_SUCCESS if the iterator is at an entry that exists in the
// vector, or GxB_EXHAUSTED if the iterator is exhausted.
GB_PUBLIC
GrB_Info GB_Vector_Iterator_bitmap_seek (GxB_Iterator iterator, GrB_Index p) ;
GB_PUBLIC
GrB_Info GxB_Vector_Iterator_seek (GxB_Iterator iterator, GrB_Index p) ;
// Macro arguments are parenthesized so expression arguments expand with the
// intended precedence. NOTE: (iterator) and (q) are evaluated more than once
// here, so arguments must not have side effects.
#define GB_Vector_Iterator_seek(iterator, q) \
( \
    ((q) >= (iterator)->pmax) ? \
    ( \
        /* the iterator is exhausted */ \
        (iterator)->p = (iterator)->pmax, \
        GxB_EXHAUSTED \
    ) \
    : \
    ( \
        /* seek to an arbitrary position in the vector */ \
        (iterator)->p = (q), \
        ((iterator)->A_sparsity == GxB_BITMAP) ? \
        ( \
            /* bitmap vectors must advance to the next set bit */ \
            GB_Vector_Iterator_bitmap_seek ((iterator), (q)) \
        ) \
        : \
        ( \
            GrB_SUCCESS \
        ) \
    ) \
)
#define GxB_Vector_Iterator_seek(iterator, p) \
( \
    GB_Vector_Iterator_seek ((iterator), (p)) \
)
//------------------------------------------------------------------------------
// GxB_Vector_Iterator_next: move to the next entry of a vector
//------------------------------------------------------------------------------
// On input, the iterator must be already attached to a vector via
// GxB_Vector_Iterator_attach, and the position of the iterator must also have
// been defined by a prior call to GxB_Vector_Iterator_seek or
// GxB_Vector_Iterator_next. Results are undefined if these conditions are not
// met.
// Returns GrB_SUCCESS if the iterator is at an entry that exists in the
// vector, or GxB_EXHAUSTED if the iterator is exhausted.
GB_PUBLIC
GrB_Info GxB_Vector_Iterator_next (GxB_Iterator iterator) ;
// The macro argument is parenthesized so pointer-valued expression arguments
// expand correctly; the original ++(iterator->p) miscompiles for arguments
// like (*pp). NOTE: (iterator) is evaluated more than once, so it must not
// have side effects.
#define GB_Vector_Iterator_next(iterator) \
( \
    /* move to the next entry */ \
    (++((iterator)->p) >= (iterator)->pmax) ? \
    ( \
        /* the iterator is exhausted; pin p at pmax */ \
        (iterator)->p = (iterator)->pmax, \
        GxB_EXHAUSTED \
    ) \
    : \
    ( \
        GrB_SUCCESS \
    ) \
)
#define GxB_Vector_Iterator_next(iterator) \
( \
    GB_Vector_Iterator_next ((iterator)) \
)
//------------------------------------------------------------------------------
// GxB_Vector_Iterator_getp: get the current position of a vector iterator
//------------------------------------------------------------------------------
// On input, the iterator must be already attached to a vector via
// GxB_Vector_Iterator_attach, and the position of the iterator must also have
// been defined by a prior call to GxB_Vector_Iterator_seek or
// GxB_Vector_Iterator_next. Results are undefined if these conditions are not
// met.
GB_PUBLIC
GrB_Index GxB_Vector_Iterator_getp (GxB_Iterator iterator) ;
// The macro argument is parenthesized so that any pointer-valued expression
// may be passed; the original (iterator->p) miscompiles for e.g. (*pp).
#define GxB_Vector_Iterator_getp(iterator) \
( \
    ((iterator)->p) \
)
//------------------------------------------------------------------------------
// GxB_Vector_Iterator_getIndex: get the index of a vector entry
//------------------------------------------------------------------------------
// On input, the iterator must be already attached to a vector via
// GxB_Vector_Iterator_attach, and the position of the iterator must also have
// been defined by a prior call to GxB_Vector_Iterator_seek or
// GxB_Vector_Iterator_next, with a return value of GrB_SUCCESS. Results are
// undefined if these conditions are not met.
GB_PUBLIC
GrB_Index GxB_Vector_Iterator_getIndex (GxB_Iterator iterator) ;
// The macro argument is parenthesized for safe expansion. Returns
// Ai [p] when the index array Ai is present, or the position p itself
// when Ai is NULL.
#define GxB_Vector_Iterator_getIndex(iterator) \
( \
    (((iterator)->Ai != NULL) ? (iterator)->Ai [(iterator)->p] : (iterator)->p) \
)
//==============================================================================
// GxB_Iterator_get_TYPE: get value of the current entry for any iterator
//==============================================================================
// On input, the prior call to GxB_*Iterator_*seek*, or GxB_*Iterator_*next*
// must have returned GrB_SUCCESS, indicating that the iterator is at a valid
// current entry for either a matrix or vector.
// Returns the value of the current entry at the position determined by the
// iterator. No typecasting is permitted; the method name must match the
// type of the matrix or vector.
#undef GxB_Iterator_get_BOOL
#undef GxB_Iterator_get_INT8
#undef GxB_Iterator_get_INT16
#undef GxB_Iterator_get_INT32
#undef GxB_Iterator_get_INT64
#undef GxB_Iterator_get_UINT8
#undef GxB_Iterator_get_UINT16
#undef GxB_Iterator_get_UINT32
#undef GxB_Iterator_get_UINT64
#undef GxB_Iterator_get_FP32
#undef GxB_Iterator_get_FP64
#undef GxB_Iterator_get_FC32
#undef GxB_Iterator_get_FC64
#undef GxB_Iterator_get_UDT
GB_PUBLIC bool GxB_Iterator_get_BOOL (GxB_Iterator iterator) ;
GB_PUBLIC int8_t GxB_Iterator_get_INT8 (GxB_Iterator iterator) ;
GB_PUBLIC int16_t GxB_Iterator_get_INT16 (GxB_Iterator iterator) ;
GB_PUBLIC int32_t GxB_Iterator_get_INT32 (GxB_Iterator iterator) ;
GB_PUBLIC int64_t GxB_Iterator_get_INT64 (GxB_Iterator iterator) ;
GB_PUBLIC uint8_t GxB_Iterator_get_UINT8 (GxB_Iterator iterator) ;
GB_PUBLIC uint16_t GxB_Iterator_get_UINT16 (GxB_Iterator iterator) ;
GB_PUBLIC uint32_t GxB_Iterator_get_UINT32 (GxB_Iterator iterator) ;
GB_PUBLIC uint64_t GxB_Iterator_get_UINT64 (GxB_Iterator iterator) ;
GB_PUBLIC float GxB_Iterator_get_FP32 (GxB_Iterator iterator) ;
GB_PUBLIC double GxB_Iterator_get_FP64 (GxB_Iterator iterator) ;
GB_PUBLIC GxB_FC32_t GxB_Iterator_get_FC32 (GxB_Iterator iterator) ;
GB_PUBLIC GxB_FC64_t GxB_Iterator_get_FC64 (GxB_Iterator iterator) ;
GB_PUBLIC void GxB_Iterator_get_UDT (GxB_Iterator iterator,
void *value) ;
// Read the current entry: for an iso-valued object every entry shares the
// value at position 0; otherwise index by the iterator position p.
#define GB_Iterator_get(iterator, type) \
( \
    (((type *) (iterator)->Ax) [(iterator)->iso ? 0 : (iterator)->p]) \
)
#define GxB_Iterator_get_BOOL(iterator) GB_Iterator_get (iterator, bool)
#define GxB_Iterator_get_INT8(iterator) GB_Iterator_get (iterator, int8_t)
#define GxB_Iterator_get_INT16(iterator) GB_Iterator_get (iterator, int16_t)
#define GxB_Iterator_get_INT32(iterator) GB_Iterator_get (iterator, int32_t)
#define GxB_Iterator_get_INT64(iterator) GB_Iterator_get (iterator, int64_t)
#define GxB_Iterator_get_UINT8(iterator) GB_Iterator_get (iterator, uint8_t)
#define GxB_Iterator_get_UINT16(iterator) GB_Iterator_get (iterator, uint16_t)
#define GxB_Iterator_get_UINT32(iterator) GB_Iterator_get (iterator, uint32_t)
#define GxB_Iterator_get_UINT64(iterator) GB_Iterator_get (iterator, uint64_t)
#define GxB_Iterator_get_FP32(iterator) GB_Iterator_get (iterator, float)
#define GxB_Iterator_get_FP64(iterator) GB_Iterator_get (iterator, double)
#define GxB_Iterator_get_FC32(iterator) GB_Iterator_get (iterator, GxB_FC32_t)
#define GxB_Iterator_get_FC64(iterator) GB_Iterator_get (iterator, GxB_FC64_t)
// (value) is parenthesized so the (void *) cast applies to the whole
// argument; the original "(void *) value" bound the cast to only the first
// token of an expression argument (e.g. "buf + 1" became byte arithmetic
// on a void pointer).
#define GxB_Iterator_get_UDT(iterator, value) \
( \
    (void) memcpy ((void *) (value), ((const uint8_t *) ((iterator)->Ax)) + \
        ((iterator)->iso ? 0 : ((iterator)->type_size * (iterator)->p)), \
        (iterator)->type_size) \
)
#endif
|
kernel_cpu_2.c | // #ifdef __cplusplus
// extern "C" {
// #endif
//========================================================================================================================================================================================================200
// DEFINE/INCLUDE
//========================================================================================================================================================================================================200
//======================================================================================================================================================150
// LIBRARIES
//======================================================================================================================================================150
#include <omp.h>									// (in directory known to compiler)
#include <stdio.h>									// printf (include explicitly rather than relying on transitive includes)
#include <stdlib.h>									// (in directory known to compiler)
//======================================================================================================================================================150
// COMMON
//======================================================================================================================================================150
#include "../common.h" // (in directory provided here)
//======================================================================================================================================================150
// UTILITIES
//======================================================================================================================================================150
#include "../util/timer/timer.h" // (in directory provided here) needed by timer
//======================================================================================================================================================150
// HEADER
//======================================================================================================================================================150
#include "./kernel_cpu_2.h" // (in directory provided here)
//========================================================================================================================================================================================================200
// PLASMAKERNEL_GPU
//========================================================================================================================================================================================================200
void
kernel_cpu_2(	int cores_arg,
				knode *knodes,
				long knodes_elem,
				int order,
				long maxheight,
				int count,
				long *currKnode,
				long *offset,
				long *lastKnode,
				long *offset_2,
				int *start,
				int *end,
				int *recstart,
				int *reclength)
{

	// Runs "count" range queries against the B+ tree stored in "knodes":
	// for query bid, descend maxheight levels following start[bid] and
	// end[bid], then record the first matching record index (recstart[bid])
	// and the length of the matching record range (reclength[bid]).

	//======================================================================================================================================================150
	//	Variables
	//======================================================================================================================================================150

	// timers
	long long time0;
	long long time1;
	long long time2;

	// common variables
	int i;

	time0 = get_time();

	//======================================================================================================================================================150
	//	MCPU SETUP
	//======================================================================================================================================================150

	omp_set_num_threads(cores_arg);
	// printf("set # of threads = %d\n", cores_arg);

	// emulate the GPU thread-block width: one iteration per key slot,
	// capped at 1024 like a CUDA thread block
	int threadsPerBlock;
	threadsPerBlock = order < 1024 ? order : 1024;

	time1 = get_time();

	//======================================================================================================================================================150
	//	PROCESS INTERACTIONS
	//======================================================================================================================================================150

	// private thread IDs
	int thid;
	int bid;

	// process number of queries
	#pragma omp parallel for private (i, thid)
	for(bid = 0; bid < count; bid++){

		// process levels of the tree
		for(i = 0; i < maxheight; i++){

			// process all leaves at each level
			// NOTE(review): keys[thid+1] reads one slot past threadsPerBlock-1;
			// this assumes the knode keys array has order+1 slots -- confirm
			// against the knode type declaration.
			for(thid = 0; thid < threadsPerBlock; thid++){

				if((knodes[currKnode[bid]].keys[thid] <= start[bid]) && (knodes[currKnode[bid]].keys[thid+1] > start[bid])){
					// this bounds check is inserted to avoid a crash due to a bug in
					// the original code: values saved into knodes->indices in the
					// main function can lie outside knodes, and "offset[bid]"
					// computed here would later address memory out of bounds,
					// causing a segmentation fault
					if(knodes[currKnode[bid]].indices[thid] < knodes_elem){
						offset[bid] = knodes[currKnode[bid]].indices[thid];
					}
				}
				if((knodes[lastKnode[bid]].keys[thid] <= end[bid]) && (knodes[lastKnode[bid]].keys[thid+1] > end[bid])){
					// same out-of-bounds guard as above, for the end of the range
					if(knodes[lastKnode[bid]].indices[thid] < knodes_elem){
						offset_2[bid] = knodes[lastKnode[bid]].indices[thid];
					}
				}
			}

			// set for next tree level
			currKnode[bid] = offset[bid];
			lastKnode[bid] = offset_2[bid];
		}

		// process leaves: find the index of the starting record
		for(thid = 0; thid < threadsPerBlock; thid++){
			if(knodes[currKnode[bid]].keys[thid] == start[bid]){
				recstart[bid] = knodes[currKnode[bid]].indices[thid];
			}
		}

		// process leaves: find the index of the ending record
		for(thid = 0; thid < threadsPerBlock; thid++){
			if(knodes[lastKnode[bid]].keys[thid] == end[bid]){
				reclength[bid] = knodes[lastKnode[bid]].indices[thid] - recstart[bid]+1;
			}
		}
	}

	time2 = get_time();

	//======================================================================================================================================================150
	//	DISPLAY TIMING
	//======================================================================================================================================================150

	// NOTE: the original format strings contained a bare "% :" which is an
	// invalid conversion specification (undefined behavior, C11 7.21.6.1);
	// a literal percent sign must be written as "%%".
	printf("Time spent in different stages of CPU/MCPU KERNEL:\n");
	printf("%15.12f s, %15.12f %% : MCPU: SET DEVICE\n",			(float) (time1-time0) / 1000000, (float) (time1-time0) / (float) (time2-time0) * 100);
	printf("%15.12f s, %15.12f %% : CPU/MCPU: KERNEL\n",			(float) (time2-time1) / 1000000, (float) (time2-time1) / (float) (time2-time0) * 100);
	printf("Total time:\n");
	printf("%.12f s\n", 											(float) (time2-time0) / 1000000);

} // kernel_cpu_2
//========================================================================================================================================================================================================200
// END
//========================================================================================================================================================================================================200
// #ifdef __cplusplus
// }
// #endif
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTFwd.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/DarwinSDKInfo.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/SemaConcept.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include <deque>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
enum class OverloadCandidateParamOrder : char;
enum OverloadCandidateRewriteKind : unsigned;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
  /// The first pointer declarator (of any pointer kind) in the file that does
  /// not have a corresponding nullability annotation.
  SourceLocation PointerLoc;

  /// The end location for the first pointer declarator in the file. Used for
  /// placing fix-its.
  SourceLocation PointerEndLoc;

  /// Which kind of pointer declarator we saw. The meaning of the encoded
  /// value is defined by the code that fills this in — see uses in Sema.
  uint8_t PointerKind;

  /// Whether we saw any type nullability annotations in the given file.
  bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
  /// Backing store: nullability state for every file ID seen so far.
  llvm::DenseMap<FileID, FileNullability> Storage;

  /// One-entry cache holding the state of the most recently requested file.
  struct {
    FileID File;
    FileNullability Nullability;
  } LastUsed;

public:
  FileNullability &operator[](FileID file) {
    // Fast path: same file as the previous lookup.
    if (file == LastUsed.File)
      return LastUsed.Nullability;

    // Different file: write the cached entry back before replacing it.
    if (!LastUsed.File.isInvalid())
      Storage[LastUsed.File] = LastUsed.Nullability;

    // Load the requested entry (default-constructing it if this is the
    // first time we see the file) into the cache and hand it out.
    LastUsed.File = file;
    LastUsed.Nullability = Storage[file];
    return LastUsed.Nullability;
  }
};
// TODO SYCL Integration header approach relies on an assumption that kernel
// lambda objects created by the host compiler and any of the device compilers
// will be identical with respect to field types, order and offsets. Some verification
// mechanism should be developed to enforce that.
// TODO FIXME SYCL Support for SYCL in FE should be refactored:
// - kernel identification and generation should be made a separate pass over
// AST. RecursiveASTVisitor + VisitFunctionTemplateDecl +
// FunctionTemplateDecl::getSpecializations() mechanism could be used for that.
// - All SYCL stuff on Sema level should be encapsulated into a single Sema
// field
// - Move SYCL stuff into a separate header
// Represents contents of a SYCL integration header file produced by a SYCL
// device compiler and used by SYCL host compiler (via forced inclusion into
// compiled SYCL source):
// - SYCL kernel names
// - SYCL kernel parameters and offsets of corresponding actual arguments
class SYCLIntegrationHeader {
public:
  // Kind of kernel's parameters as captured by the compiler in the
  // kernel lambda or function object
  enum kernel_param_kind_t {
    kind_first,
    kind_accessor = kind_first,
    kind_std_layout,
    kind_sampler,
    kind_pointer,
    kind_specialization_constants_buffer,
    kind_stream,
    kind_last = kind_stream
  };

public:
  SYCLIntegrationHeader(Sema &S);

  /// Emits contents of the header into given stream.
  void emit(raw_ostream &Out);

  /// Emits contents of the header into a file with given name.
  /// Returns true/false on success/failure.
  bool emit(StringRef MainSrc);

  /// Signals that subsequent parameter descriptor additions will go to
  /// the kernel with given name. Starts new kernel invocation descriptor.
  void startKernel(const FunctionDecl *SyclKernel, QualType KernelNameType,
                   SourceLocation Loc, bool IsESIMD, bool IsUnnamedKernel);

  /// Adds a kernel parameter descriptor to current kernel invocation
  /// descriptor.
  void addParamDesc(kernel_param_kind_t Kind, int Info, unsigned Offset);

  /// Signals that addition of parameter descriptors to current kernel
  /// invocation descriptor has finished.
  void endKernel();

  /// Registers a specialization constant to emit info for it into the header.
  void addSpecConstant(StringRef IDName, QualType IDType);

  /// Update the names of a kernel description based on its SyclKernel.
  void updateKernelNames(const FunctionDecl *SyclKernel, StringRef Name,
                         StringRef StableName) {
    // Find the (unique) descriptor previously registered for this kernel
    // via startKernel; asserting if none was registered.
    auto Itr = llvm::find_if(KernelDescs, [SyclKernel](const KernelDesc &KD) {
      return KD.SyclKernel == SyclKernel;
    });

    assert(Itr != KernelDescs.end() && "Unknown kernel description");
    Itr->updateKernelNames(Name, StableName);
  }

  /// Note which free functions (this_id, this_item, etc) are called within the
  /// kernel
  void setCallsThisId(bool B);
  void setCallsThisItem(bool B);
  void setCallsThisNDItem(bool B);
  void setCallsThisGroup(bool B);

private:
  // Kernel actual parameter descriptor.
  struct KernelParamDesc {
    // Represents a parameter kind.
    kernel_param_kind_t Kind = kind_last;

    // If Kind is kind_scalar or kind_struct, then
    // denotes parameter size in bytes (includes padding for structs)
    // If Kind is kind_accessor
    // denotes access target; possible access targets are defined in
    // access/access.hpp
    int Info = 0;

    // Offset of the captured parameter value in the lambda or function object.
    unsigned Offset = 0;

    KernelParamDesc() = default;
  };

  // There are four free functions the kernel may call (this_id, this_item,
  // this_nd_item, this_group).
  struct KernelCallsSYCLFreeFunction {
    bool CallsThisId = false;
    bool CallsThisItem = false;
    bool CallsThisNDItem = false;
    bool CallsThisGroup = false;
  };

  // Kernel invocation descriptor
  struct KernelDesc {
    /// sycl_kernel function associated with this kernel.
    const FunctionDecl *SyclKernel;

    /// Kernel name.
    std::string Name;

    /// Kernel name type.
    QualType NameType;

    /// Kernel name with stable lambda name mangling
    std::string StableName;

    /// Source location of the kernel.
    SourceLocation KernelLocation;

    /// Whether this kernel is an ESIMD one.
    bool IsESIMDKernel;

    /// Descriptor of kernel actual parameters.
    SmallVector<KernelParamDesc, 8> Params;

    // Whether kernel calls any of the SYCL free functions (this_item(),
    // this_id(), etc)
    KernelCallsSYCLFreeFunction FreeFunctionCalls;

    // If we are in unnamed kernel/lambda mode AND this is one that the user
    // hasn't provided an explicit name for.
    bool IsUnnamedKernel;

    // NOTE: Name and StableName are not set here; they are filled in later
    // through updateKernelNames and remain empty until then.
    KernelDesc(const FunctionDecl *SyclKernel, QualType NameType,
               SourceLocation KernelLoc, bool IsESIMD, bool IsUnnamedKernel)
        : SyclKernel(SyclKernel), NameType(NameType), KernelLocation(KernelLoc),
          IsESIMDKernel(IsESIMD), IsUnnamedKernel(IsUnnamedKernel) {}

    void updateKernelNames(StringRef Name, StringRef StableName) {
      this->Name = Name.str();
      this->StableName = StableName.str();
    }
  };

  /// Returns the latest invocation descriptor started by
  /// SYCLIntegrationHeader::startKernel, or null when no kernel has been
  /// started yet.
  KernelDesc *getCurKernelDesc() {
    return KernelDescs.size() > 0 ? &KernelDescs[KernelDescs.size() - 1]
                                  : nullptr;
  }

private:
  /// Keeps invocation descriptors for each kernel invocation started by
  /// SYCLIntegrationHeader::startKernel
  SmallVector<KernelDesc, 4> KernelDescs;

  using SpecConstID = std::pair<QualType, std::string>;

  /// Keeps specialization constants met in the translation unit. Maps spec
  /// constant's ID type to generated unique name. Duplicates are removed at
  /// integration header emission time.
  llvm::SmallVector<SpecConstID, 4> SpecConsts;

  /// The Sema instance this header generator is attached to.
  Sema &S;
};
// Represents contents of a SYCL integration footer produced by the SYCL
// device compiler; collects variable declarations (specialization constants)
// registered via addVarDecl and emits them at end of translation.
class SYCLIntegrationFooter {
public:
  SYCLIntegrationFooter(Sema &S) : S(S) {}

  /// Emits contents of the footer into a file with the given name.
  /// Presumably returns success/failure like SYCLIntegrationHeader::emit —
  /// the definition is out of line; confirm there.
  bool emit(StringRef MainSrc);

  /// Registers a variable declaration for emission into the footer
  /// (collected in SpecConstants; definition is out of line).
  void addVarDecl(const VarDecl *VD);

private:
  bool emit(raw_ostream &O);
  Sema &S;

  /// Variable declarations collected via addVarDecl.
  llvm::SmallVector<const VarDecl *> SpecConstants;

  void emitSpecIDName(raw_ostream &O, const VarDecl *VD);
};
/// Tracks expected type during expression parsing, for use in code completion.
/// The type is tied to a particular token, all functions that update or consume
/// the type take a start location of the token they are looking at as a
/// parameter. This avoids updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
  PreferredTypeBuilder(bool Enabled) : Enabled(Enabled) {}

  void enterCondition(Sema &S, SourceLocation Tok);
  void enterReturn(Sema &S, SourceLocation Tok);
  void enterVariableInit(SourceLocation Tok, Decl *D);
  /// Handles e.g. BaseType{ .D = Tok...
  void enterDesignatedInitializer(SourceLocation Tok, QualType BaseType,
                                  const Designation &D);
  /// Computing a type for the function argument may require running
  /// overloading, so we postpone its computation until it is actually needed.
  ///
  /// Clients should be very careful when using this function, as it stores a
  /// function_ref, clients should make sure all calls to get() with the same
  /// location happen while function_ref is alive.
  ///
  /// The callback should also emit signature help as a side-effect, but only
  /// if the completion point has been reached.
  void enterFunctionArgument(SourceLocation Tok,
                             llvm::function_ref<QualType()> ComputeType);

  void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
  void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
                  SourceLocation OpLoc);
  void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
  void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
  void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
  /// Handles all type casts, including C-style cast, C++ casts, etc.
  void enterTypeCast(SourceLocation Tok, QualType CastType);

  /// Get the expected type associated with this location, if any.
  ///
  /// If the location is a function argument, determining the expected type
  /// involves considering all function overloads and the arguments so far.
  /// In this case, signature help for these function overloads will be reported
  /// as a side-effect (only if the completion point has been reached).
  QualType get(SourceLocation Tok) const {
    // Stored state is only valid for the exact token it was attached to.
    if (!Enabled || Tok != ExpectedLoc)
      return QualType();
    if (!Type.isNull())
      return Type;
    // Fall back to the lazily-computed type (set by enterFunctionArgument).
    if (ComputeType)
      return ComputeType();
    return QualType();
  }

private:
  bool Enabled;
  /// Start position of a token for which we store expected type.
  SourceLocation ExpectedLoc;
  /// Expected type for a token starting at ExpectedLoc.
  QualType Type;
  /// A function to compute expected type at ExpectedLoc. It is only considered
  /// if Type is null.
  llvm::function_ref<QualType()> ComputeType;
};
/// Sema - This implements semantic analysis and AST building for C.
class Sema final {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
///Source of additional semantic information.
ExternalSemaSource *ExternalSource;
///Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
                                  const NamedDecl *New) {
  // A visible previous declaration is always linked against.
  if (isVisible(Old))
    return true;

  // Otherwise, link only when the new declaration is externally declarable.
  // See comment in below overload for why it's safe to compute the linkage
  // of the new declaration here.
  if (!New->isExternallyDeclarable())
    return false;

  assert(Old->isExternallyDeclarable() &&
         "should not have found a non-externally-declarable previous decl");
  return true;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
/// The maximum alignment, same as in llvm::Value. We duplicate them here
/// because that allows us not to duplicate the constants in clang code,
/// which we must to since we can't directly use the llvm constants.
/// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp
///
/// This is the greatest alignment value supported by load, store, and alloca
/// instructions, and global values.
static const unsigned MaxAlignmentExponent = 32;
static const uint64_t MaximumAlignment = 1ull << MaxAlignmentExponent;
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions CurFPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// Holds TypoExprs that are created from `createDelayedTypo`. This is used by
/// `TransformTypos` in order to keep track of any TypoExprs that are created
/// recursively during typo correction and wipe them away if the correction
/// fails.
llvm::SmallVector<TypoExpr *, 2> TypoExprs;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4,
PCSK_Relro = 5
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
struct PragmaClangSection {
std::string SectionName;
bool Valid = false;
SourceLocation PragmaLocation;
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangRelroSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
// #pragma pack and align.
class AlignPackInfo {
public:
  // `Native` represents the default align mode, which may vary based on the
  // platform.
  enum Mode : unsigned char { Native, Natural, Packed, Mac68k };

  // #pragma pack info constructor
  AlignPackInfo(AlignPackInfo::Mode M, unsigned Num, bool IsXL)
      : PackAttr(true), AlignMode(M), PackNumber(Num), XLStack(IsXL) {
    assert(Num == PackNumber && "The pack number has been truncated.");
  }

  // #pragma align info constructor
  AlignPackInfo(AlignPackInfo::Mode M, bool IsXL)
      : PackAttr(false), AlignMode(M),
        PackNumber(M == Packed ? 1 : UninitPackVal), XLStack(IsXL) {}

  explicit AlignPackInfo(bool IsXL) : AlignPackInfo(Native, IsXL) {}

  AlignPackInfo() : AlignPackInfo(Native, false) {}

  // When an AlignPackInfo itself cannot be used, this returns a 32-bit
  // integer encoding for it. This should only be passed to
  // AlignPackInfo::getFromRawEncoding, it should not be inspected directly.
  //
  // Encoding layout: bit 0 = XL-stack flag, bits 1-2 = align mode,
  // bit 3 = pack-attribute flag, bits 4-8 = pack number.
  static uint32_t getRawEncoding(const AlignPackInfo &Info) {
    std::uint32_t Encoding{};
    if (Info.IsXLStack())
      Encoding |= IsXLMask;

    Encoding |= static_cast<uint32_t>(Info.getAlignMode()) << 1;

    if (Info.IsPackAttr())
      Encoding |= PackAttrMask;

    Encoding |= static_cast<uint32_t>(Info.getPackNumber()) << 4;
    return Encoding;
  }

  static AlignPackInfo getFromRawEncoding(unsigned Encoding) {
    bool IsXL = static_cast<bool>(Encoding & IsXLMask);
    AlignPackInfo::Mode M =
        static_cast<AlignPackInfo::Mode>((Encoding & AlignModeMask) >> 1);
    int PackNumber = (Encoding & PackNumMask) >> 4;

    // Only a pack attribute carries a meaningful pack number; the align
    // constructor recomputes it from the mode.
    if (Encoding & PackAttrMask)
      return AlignPackInfo(M, PackNumber, IsXL);

    return AlignPackInfo(M, IsXL);
  }

  bool IsPackAttr() const { return PackAttr; }

  bool IsAlignAttr() const { return !PackAttr; }

  Mode getAlignMode() const { return AlignMode; }

  unsigned getPackNumber() const { return PackNumber; }

  bool IsPackSet() const {
    // #pragma align, #pragma pack(), and #pragma pack(0) do not set the pack
    // attribute on a decl.
    return PackNumber != UninitPackVal && PackNumber != 0;
  }

  bool IsXLStack() const { return XLStack; }

  bool operator==(const AlignPackInfo &Info) const {
    return std::tie(AlignMode, PackNumber, PackAttr, XLStack) ==
           std::tie(Info.AlignMode, Info.PackNumber, Info.PackAttr,
                    Info.XLStack);
  }

  bool operator!=(const AlignPackInfo &Info) const {
    return !(*this == Info);
  }

private:
  /// \brief True if this is a pragma pack attribute,
  ///        not a pragma align attribute.
  bool PackAttr;

  /// \brief The alignment mode that is in effect.
  Mode AlignMode;

  /// \brief The pack number of the stack.
  unsigned char PackNumber;

  /// \brief True if it is a XL #pragma align/pack stack.
  bool XLStack;

  /// \brief Uninitialized pack value.
  static constexpr unsigned char UninitPackVal = -1;

  // Masks to encode and decode an AlignPackInfo. (PackAttrMask previously
  // used an inconsistent nine-digit spelling, 0x00000'0008; the value is
  // unchanged, only the digit grouping is normalized to match its siblings.)
  static constexpr uint32_t IsXLMask{0x0000'0001};
  static constexpr uint32_t AlignModeMask{0x0000'0006};
  static constexpr uint32_t PackAttrMask{0x0000'0008};
  static constexpr uint32_t PackNumMask{0x0000'01F0};
};
template<typename ValueType>
struct PragmaStack {
  /// One saved stack entry: the value in effect when the push happened,
  /// an optional user-supplied label, and the locations of the pragma that
  /// set the value and of the push itself.
  struct Slot {
    llvm::StringRef StackSlotLabel;
    ValueType Value;
    SourceLocation PragmaLocation;
    SourceLocation PragmaPushLocation;
    Slot(llvm::StringRef StackSlotLabel, ValueType Value,
         SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
        : StackSlotLabel(StackSlotLabel), Value(Value),
          PragmaLocation(PragmaLocation),
          PragmaPushLocation(PragmaPushLocation) {}
  };

  /// Perform a #pragma stack action: reset to default, push the current
  /// value, pop a saved value (optionally by label), and/or set a new
  /// current value. PSK_Push_Set / PSK_Pop_Set combine bits, which is why
  /// the final PSK_Set check is deliberately not an `else if`.
  void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action,
           llvm::StringRef StackSlotLabel, ValueType Value) {
    if (Action == PSK_Reset) {
      CurrentValue = DefaultValue;
      CurrentPragmaLocation = PragmaLocation;
      return;
    }
    if (Action & PSK_Push)
      Stack.emplace_back(StackSlotLabel, CurrentValue, CurrentPragmaLocation,
                         PragmaLocation);
    else if (Action & PSK_Pop) {
      if (!StackSlotLabel.empty()) {
        // If we've got a label, try to find it and jump there.
        auto I = llvm::find_if(llvm::reverse(Stack), [&](const Slot &x) {
          return x.StackSlotLabel == StackSlotLabel;
        });
        // If we found the label, pop from there, discarding every slot
        // pushed after it. An unknown label is silently ignored.
        if (I != Stack.rend()) {
          CurrentValue = I->Value;
          CurrentPragmaLocation = I->PragmaLocation;
          Stack.erase(std::prev(I.base()), Stack.end());
        }
      } else if (!Stack.empty()) {
        // We do not have a label, just pop the last entry.
        CurrentValue = Stack.back().Value;
        CurrentPragmaLocation = Stack.back().PragmaLocation;
        Stack.pop_back();
      }
    }
    if (Action & PSK_Set) {
      CurrentValue = Value;
      CurrentPragmaLocation = PragmaLocation;
    }
  }

  // MSVC seems to add artificial slots to #pragma stacks on entering a C++
  // method body to restore the stacks on exit, so it works like this:
  //
  //   struct S {
  //     #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
  //     void Method {}
  //     #pragma <name>(pop, InternalPragmaSlot)
  //   };
  //
  // It works even with #pragma vtordisp, although MSVC doesn't support
  //   #pragma vtordisp(push [, id], n)
  // syntax.
  //
  // Push / pop a named sentinel slot.
  void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
    assert((Action == PSK_Push || Action == PSK_Pop) &&
           "Can only push / pop #pragma stack sentinels!");
    Act(CurrentPragmaLocation, Action, Label, CurrentValue);
  }

  // Constructors.
  explicit PragmaStack(const ValueType &Default)
      : DefaultValue(Default), CurrentValue(Default) {}

  /// Whether the current value differs from the default (i.e. some pragma
  /// has changed it).
  bool hasValue() const { return CurrentValue != DefaultValue; }

  SmallVector<Slot, 2> Stack;
  ValueType DefaultValue; // Value used for PSK_Reset action.
  ValueType CurrentValue;
  SourceLocation CurrentPragmaLocation;
};
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispMode> VtorDispStack;
PragmaStack<AlignPackInfo> AlignPackStack;
// The current #pragma align/pack values and locations at each #include.
struct AlignPackIncludeState {
AlignPackInfo CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<AlignPackIncludeState, 8> AlignPackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// This stack tracks the current state of Sema.CurFPFeatures.
PragmaStack<FPOptionsOverride> FpPragmaStack;
FPOptionsOverride CurFPFeatureOverrides() {
  // The pragma stack only carries overrides once some pragma has changed
  // the value; otherwise report an empty override set.
  if (FpPragmaStack.hasValue())
    return FpPragmaStack.CurrentValue;
  return FPOptionsOverride();
}
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
  /// When \p ShouldAct is true, presumably pushes a sentinel labeled
  /// \p SlotLabel on the MS #pragma stacks, with the destructor popping it
  /// again — the definitions are out of line; confirm there.
  PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
  ~PragmaStackSentinelRAII();

private:
  Sema &S;
  StringRef SlotLabel;
  bool ShouldAct;
};
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
ParsedAttr *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
/// The location of the push attribute.
SourceLocation Loc;
/// The namespace of this push group.
const IdentifierInfo *Namespace;
SmallVector<PragmaAttributeEntry, 2> Entries;
};
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression.
SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SetVector<Expr *, SmallVector<Expr *, 4>,
llvm::SmallPtrSet<Expr *, 4>>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
/// The index of the first FunctionScope that corresponds to the current
/// context.
unsigned FunctionScopesStart = 0;
/// Returns the function scopes that belong to the current context, i.e.
/// the entries from FunctionScopesStart to the end of FunctionScopes.
ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const {
  return llvm::makeArrayRef(FunctionScopes.begin() + FunctionScopesStart,
                            FunctionScopes.end());
}
/// Stack containing information needed when in C++2a an 'auto' is encountered
/// in a function declaration parameter type specifier in order to invent a
/// corresponding template parameter in the enclosing abbreviated function
/// template. This information is also present in LambdaScopeInfo, stored in
/// the FunctionScopes stack.
SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos;
/// The index of the first InventedParameterInfo that refers to the current
/// context.
unsigned InventedParameterInfosStart = 0;
/// Returns the invented-template-parameter records that belong to the
/// current context, i.e. the entries from InventedParameterInfosStart on.
ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const {
  return llvm::makeArrayRef(InventedParameterInfos.begin() +
                                InventedParameterInfosStart,
                            InventedParameterInfos.end());
}
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members, and locations of delete-expressions
/// that could not be proven as to whether they mismatch with new-expression
/// used in initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
/// All the external declarations encountered and used in the TU.
SmallVector<VarDecl *, 4> ExternalDeclarations;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
/// Installs the parser callbacks used to lazily parse templated function
/// bodies (LateTemplateParser / LateTemplateParserCleanup), along with the
/// opaque parser pointer \p P that is passed back to those callbacks.
void SetLateTemplateParser(LateTemplateParserCB *LTP,
                           LateTemplateParserCleanupCB *LTPCleanup,
                           void *P) {
  LateTemplateParser = LTP;
  LateTemplateParserCleanup = LTPCleanup;
  OpaqueParser = P;
}
class DelayedDiagnostics;
/// Opaque saved state handed out by DelayedDiagnostics::push /
/// pushUndelayed; only DelayedDiagnostics itself may read the saved pool.
class DelayedDiagnosticsState {
  sema::DelayedDiagnosticPool *SavedPool;
  friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
  /// The current pool of diagnostics into which delayed
  /// diagnostics should go. Null when diagnostics are not being delayed.
  /// (Accessed by the out-of-line add() in DelayedDiagnostic.h; the name
  /// must stay stable.)
  sema::DelayedDiagnosticPool *CurPool;

public:
  DelayedDiagnostics() : CurPool(nullptr) {}

  /// Adds a delayed diagnostic.
  void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h

  /// Whether diagnostics should currently be delayed rather than emitted.
  bool shouldDelayDiagnostics() { return CurPool != nullptr; }

  /// Returns the current delayed-diagnostics pool.
  sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; }

  /// Enter a new scope: access and deprecation diagnostics will be
  /// collected in \p pool until the returned state is popped.
  DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
    DelayedDiagnosticsState Saved;
    Saved.SavedPool = CurPool;
    CurPool = &pool;
    return Saved;
  }

  /// Leave a delayed-diagnostic state that was previously pushed, without
  /// emitting any of its diagnostics. This is performed as part of the
  /// bookkeeping of popping a pool "properly".
  void popWithoutEmitting(DelayedDiagnosticsState state) {
    CurPool = state.SavedPool;
  }

  /// Enter a new scope in which access and deprecation diagnostics are
  /// not delayed at all.
  DelayedDiagnosticsState pushUndelayed() {
    DelayedDiagnosticsState Saved;
    Saved.SavedPool = CurPool;
    CurPool = nullptr;
    return Saved;
  }

  /// Undo a previous pushUndelayed().
  void popUndelayed(DelayedDiagnosticsState state) {
    assert(CurPool == nullptr);
    CurPool = state.SavedPool;
  }
} DelayedDiagnostics;
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
  Sema &S;
  // Nulled by pop() so that repeated pops (and the destructor) are no-ops.
  DeclContext *SavedContext;
  ProcessingContextState SavedContextState;
  QualType SavedCXXThisTypeOverride;
  unsigned SavedFunctionScopesStart;
  unsigned SavedInventedParameterInfosStart;

public:
  /// Pushes \p ContextToPush as Sema's current declaration context, saving
  /// everything needed for pop() to restore the previous state. When
  /// \p NewThisContext is true, the 'this' type override is cleared too.
  ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
    : S(S), SavedContext(S.CurContext),
      SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
      SavedCXXThisTypeOverride(S.CXXThisTypeOverride),
      SavedFunctionScopesStart(S.FunctionScopesStart),
      SavedInventedParameterInfosStart(S.InventedParameterInfosStart)
  {
    assert(ContextToPush && "pushing null context");
    S.CurContext = ContextToPush;
    if (NewThisContext)
      S.CXXThisTypeOverride = QualType();
    // Any saved FunctionScopes do not refer to this context.
    S.FunctionScopesStart = S.FunctionScopes.size();
    S.InventedParameterInfosStart = S.InventedParameterInfos.size();
  }

  /// Restores the saved state early. Idempotent: the first call clears
  /// SavedContext so subsequent calls (including the destructor) do nothing.
  void pop() {
    if (!SavedContext) return;
    S.CurContext = SavedContext;
    S.DelayedDiagnostics.popUndelayed(SavedContextState);
    S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
    S.FunctionScopesStart = SavedFunctionScopesStart;
    S.InventedParameterInfosStart = SavedInventedParameterInfosStart;
    SavedContext = nullptr;
  }

  ~ContextRAII() {
    pop();
  }
};
/// Whether the AST is currently being rebuilt to correct immediate
/// invocations. Immediate invocation candidates and references to consteval
/// functions aren't tracked when this is set.
bool RebuildingImmediateInvocation = false;

/// Used to change context to isConstantEvaluated without pushing a heavy
/// ExpressionEvaluationContextRecord object.
/// NOTE(review): unlike the flag above, this has no in-class initializer;
/// presumably it is initialized in Sema's constructor — confirm.
bool isConstantEvaluatedOverride;
/// Whether expressions are currently treated as constant evaluated: either
/// the innermost expression evaluation context is constant-evaluated, or
/// the lightweight override flag has been set.
bool isConstantEvaluated() {
  if (isConstantEvaluatedOverride)
    return true;
  return ExprEvalContexts.back().isConstantEvaluated();
}
/// RAII object to handle the state changes required to synthesize
/// a function body.
///
/// Pushes a declaration context (via ContextRAII), a function scope, and a
/// PotentiallyEvaluated expression evaluation context on construction; the
/// destructor pops them in the reverse order.
class SynthesizedFunctionScope {
  Sema &S;
  Sema::ContextRAII SavedContext;
  bool PushedCodeSynthesisContext = false;

public:
  SynthesizedFunctionScope(Sema &S, DeclContext *DC)
      : S(S), SavedContext(S, DC) {
    S.PushFunctionScope();
    S.PushExpressionEvaluationContext(
        Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
    // Mark the function as about to receive a body; only FunctionDecls and
    // ObjCMethodDecls are expected here.
    if (auto *FD = dyn_cast<FunctionDecl>(DC))
      FD->setWillHaveBody(true);
    else
      assert(isa<ObjCMethodDecl>(DC));
  }

  /// Push a DefiningSynthesizedFunction note onto the code synthesis
  /// context stack, pointing at \p UseLoc. May be called at most once.
  void addContextNote(SourceLocation UseLoc) {
    assert(!PushedCodeSynthesisContext);
    Sema::CodeSynthesisContext Ctx;
    Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
    Ctx.PointOfInstantiation = UseLoc;
    Ctx.Entity = cast<Decl>(S.CurContext);
    S.pushCodeSynthesisContext(Ctx);
    PushedCodeSynthesisContext = true;
  }

  ~SynthesizedFunctionScope() {
    if (PushedCodeSynthesisContext)
      S.popCodeSynthesisContext();
    // Undo the setWillHaveBody(true) from the constructor.
    if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
      FD->setWillHaveBody(false);
    S.PopExpressionEvaluationContext();
    S.PopFunctionScopeInfo();
  }
};
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before being declared. Rare; such an identifier may alias
/// another identifier, declared or undeclared.
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;

/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;

/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();

/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;

/// Maps identifiers to their visible declarations (see IdentifierResolver).
IdentifierResolver IdResolver;

/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;

/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;

/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;

/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;

/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library reside.
NamespaceDecl *StdExperimentalNamespaceCache;

/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;

/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>.
ClassTemplateDecl *StdCoroutineTraitsCache;

/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;

/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;

/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;

/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;

/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;

/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;

/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;

/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];

/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;

/// Pointer to NSString type (NSString *).
QualType NSStringPointer;

/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;

/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;

/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;

/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;

/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;

/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;

/// id<NSCopying> type.
QualType QIDNSCopying;

/// Will hold 'respondsToSelector:'.
Selector RespondsToSelectorSel;

/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
  /// The current expression and its subexpressions occur within an
  /// unevaluated operand (C++11 [expr]p7), such as the subexpression of
  /// \c sizeof, where the type of the expression may be significant but
  /// no code will be generated to evaluate the value of the expression at
  /// run time.
  Unevaluated,

  /// The current expression occurs within a braced-init-list within
  /// an unevaluated operand. This is mostly like a regular unevaluated
  /// context, except that we still instantiate constexpr functions that are
  /// referenced here so that we can perform narrowing checks correctly.
  UnevaluatedList,

  /// The current expression occurs within a discarded statement.
  /// This behaves largely similarly to an unevaluated operand in preventing
  /// definitions from being required, but not in other ways.
  DiscardedStatement,

  /// The current expression occurs within an unevaluated
  /// operand that unconditionally permits abstract references to
  /// fields, such as a SIZE operator in MS-style inline assembly.
  UnevaluatedAbstract,

  /// The current context is "potentially evaluated" in C++11 terms,
  /// but the expression is evaluated at compile-time (like the values of
  /// cases in a switch statement).
  ConstantEvaluated,

  /// In addition to being constant evaluated, the current expression
  /// occurs in an immediate function context - either a consteval function
  /// or a consteval if statement.
  ImmediateFunctionContext,

  /// The current expression is potentially evaluated at run time,
  /// which means that code may be generated to evaluate the value of the
  /// expression at run time.
  PotentiallyEvaluated,

  /// The current expression is potentially evaluated, but any
  /// declarations referenced inside that expression are only used if
  /// in fact the current expression is used.
  ///
  /// This value is used when parsing default function arguments, for which
  /// we would like to provide diagnostics (e.g., passing non-POD arguments
  /// through varargs) but do not want to mark declarations as "referenced"
  /// until the default argument is used.
  PotentiallyEvaluatedIfUsed
};

/// An immediate-invocation candidate: the ConstantExpr plus one extra bit
/// of bookkeeping state.
using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>;
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
  /// The expression evaluation context.
  ExpressionEvaluationContext Context;

  /// Whether the enclosing context needed a cleanup.
  CleanupInfo ParentCleanup;

  /// The number of active cleanup objects when we entered
  /// this expression evaluation context.
  unsigned NumCleanupObjects;

  /// The number of typos encountered during this expression evaluation
  /// context (i.e. the number of TypoExprs created).
  unsigned NumTypos;

  /// Saved set of maybe-ODR-used expressions (see MaybeODRUseExprSet).
  MaybeODRUseExprSet SavedMaybeODRUseExprs;

  /// The lambdas that are present within this context, if it
  /// is indeed an unevaluated context.
  SmallVector<LambdaExpr *, 2> Lambdas;

  /// The declaration that provides context for lambda expressions
  /// and block literals if the normal declaration context does not
  /// suffice, e.g., in a default function argument.
  Decl *ManglingContextDecl;

  /// If we are processing a decltype type, a set of call expressions
  /// for which we have deferred checking the completeness of the return type.
  SmallVector<CallExpr *, 8> DelayedDecltypeCalls;

  /// If we are processing a decltype type, a set of temporary binding
  /// expressions for which we have deferred checking the destructor.
  SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;

  /// Pending noderef expressions recorded in this context; see
  /// WarnOnPendingNoDerefs.
  llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;

  /// Expressions appearing as the LHS of a volatile assignment in this
  /// context. We produce a warning for these when popping the context if
  /// they are not discarded-value expressions nor unevaluated operands.
  SmallVector<Expr*, 2> VolatileAssignmentLHSs;

  /// Set of candidates for starting an immediate invocation.
  llvm::SmallVector<ImmediateInvocationCandidate, 4> ImmediateInvocationCandidates;

  /// Set of DeclRefExprs referencing a consteval function when used in a
  /// context not already known to be immediately invoked.
  llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval;

  /// \brief Describes whether we are in an expression context which we have
  /// to handle differently.
  enum ExpressionKind {
    EK_Decltype, EK_TemplateArgument, EK_Other
  } ExprContext;

  ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
                                    unsigned NumCleanupObjects,
                                    CleanupInfo ParentCleanup,
                                    Decl *ManglingContextDecl,
                                    ExpressionKind ExprContext)
      : Context(Context), ParentCleanup(ParentCleanup),
        NumCleanupObjects(NumCleanupObjects), NumTypos(0),
        ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {}

  /// True for all of the unevaluated-operand flavors of context.
  bool isUnevaluated() const {
    return Context == ExpressionEvaluationContext::Unevaluated ||
           Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
           Context == ExpressionEvaluationContext::UnevaluatedList;
  }

  /// True for constant-evaluated contexts, including immediate function
  /// contexts (which are constant evaluated by definition).
  bool isConstantEvaluated() const {
    return Context == ExpressionEvaluationContext::ConstantEvaluated ||
           Context == ExpressionEvaluationContext::ImmediateFunctionContext;
  }

  bool isImmediateFunctionContext() const {
    return Context == ExpressionEvaluationContext::ImmediateFunctionContext;
  }
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;

/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);

/// Compute the mangling number context for a lambda expression or
/// block literal. Also return the extra mangling decl if any.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
std::tuple<MangleNumberingContext *, Decl *>
getCurrentMangleNumberContext(const DeclContext *DC);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
  /// Outcome of overload resolution for a special member function.
  enum Kind {
    NoMemberOrDeleted,
    Ambiguous,
    Success
  };

private:
  // The resolved method plus a 2-bit Kind packed into the pointer's low bits.
  llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;

public:
  SpecialMemberOverloadResult() : Pair() {}
  /// A deleted method is recorded as NoMemberOrDeleted.
  /// NOTE(review): dereferences MD unconditionally — callers presumably
  /// never pass null here; confirm.
  SpecialMemberOverloadResult(CXXMethodDecl *MD)
      : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}

  CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
  void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }

  Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
  void setKind(Kind K) { Pair.setInt(K); }
};
/// A SpecialMemberOverloadResult that can be stored in a FoldingSet,
/// keyed by the given FoldingSetNodeID.
class SpecialMemberOverloadResultEntry
    : public llvm::FastFoldingSetNode,
      public SpecialMemberOverloadResult {
public:
  SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
      : FastFoldingSetNode(ID)
  {}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;

/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;

/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
const TranslationUnitKind TUKind;

/// Bump allocator owned by Sema.
llvm::BumpPtrAllocator BumpAlloc;

/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;

typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
  UnparsedDefaultArgInstantiationsMap;

/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;

// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;

/// UndefinedButUsed - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;

/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);

/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
    SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);

/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
/// A pool mapping Objective-C selectors to the instance/factory method
/// lists known for them; see the MethodPool member below for usage.
class GlobalMethodPool {
public:
  /// First element: instance methods; second: factory methods.
  using Lists = std::pair<ObjCMethodList, ObjCMethodList>;
  using iterator = llvm::DenseMap<Selector, Lists>::iterator;

  iterator begin() { return Methods.begin(); }
  iterator end() { return Methods.end(); }
  iterator find(Selector Sel) { return Methods.find(Sel); }
  std::pair<iterator, bool> insert(std::pair<Selector, Lists> &&Val) {
    // 'Val' is a named rvalue reference; without std::move it would be
    // copied into the map instead of moved.
    return Methods.insert(std::move(Val));
  }
  int count(Selector Sel) const { return Methods.count(Sel); }
  bool empty() const { return Methods.empty(); }

private:
  llvm::DenseMap<Selector, Lists> Methods;
};
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;

/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;

/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
    ImplicitlyRetainedSelfLocs;

/// Kinds of C++ special members.
enum CXXSpecialMember {
  CXXDefaultConstructor,
  CXXCopyConstructor,
  CXXMoveConstructor,
  CXXCopyAssignment,
  CXXMoveAssignment,
  CXXDestructor,
  CXXInvalid
};

/// A (record, special-member kind) pair packed into a PointerIntPair.
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
    SpecialMemberDecl;

/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;

/// Kinds of defaulted comparison operator functions.
enum class DefaultedComparisonKind : unsigned char {
  /// This is not a defaultable comparison operator.
  None,
  /// This is an operator== that should be implemented as a series of
  /// subobject comparisons.
  Equal,
  /// This is an operator<=> that should be implemented as a series of
  /// subobject comparisons.
  ThreeWay,
  /// This is an operator!= that should be implemented as a rewrite in terms
  /// of a == comparison.
  NotEqual,
  /// This is an <, <=, >, or >= that should be implemented as a rewrite in
  /// terms of a <=> comparison.
  Relational,
};

/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;

/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;

void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);

/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);

/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the CurFPFeatures state on entry/exit of compound
/// statements.
class FPFeaturesStateRAII {
public:
  FPFeaturesStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.CurFPFeatures) {
    // Also capture the current pragma override stack value so both are
    // restored together on destruction.
    OldOverrides = S.FpPragmaStack.CurrentValue;
  }
  ~FPFeaturesStateRAII() {
    S.CurFPFeatures = OldFPFeaturesState;
    S.FpPragmaStack.CurrentValue = OldOverrides;
  }
  /// The pragma overrides that were in effect when this object was created.
  FPOptionsOverride getOverrides() { return OldOverrides; }

private:
  Sema& S;
  FPOptions OldFPFeaturesState;
  FPOptionsOverride OldOverrides;
};
/// Add an implicit typedef with the given name and type.
void addImplicitTypedef(StringRef Name, QualType T);

/// Whether we have already warned about nearly exhausting the stack.
bool WarnedStackExhausted = false;

/// Increment when we find a reference; decrement when we find an ignored
/// assignment. Ultimately the value is 0 if every reference is an ignored
/// assignment.
llvm::DenseMap<const VarDecl *, int> RefsMinusAssignments;

/// Cached Darwin SDK info used for availability checking; see
/// getDarwinSDKInfoForAvailabilityChecking below.
Optional<std::unique_ptr<DarwinSDKInfo>> CachedDarwinSDKInfo;

public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
     TranslationUnitKind TUKind = TU_Complete,
     CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();

/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();

/// This virtual key function only exists to limit the emission of debug info
/// describing the Sema class. GCC and Clang only emit debug info for a class
/// with a vtable when the vtable is emitted. Sema is final and not
/// polymorphic, but the debug info size savings are so significant that it is
/// worth adding a vtable just to take advantage of this optimization.
virtual void anchor();

const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getCurFPFeatures() { return CurFPFeatures; }

DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }

DarwinSDKInfo *getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc,
                                                       StringRef Platform);

/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
/// \param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);

void PrintStats() const;

/// Warn that the stack is nearly exhausted.
void warnStackExhausted(SourceLocation Loc);

/// Run some code with "sufficient" stack space. (Currently, at least 256K is
/// guaranteed). Produces a warning if we're low on stack space and allocates
/// more in that case. Use this in code that may recurse deeply (for example,
/// in template instantiation) to avoid stack overflow.
void runWithSufficientStackSpace(SourceLocation Loc,
                                 llvm::function_ref<void()> Fn);
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. ImmediateDiagBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class ImmediateDiagBuilder : public DiagnosticBuilder {
  Sema &SemaRef;
  unsigned DiagID;

public:
  ImmediateDiagBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
      : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {}
  ImmediateDiagBuilder(DiagnosticBuilder &&DB, Sema &SemaRef, unsigned DiagID)
      : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {}

  // This is a cunning lie. DiagnosticBuilder actually performs move
  // construction in its copy constructor (but due to varied uses, it's not
  // possible to conveniently express this as actual move construction). So
  // the default copy ctor here is fine, because the base class disables the
  // source anyway, so the user-defined ~ImmediateDiagBuilder is a safe no-op
  // in that case anyway.
  ImmediateDiagBuilder(const ImmediateDiagBuilder &) = default;

  ~ImmediateDiagBuilder() {
    // If we aren't active, there is nothing to do.
    if (!isActive()) return;

    // Otherwise, we need to emit the diagnostic. First clear the diagnostic
    // builder itself so it won't emit the diagnostic in its own destructor.
    //
    // This seems wasteful, in that as written the DiagnosticBuilder dtor will
    // do its own needless checks to see if the diagnostic needs to be
    // emitted. However, because we take care to ensure that the builder
    // objects never escape, a sufficiently smart compiler will be able to
    // eliminate that code.
    Clear();

    // Dispatch to Sema to emit the diagnostic.
    SemaRef.EmitCurrentDiagnostic(DiagID);
  }

  /// Teach operator<< to produce an object of the correct type.
  template <typename T>
  friend const ImmediateDiagBuilder &
  operator<<(const ImmediateDiagBuilder &Diag, const T &Value) {
    const DiagnosticBuilder &BaseDiag = Diag;
    BaseDiag << Value;
    return Diag;
  }

  // It is necessary to limit this to rvalue reference to avoid calling this
  // function with a bitfield lvalue argument since non-const reference to
  // bitfield is not allowed.
  template <typename T, typename = typename std::enable_if<
                            !std::is_lvalue_reference<T>::value>::type>
  const ImmediateDiagBuilder &operator<<(T &&V) const {
    const DiagnosticBuilder &BaseDiag = *this;
    BaseDiag << std::move(V);
    return *this;
  }
};
/// Bitmask to contain the list of reasons a single diagnostic should be
/// emitted, based on its language. This permits multiple offload systems
/// to coexist in the same translation unit.
enum class DeviceDiagnosticReason {
  /// Diagnostic doesn't apply to anything. Included for completeness, but
  /// should make this a no-op.
  None = 0,
  /// OpenMP specific diagnostics.
  OmpDevice = 1 << 0,
  OmpHost = 1 << 1,
  OmpAll = OmpDevice | OmpHost,
  /// CUDA specific diagnostics.
  CudaDevice = 1 << 2,
  CudaHost = 1 << 3,
  CudaAll = CudaDevice | CudaHost,
  /// SYCL specific diagnostic.
  Sycl = 1 << 4,
  /// ESIMD specific diagnostic.
  Esimd = 1 << 5,
  /// A flag representing 'all'. This can be used to avoid the check
  /// all-together and make this behave as it did before the
  /// DiagnosticReason was added (that is, unconditionally emit).
  /// Note: This needs to be updated if any flags above are added.
  All = OmpAll | CudaAll | Sycl | Esimd,
  LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/All)
};
private:
// A collection of a pair of undefined functions and their callers known
// to be reachable from a routine on the device (kernel or device function).
// Each entry is (Callee, Caller).
typedef std::pair<const FunctionDecl *, const FunctionDecl *> CallPair;
llvm::SmallVector<CallPair> UndefinedReachableFromSyclDevice;

public:
// Helper routine to add a Callee/Caller pair of FunctionDecl * to
// UndefinedReachableFromSyclDevice.
void addFDToReachableFromSyclDevice(const FunctionDecl *Callee,
                                    const FunctionDecl *Caller) {
  // Construct the pair in place instead of building a temporary via
  // std::make_pair and copying it into the vector.
  UndefinedReachableFromSyclDevice.emplace_back(Callee, Caller);
}
// Helper routine to check if a pair of Callee-Caller FunctionDecl *
// is in UndefinedReachableFromSyclDevice.
bool isFDReachableFromSyclDevice(const FunctionDecl *Callee,
const FunctionDecl *Caller) {
return llvm::any_of(UndefinedReachableFromSyclDevice,
[Callee, Caller](const CallPair &P) {
return P.first == Callee && P.second == Caller;
});
}
/// A generic diagnostic builder for errors which may or may not be deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class SemaDiagnosticBuilder {
public:
  enum Kind {
    /// Emit no diagnostics.
    K_Nop,
    /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
    K_Immediate,
    /// Emit the diagnostic immediately, and, if it's a warning or error, also
    /// emit a call stack showing how this function can be reached by an a
    /// priori known-emitted function.
    K_ImmediateWithCallStack,
    /// Create a deferred diagnostic, which is emitted only if the function
    /// it's attached to is codegen'ed. Also emit a call stack as with
    /// K_ImmediateWithCallStack.
    K_Deferred
  };

  SemaDiagnosticBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
                        FunctionDecl *Fn, Sema &S, DeviceDiagnosticReason R);
  SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D);
  SemaDiagnosticBuilder(const SemaDiagnosticBuilder &) = default;
  ~SemaDiagnosticBuilder();

  /// True when the diagnostic is being emitted immediately rather than
  /// recorded as a deferred diagnostic (or dropped).
  bool isImmediate() const { return ImmediateDiag.hasValue(); }

  /// Convertible to bool: True if we immediately emitted an error, false if
  /// we didn't emit an error or we created a deferred error.
  ///
  /// Example usage:
  ///
  ///   if (SemaDiagnosticBuilder(...) << foo << bar)
  ///     return ExprError();
  ///
  /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
  /// want to use these instead of creating a SemaDiagnosticBuilder yourself.
  operator bool() const { return isImmediate(); }

  /// Stream a value either into the immediate diagnostic or into the
  /// deferred partial diagnostic stored on Sema::DeviceDeferredDiags.
  template <typename T>
  friend const SemaDiagnosticBuilder &
  operator<<(const SemaDiagnosticBuilder &Diag, const T &Value) {
    if (Diag.ImmediateDiag.hasValue())
      *Diag.ImmediateDiag << Value;
    else if (Diag.PartialDiagId.hasValue())
      Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId]
          .getDiag()
          .second
          << Value;
    return Diag;
  }

  // It is necessary to limit this to rvalue reference to avoid calling this
  // function with a bitfield lvalue argument since non-const reference to
  // bitfield is not allowed.
  template <typename T, typename = typename std::enable_if<
                            !std::is_lvalue_reference<T>::value>::type>
  const SemaDiagnosticBuilder &operator<<(T &&V) const {
    if (ImmediateDiag.hasValue())
      *ImmediateDiag << std::move(V);
    else if (PartialDiagId.hasValue())
      S.DeviceDeferredDiags[Fn][*PartialDiagId].getDiag().second
          << std::move(V);
    return *this;
  }

  /// Stream an entire PartialDiagnostic: emitted through the immediate
  /// builder, or assigned over the stored deferred diagnostic.
  friend const SemaDiagnosticBuilder &
  operator<<(const SemaDiagnosticBuilder &Diag, const PartialDiagnostic &PD) {
    if (Diag.ImmediateDiag.hasValue())
      PD.Emit(*Diag.ImmediateDiag);
    else if (Diag.PartialDiagId.hasValue())
      Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId]
          .getDiag()
          .second = PD;
    return Diag;
  }

  void AddFixItHint(const FixItHint &Hint) const {
    if (ImmediateDiag.hasValue())
      ImmediateDiag->AddFixItHint(Hint);
    else if (PartialDiagId.hasValue())
      S.DeviceDeferredDiags[Fn][*PartialDiagId].getDiag().second.AddFixItHint(
          Hint);
  }

  friend ExprResult ExprError(const SemaDiagnosticBuilder &) {
    return ExprError();
  }
  friend StmtResult StmtError(const SemaDiagnosticBuilder &) {
    return StmtError();
  }
  operator ExprResult() const { return ExprError(); }
  operator StmtResult() const { return StmtError(); }
  operator TypeResult() const { return TypeError(); }
  operator DeclResult() const { return DeclResult(true); }
  operator MemInitResult() const { return MemInitResult(true); }

private:
  Sema &S;
  SourceLocation Loc;
  unsigned DiagID;
  FunctionDecl *Fn;
  bool ShowCallStack;

  // Invariant: At most one of these Optionals has a value.
  // FIXME: Switch these to a Variant once that exists.
  llvm::Optional<ImmediateDiagBuilder> ImmediateDiag;
  llvm::Optional<unsigned> PartialDiagId;
};
/// Is the last error level diagnostic immediate. This is used to determine
/// whether the next info diagnostic should be immediate.
bool IsLastErrorImmediate = true;

/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID,
                           bool DeferHint = false);

/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic &PD,
                           bool DeferHint = false);

/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h

/// Whether deferrable diagnostics should be deferred.
bool DeferDiags = false;
/// RAII class to control scope of DeferDiags.
///
/// Installs the requested DeferDiags value on construction and restores
/// the previous value on destruction.
class DeferDiagsRAII {
  Sema &S;
  bool SavedDeferDiags = false;

public:
  DeferDiagsRAII(Sema &S, bool DeferDiags) : S(S) {
    SavedDeferDiags = S.DeferDiags;
    S.DeferDiags = DeferDiags;
  }

  ~DeferDiagsRAII() {
    S.DeferDiags = SavedDeferDiags;
  }
};
/// Whether an uncompilable error has occurred. This includes errors that
/// happen in deferred diagnostics.
bool hasUncompilableErrorOccurred() const;

bool findMacroSpelling(SourceLocation &loc, StringRef name);

/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;

/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);

/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;

/// Invent a new identifier for parameters of abbreviated templates.
IdentifierInfo *
InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
                                           unsigned Index);

void emitAndClearUnusedLocalTypedefWarnings();

private:
/// Function or variable declarations to be checked for whether the deferred
/// diagnostics should be emitted.
llvm::SmallSetVector<Decl *, 4> DeclsToCheckForDeferredDiags;

public:
// Emit all deferred diagnostics.
void emitDeferredDiags();

enum TUFragmentKind {
  /// The global module fragment, between 'module;' and a module-declaration.
  Global,
  /// A normal translation unit fragment. For a non-module unit, this is the
  /// entire translation unit. Otherwise, it runs from the module-declaration
  /// to the private-module-fragment (if any) or the end of the TU (if not).
  Normal,
  /// The private module fragment, between 'module :private;' and the end of
  /// the translation unit.
  Private
};

void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);

void CheckDelegatingCtorCycles();

Scope *getScopeForContext(DeclContext *Ctx);

void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();

/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);

void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
                             RecordDecl *RD, CapturedRegionKind K,
                             unsigned OpenMPCaptureLevel = 0);
/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
  // The Sema that popped the scope; the deleter forwards to it.
  Sema *Self;

public:
  explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}

  // Defined out of line; invoked by PoppedFunctionScopePtr (see below) when
  // the unique_ptr releases the FunctionScopeInfo.
  void operator()(sema::FunctionScopeInfo *Scope) const;
};
using PoppedFunctionScopePtr =
std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;
PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
QualType BlockType = QualType());
/// Return the innermost function scope, or null if no function scope is
/// currently open.
sema::FunctionScopeInfo *getCurFunction() const {
  if (FunctionScopes.empty())
    return nullptr;
  return FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void setFunctionHasMustTail();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Get the innermost lambda enclosing the current location, if any. This
/// looks through intervening non-lambda scopes such as local functions and
/// blocks.
sema::LambdaScopeInfo *getEnclosingLambda() const;
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// Retrieve the current function, if any, that should be analyzed for
/// potential availability violations.
sema::FunctionScopeInfo *getCurFunctionAvailabilityContext();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
/// Called before parsing a function declarator belonging to a function
/// declaration.
void ActOnStartFunctionDeclarationDeclarator(Declarator &D,
unsigned TemplateParameterDepth);
/// Called after parsing a function declarator belonging to a function
/// declaration.
void ActOnFinishFunctionDeclarationDeclarator(Declarator &D);
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildMatrixType(QualType T, Expr *NumRows, Expr *NumColumns,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
SYCLIntelFPGAIVDepAttr *
BuildSYCLIntelFPGAIVDepAttr(const AttributeCommonInfo &CI, Expr *Expr1,
Expr *Expr2);
LoopUnrollHintAttr *BuildLoopUnrollHintAttr(const AttributeCommonInfo &A,
Expr *E);
OpenCLUnrollHintAttr *
BuildOpenCLLoopUnrollHintAttr(const AttributeCommonInfo &A, Expr *E);
SYCLIntelFPGALoopCountAttr *
BuildSYCLIntelFPGALoopCountAttr(const AttributeCommonInfo &CI, Expr *E);
SYCLIntelFPGAInitiationIntervalAttr *
BuildSYCLIntelFPGAInitiationIntervalAttr(const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelFPGAMaxConcurrencyAttr *
BuildSYCLIntelFPGAMaxConcurrencyAttr(const AttributeCommonInfo &CI, Expr *E);
SYCLIntelFPGAMaxInterleavingAttr *
BuildSYCLIntelFPGAMaxInterleavingAttr(const AttributeCommonInfo &CI, Expr *E);
SYCLIntelFPGASpeculatedIterationsAttr *
BuildSYCLIntelFPGASpeculatedIterationsAttr(const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelFPGALoopCoalesceAttr *
BuildSYCLIntelFPGALoopCoalesceAttr(const AttributeCommonInfo &CI, Expr *E);
bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
QualType BuildExtIntType(bool IsUnsigned, Expr *BitWidth, SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Stmt *E);
/// Determine whether the callee of a particular function call can throw.
/// E, D and Loc are all optional.
static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D,
SourceLocation Loc = SourceLocation());
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const PartialDiagnostic &NoThrowDiagID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
/// Abstract callback used to emit a diagnostic about an incomplete type.
struct TypeDiagnoser {
  TypeDiagnoser() = default;

  /// Emit the diagnostic for type \p T at location \p Loc.
  virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;

  virtual ~TypeDiagnoser() = default;
};
// The getPrintable overloads normalize the heterogeneous argument types that
// BoundTypeDiagnoser may capture into forms the diagnostic builder's
// operator<< accepts. Most are identity functions; SourceLocation is
// implicitly widened to a SourceRange, and Expr/TypeLoc contribute their
// source ranges.
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
  return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
/// A TypeDiagnoser bound to a diagnostic ID plus a fixed list of extra
/// arguments. diagnose() streams the bound arguments into the diagnostic
/// (via getPrintable) and then appends the offending type.
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
protected:
  unsigned DiagID;
  // Arguments are captured by const reference: the caller must keep them
  // alive until diagnose() has run.
  std::tuple<const Ts &...> Args;

  // Expand the argument tuple into DB in declaration order, using the
  // index_sequence pack to index std::get. The array initializer forces
  // left-to-right evaluation of the stream operations.
  template <std::size_t... Is>
  void emit(const SemaDiagnosticBuilder &DB,
            std::index_sequence<Is...>) const {
    // Apply all tuple elements to the builder in order.
    bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
    (void)Dummy;
  }

public:
  BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
      : TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
    assert(DiagID != 0 && "no diagnostic for type diagnoser");
  }

  // Emit DiagID at Loc with the bound arguments, followed by T.
  void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
    const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
    emit(DB, std::index_sequence_for<Ts...>());
    DB << T;
  }
};
/// Do a check to make sure \p Name looks like a legal argument for the
/// swift_name attribute applied to decl \p D. Raise a diagnostic if the name
/// is invalid for the given declaration.
///
/// \p AL is used to provide caret diagnostics in case of a malformed name.
///
/// \returns true if the name is a valid swift name for \p D, false otherwise.
bool DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation Loc,
const ParsedAttr &AL, bool IsAsync);
/// A derivative of BoundTypeDiagnoser for which the diagnostic's type
/// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless.
/// For example, a diagnostic with no other parameters would generally have
/// the form "...%select{incomplete|sizeless}0 type %1...".
template <typename... Ts>
class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> {
public:
  SizelessTypeDiagnoser(unsigned DiagID, const Ts &... Args)
      : BoundTypeDiagnoser<Ts...>(DiagID, Args...) {}

  // Same as the base diagnose(), except that a 0/1 "is sizeless" flag is
  // streamed immediately before the type, matching diagnostics of the form
  // "...%select{incomplete|sizeless}N type %M..." (see the class comment).
  void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
    const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID);
    this->emit(DB, std::index_sequence_for<Ts...>());
    DB << T->isSizelessType() << T;
  }
};
/// Controls how sizeless built-in types are treated by type-completeness
/// checks (RequireCompleteType, isCompleteType, and friends).
enum class CompleteTypeKind {
  /// Apply the normal rules for complete types. In particular,
  /// treat all sizeless types as incomplete.
  Normal,

  /// Relax the normal rules for complete types so that they include
  /// sizeless built-in types.
  AcceptSizeless,

  // FIXME: Eventually we should flip the default to Normal and opt in
  // to AcceptSizeless rather than opt out of it.
  Default = AcceptSizeless
};
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
/// keeping a container of all pending expressions and checking if the address
/// of them are eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser *Diagnoser);
/// State tracked for one module currently being parsed (an entry in
/// ModuleScopes below).
struct ModuleScope {
  // Location at which this module scope began.
  SourceLocation BeginLoc;
  // The module itself, if known; null otherwise.
  clang::Module *Module = nullptr;
  // Whether this scope is a module interface (as opposed to an
  // implementation unit).
  bool ModuleInterface = false;
  // Whether the global module fragment was introduced implicitly rather
  // than by an explicit 'module;' declaration.
  bool ImplicitGlobalModuleFragment = false;
  // NOTE(review): presumably the visible-module set of the enclosing scope,
  // saved for restoration when this scope ends — confirm against the
  // module-parsing code in SemaModule.cpp.
  VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;
/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
  if (ModuleScopes.empty())
    return nullptr;
  return ModuleScopes.back().Module;
}
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(const Decl *Entity) {
return Entity->getOwningModule();
}
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
// When loading a non-modular PCH files, this is used to restore module
// visibility.
void makeModuleVisible(Module *Mod, SourceLocation ImportLoc) {
VisibleModules.setVisible(Mod, ImportLoc);
}
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
  // Fast path: the declaration is visible regardless of module state.
  if (D->isUnconditionallyVisible())
    return true;
  return isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
                      llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
  // Fast path: the declaration we were handed is itself visible.
  if (isVisible(D))
    return true;
  // Otherwise fall back to the slow path.
  return hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
/// Query (without diagnosing) whether \p T is a complete type at \p Loc.
bool isCompleteType(SourceLocation Loc, QualType T,
                    CompleteTypeKind Kind = CompleteTypeKind::Default) {
  // A null diagnoser suppresses diagnostics; the impl returns true when the
  // type is incomplete, so invert the result.
  bool Incomplete = RequireCompleteTypeImpl(Loc, T, Kind, nullptr);
  return !Incomplete;
}
// Require that \p T be a complete type at \p Loc, diagnosing via either a
// TypeDiagnoser or a raw diagnostic ID (with optional bound arguments).
// NOTE(review): judging from isCompleteType above, these return true when
// the type is incomplete — confirm against the out-of-line definitions.
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         CompleteTypeKind Kind, TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         CompleteTypeKind Kind, unsigned DiagID);

// Convenience overloads that default Kind to CompleteTypeKind::Default.
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         TypeDiagnoser &Diagnoser) {
  return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser);
}
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) {
  return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID);
}

// Bind DiagID plus extra diagnostic arguments into a BoundTypeDiagnoser.
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
                         const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteType(Loc, T, Diagnoser);
}

// As above, but treats sizeless types as incomplete (CompleteTypeKind::Normal)
// and uses the sizeless-aware diagnoser.
template <typename... Ts>
bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID,
                              const Ts &... Args) {
  SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteType(Loc, T, CompleteTypeKind::Normal, Diagnoser);
}
/// Get the type of expression E, triggering instantiation to complete the
/// type if necessary -- that is, if the expression refers to a templated
/// static data member of incomplete array type.
///
/// May still return an incomplete type if instantiation was not possible or
/// if the type is incomplete for a different reason. Use
/// RequireCompleteExprType instead if a diagnostic is expected for an
/// incomplete expression type.
QualType getCompletedType(Expr *E);
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind,
TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
// Variadic convenience wrapper: bind DiagID and extra arguments, then check
// the expression's type for completeness under the default kind.
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser);
}

// As above, but sizeless types are treated as incomplete and the diagnostic
// carries the extra incomplete/sizeless selector (see SizelessTypeDiagnoser).
template <typename... Ts>
bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID,
                                  const Ts &... Args) {
  SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteExprType(E, CompleteTypeKind::Normal, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
// Variadic convenience wrapper over RequireLiteralType: binds DiagID and the
// extra diagnostic arguments into a BoundTypeDiagnoser.
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
                        const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
/// Result record describing whether a definition body may be skipped and,
/// if so, which declarations are involved.
struct SkipBodyInfo {
  SkipBodyInfo() = default;
  bool ShouldSkip = false;
  bool CheckSameAsPrevious = false;
  NamedDecl *Previous = nullptr;
  NamedDecl *New = nullptr;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
/// This name is not a type or template in this context, but might be
/// something else.
NC_Unknown,
/// Classification failed; an error has been produced.
NC_Error,
/// The name has been typo-corrected to a keyword.
NC_Keyword,
/// The name was classified as a type.
NC_Type,
/// The name was classified as a specific non-type, non-template
/// declaration. ActOnNameClassifiedAsNonType should be called to
/// convert the declaration to an expression.
NC_NonType,
/// The name was classified as an ADL-only function name.
/// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the
/// result to an expression.
NC_UndeclaredNonType,
/// The name denotes a member of a dependent type that could not be
/// resolved. ActOnNameClassifiedAsDependentNonType should be called to
/// convert the result to an expression.
NC_DependentNonType,
/// The name was classified as an overload set, and an expression
/// representing that overload set has been formed.
/// ActOnNameClassifiedAsOverloadSet should be called to form a suitable
/// expression referencing the overload set.
NC_OverloadSet,
/// The name was classified as a template whose specializations are types.
NC_TypeTemplate,
/// The name was classified as a variable template name.
NC_VarTemplate,
/// The name was classified as a function template name.
NC_FunctionTemplate,
/// The name was classified as an ADL-only function template name.
NC_UndeclaredTemplate,
/// The name was classified as a concept name.
NC_Concept,
};
/// The result of classifying a name: the kind of entity it denotes plus the
/// payload (type, declaration, template name, or expression) needed to act
/// on that classification. Constructed via the static factory functions.
class NameClassification {
  NameClassificationKind Kind;

  // Payload; which member is active depends on Kind (enforced by the
  // assertions in the getters below). Kinds such as NC_Error, NC_Unknown,
  // and NC_Keyword carry no payload.
  union {
    ExprResult Expr;
    NamedDecl *NonTypeDecl;
    TemplateName Template;
    ParsedType Type;
  };

  explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}

public:
  // Implicit conversion from a type: classification NC_Type.
  NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}

  // Typo-corrected to a keyword; the identifier itself is not stored.
  NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}

  static NameClassification Error() {
    return NameClassification(NC_Error);
  }

  static NameClassification Unknown() {
    return NameClassification(NC_Unknown);
  }

  // Overload set already formed into an expression.
  static NameClassification OverloadSet(ExprResult E) {
    NameClassification Result(NC_OverloadSet);
    Result.Expr = E;
    return Result;
  }

  // A specific non-type, non-template declaration.
  static NameClassification NonType(NamedDecl *D) {
    NameClassification Result(NC_NonType);
    Result.NonTypeDecl = D;
    return Result;
  }

  static NameClassification UndeclaredNonType() {
    return NameClassification(NC_UndeclaredNonType);
  }

  static NameClassification DependentNonType() {
    return NameClassification(NC_DependentNonType);
  }

  static NameClassification TypeTemplate(TemplateName Name) {
    NameClassification Result(NC_TypeTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification VarTemplate(TemplateName Name) {
    NameClassification Result(NC_VarTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification FunctionTemplate(TemplateName Name) {
    NameClassification Result(NC_FunctionTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification Concept(TemplateName Name) {
    NameClassification Result(NC_Concept);
    Result.Template = Name;
    return Result;
  }

  static NameClassification UndeclaredTemplate(TemplateName Name) {
    NameClassification Result(NC_UndeclaredTemplate);
    Result.Template = Name;
    return Result;
  }

  NameClassificationKind getKind() const { return Kind; }

  // Valid only for NC_OverloadSet.
  ExprResult getExpression() const {
    assert(Kind == NC_OverloadSet);
    return Expr;
  }

  // Valid only for NC_Type.
  ParsedType getType() const {
    assert(Kind == NC_Type);
    return Type;
  }

  // Valid only for NC_NonType.
  NamedDecl *getNonTypeDecl() const {
    assert(Kind == NC_NonType);
    return NonTypeDecl;
  }

  // Valid for any of the template classifications.
  TemplateName getTemplateName() const {
    assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
           Kind == NC_VarTemplate || Kind == NC_Concept ||
           Kind == NC_UndeclaredTemplate);
    return Template;
  }

  // Map the classification onto the parser-facing TemplateNameKind.
  TemplateNameKind getTemplateNameKind() const {
    switch (Kind) {
    case NC_TypeTemplate:
      return TNK_Type_template;
    case NC_FunctionTemplate:
      return TNK_Function_template;
    case NC_VarTemplate:
      return TNK_Var_template;
    case NC_Concept:
      return TNK_Concept_template;
    case NC_UndeclaredTemplate:
      return TNK_Undeclared_template;
    default:
      llvm_unreachable("unsupported name classification.");
    }
  }
};
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name, SourceLocation NameLoc,
const Token &NextToken,
CorrectionCandidateCallback *CCC = nullptr);
/// Act on the result of classifying a name as an undeclared (ADL-only)
/// non-type declaration.
ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name,
SourceLocation NameLoc);
/// Act on the result of classifying a name as an undeclared member of a
/// dependent base class.
ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsAddressOfOperand);
/// Act on the result of classifying a name as a specific non-type
/// declaration.
ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS,
NamedDecl *Found,
SourceLocation NameLoc,
const Token &NextToken);
/// Act on the result of classifying a name as an overload set.
ExprResult ActOnNameClassifiedAsOverloadSet(Scope *S, Expr *OverloadSet);
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
Concept,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
  // Only meaningful for valid C++ expressions.
  if (E.isInvalid() || !getLangOpts().CPlusPlus)
    return false;
  Dependent = false;

  const auto *Ex = E.get();

  // Non-dependent references: plausible as a template-name only when no
  // explicit template arguments were already written.
  if (const auto *DRE = dyn_cast<DeclRefExpr>(Ex))
    return !DRE->hasExplicitTemplateArgs();
  if (const auto *ME = dyn_cast<MemberExpr>(Ex))
    return !ME->hasExplicitTemplateArgs();

  // From here on, any match is a dependent reference.
  Dependent = true;
  if (const auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(Ex))
    return !DSDRE->hasExplicitTemplateArgs();
  if (const auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(Ex))
    return !DSME->hasExplicitTemplateArgs();

  // Any additional cases recognized here should also be handled by
  // diagnoseExprIntendedAsTemplateName.
  return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
void warnOnReservedIdentifier(const NamedDecl *D);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
bool tryToFixVariablyModifiedVarType(TypeSourceInfo *&TInfo,
QualType &T, SourceLocation Loc,
unsigned FailedFoldDiagID);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
NamedDecl *getShadowedDeclaration(const BindingDecl *D,
const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
enum class CheckConstexprKind {
/// Diagnose issues that are non-constant or that are extensions.
Diagnose,
/// Identify whether this function satisfies the formal rules for constexpr
/// functions in the current language mode (with no extensions).
CheckValid
};
bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
CheckConstexprKind Kind);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
ExprResult ConvertParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
void SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
// Contexts where using non-trivial C union types can be disallowed. This is
// passed to err_non_trivial_c_union_in_invalid_context.
// NOTE(review): enumerator order presumably matches that diagnostic's %select
// list — confirm and keep the two in sync when adding entries.
enum NonTrivialCUnionContext {
// Function parameter.
NTCUC_FunctionParam,
// Function return.
NTCUC_FunctionReturn,
// Default-initialized object.
NTCUC_DefaultInitializedObject,
// Variable with automatic storage duration.
NTCUC_AutoVar,
// Initializer expression that might copy from another object.
NTCUC_CopyInit,
// Assignment.
NTCUC_Assignment,
// Compound literal.
NTCUC_CompoundLiteral,
// Block capture.
NTCUC_BlockCapture,
// lvalue-to-rvalue conversion of volatile type.
NTCUC_LValueToRValueVolatile,
};
/// Emit diagnostics if the initializer or any of its explicit or
/// implicitly-generated subexpressions require copying or
/// default-initializing a type that is or contains a C union type that is
/// non-trivial to copy or default-initialize.
void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);
// These flags are passed to checkNonTrivialCUnion.
// Power-of-two bit flags: combine with '|' to request multiple checks via
// the 'unsigned NonTrivialKind' bitmask parameter.
enum NonTrivialCUnionKind {
NTCUK_Init = 0x1,
NTCUK_Destruct = 0x2,
NTCUK_Copy = 0x4,
};
/// Emit diagnostics if a non-trivial C union type or a struct that contains
/// a non-trivial C union is used in an invalid context.
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
NonTrivialCUnionContext UseContext,
unsigned NonTrivialKind);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D);
ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr);
ExprResult ActOnRequiresClause(ExprResult ConstraintExpr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
/// Returns true when \p D is a (non-null) Objective-C method declaration.
bool isObjCMethodDecl(Decl *D) {
  if (!D)
    return false;
  return isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
/// The kind of module-declaration being processed (see ActOnModuleDecl).
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path, bool IsFirstDecl);
/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);
/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
SourceLocation PrivateLoc);
/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, Module *M,
ModuleIdPath Path = {});
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
/// Used by the diagnoseMissingImport overloads below.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics, derived from this
/// Sema's ASTContext and Preprocessor (delegates to the static overload
/// declared below).
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
/// Returned by getNonTagTypeDeclKind.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
/// How a tag type name is being used; passed to ActOnTag and
/// ActOnDependentTag.
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
/// Whether the "trivial_abi" attribute should be taken into account when
/// computing triviality; passed to SpecialMemberIsTrivial.
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
/// For a defaulted function, the kind of defaulted function that it is.
class DefaultedFunctionKind {
// Each field is packed into 8 bits. The constructors below guarantee that
// at most one of the two is "active": the other always holds its inactive
// sentinel (CXXInvalid / DefaultedComparisonKind::None).
CXXSpecialMember SpecialMember : 8;
DefaultedComparisonKind Comparison : 8;
public:
DefaultedFunctionKind()
: SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) {
}
DefaultedFunctionKind(CXXSpecialMember CSM)
: SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {}
DefaultedFunctionKind(DefaultedComparisonKind Comp)
: SpecialMember(CXXInvalid), Comparison(Comp) {}
bool isSpecialMember() const { return SpecialMember != CXXInvalid; }
bool isComparison() const {
return Comparison != DefaultedComparisonKind::None;
}
explicit operator bool() const {
return isSpecialMember() || isComparison();
}
CXXSpecialMember asSpecialMember() const { return SpecialMember; }
DefaultedComparisonKind asComparison() const { return Comparison; }
/// Get the index of this function kind for use in diagnostics.
unsigned getDiagnosticIndex() const {
static_assert(CXXInvalid > CXXDestructor,
"invalid should have highest index");
static_assert((unsigned)DefaultedComparisonKind::None == 0,
"none should be equal to zero");
// Because the inactive field holds its sentinel (None == 0, or
// CXXInvalid, the highest special-member value), the sum gives special
// members their own value and offsets comparison kinds past CXXInvalid.
return SpecialMember + (unsigned)Comparison;
}
};
DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD);
/// Convenience wrapper: the special-member kind of \p MD as computed by
/// getDefaultedFunctionKind.
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) {
return getDefaultedFunctionKind(MD).asSpecialMember();
}
/// Convenience wrapper: the defaulted-comparison kind of \p FD as computed
/// by getDefaultedFunctionKind.
DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) {
return getDefaultedFunctionKind(FD).asComparison();
}
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform ODR-like check for C/ObjC when merging tag types from modules.
/// Differently from C++, actually parse the body and reject / error out
/// in case of a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
bool IsAbstract,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Enter a template parameter scope, after it's been associated with a particular
/// DeclContext. Causes lookup within the scope to chain through enclosing contexts
/// in the correct order.
void EnterTemplatedContext(Scope *S, DeclContext *DC);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
/// Passed to mergeDeclAttributes and mergeAvailabilityAttr.
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
/// Merge availability attributes for an implementation of
/// an optional protocol requirement.
AMK_OptionalProtocolImplementation
};
/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
/// The availability attribute was specified explicitly next to the
/// declaration.
AP_Explicit = 0,
/// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1,
/// The availability attribute for a specific platform was inferred from
/// an availability attribute for another platform.
AP_InferredFromOtherPlatform = 2
};
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *
mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Platform, bool Implicit,
VersionTuple Introduced, VersionTuple Deprecated,
VersionTuple Obsoleted, bool IsUnavailable,
StringRef Message, bool IsStrict, StringRef Replacement,
AvailabilityMergeKind AMK, int Priority);
TypeVisibilityAttr *
mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
TypeVisibilityAttr::VisibilityType Vis);
VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
VisibilityAttr::VisibilityType Vis);
UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef UuidAsWritten, MSGuidDecl *GuidDecl);
DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI);
DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI);
MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D,
const AttributeCommonInfo &CI,
bool BestCase,
MSInheritanceModel Model);
ErrorAttr *mergeErrorAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef NewUserDiagnostic);
FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Format, int FormatIdx,
int FirstArg);
SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D,
const AttributeCommonInfo &CI,
const IdentifierInfo *Ident);
MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI);
SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const SwiftNameAttr &SNA,
StringRef Name);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D,
const AttributeCommonInfo &CI);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
WebAssemblyImportNameAttr *mergeImportNameAttr(
Decl *D, const WebAssemblyImportNameAttr &AL);
WebAssemblyImportModuleAttr *mergeImportModuleAttr(
Decl *D, const WebAssemblyImportModuleAttr &AL);
EnforceTCBAttr *mergeEnforceTCBAttr(Decl *D, const EnforceTCBAttr &AL);
EnforceTCBLeafAttr *mergeEnforceTCBLeafAttr(Decl *D,
const EnforceTCBLeafAttr &AL);
BTFTagAttr *mergeBTFTagAttr(Decl *D, const BTFTagAttr &AL);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation.
// NOTE(review): these values are likely used as %select indices in
// diagnostics — confirm before reordering or inserting enumerators.
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
/// Result of CheckOverload(): whether a new function declaration is a
/// legitimate overload of, matches, or conflicts with existing declarations.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true,
bool ConsiderRequiresClauses = true);
/// The degree to which 'explicit' functions may participate in an implicit
/// conversion; passed to TryImplicitConversion.
enum class AllowedExplicit {
/// Allow no explicit functions to be used.
None,
/// Allow explicit conversion functions but not explicit constructors.
Conversions,
/// Allow both explicit conversion functions and explicit constructors.
All
};
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
AllowedExplicit AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
bool CanPerformAggregateInitializationForOverloadResolution(
const InitializedEntity &Entity, InitListExpr *From);
bool IsStringInit(Expr *Init, const ArrayType *AT);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
// Passed to CheckConvertedConstantExpression (below) to select the
// context-specific diagnostics for the required constant.
enum CCEKind {
  CCEK_CaseValue,    ///< Expression in a case label.
  CCEK_Enumerator,   ///< Enumerator value with fixed underlying type.
  CCEK_TemplateArg,  ///< Value of a non-type template parameter.
  CCEK_ArrayBound,   ///< Array bound in array declarator or new-expression.
  CCEK_ExplicitBool, ///< Condition in an explicit(bool) specifier.
  CCEK_Noexcept      ///< Condition in a noexcept(bool) specifier.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE,
NamedDecl *Dest = nullptr);
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
  // NOTE(review): these flags appear to gate diagnostic emission in the
  // conversion driver (see PerformContextualImplicitConversion); their exact
  // effect is not visible in this header — confirm at the use site.
  bool Suppress;
  bool SuppressConversion;
  ContextualImplicitConverter(bool Suppress = false,
                              bool SuppressConversion = false)
      : Suppress(Suppress), SuppressConversion(SuppressConversion) {}

  /// Determine whether the specified type is a valid destination type
  /// for this conversion.
  virtual bool match(QualType T) = 0;

  /// Emits a diagnostic complaining that the expression does not have
  /// integral or enumeration type.
  virtual SemaDiagnosticBuilder
  diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;

  /// Emits a diagnostic when the expression has incomplete class type.
  virtual SemaDiagnosticBuilder
  diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;

  /// Emits a diagnostic when the only matching conversion function
  /// is explicit.
  virtual SemaDiagnosticBuilder diagnoseExplicitConv(
      Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;

  /// Emits a note for the explicit conversion function.
  virtual SemaDiagnosticBuilder
  noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;

  /// Emits a diagnostic when there are multiple possible conversion
  /// functions.
  virtual SemaDiagnosticBuilder
  diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;

  /// Emits a note for one of the candidate conversions.
  virtual SemaDiagnosticBuilder
  noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;

  /// Emits a diagnostic when we picked a conversion function
  /// (for cases when we are not allowed to pick a conversion function).
  virtual SemaDiagnosticBuilder diagnoseConversion(
      Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;

  virtual ~ContextualImplicitConverter() {}
};
/// A ContextualImplicitConverter whose destination types are integral and
/// (possibly scoped) enumeration types; see match() below.
class ICEConvertDiagnoser : public ContextualImplicitConverter {
  /// Whether scoped enumeration types are acceptable destinations.
  bool AllowScopedEnumerations;

public:
  ICEConvertDiagnoser(bool AllowScopedEnumerations,
                      bool Suppress, bool SuppressConversion)
      : ContextualImplicitConverter(Suppress, SuppressConversion),
        AllowScopedEnumerations(AllowScopedEnumerations) {}

  /// Match an integral or (possibly scoped) enumeration type.
  bool match(QualType T) override;

  /// Forwards the generic "no match" diagnostic to the more specific
  /// diagnoseNotInt hook implemented by subclasses.
  SemaDiagnosticBuilder
  diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
    return diagnoseNotInt(S, Loc, T);
  }

  /// Emits a diagnostic complaining that the expression does not have
  /// integral or enumeration type.
  virtual SemaDiagnosticBuilder
  diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
// Classification returned by CheckSubscriptingKind (below).
enum ObjCSubscriptKind {
  OS_Array,      ///< Integer-indexed (array-style) subscripting.
  OS_Dictionary, ///< Object-keyed (dictionary-style) subscripting.
  OS_Error       ///< The subscript kind could not be determined.
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
// Classification returned by CheckLiteralKind (below).
enum ObjCLiteralKind {
  LK_Array,      ///< Array literal, e.g. @[ ... ].
  LK_Dictionary, ///< Dictionary literal, e.g. @{ ... }.
  LK_Numeric,    ///< Numeric literal, e.g. @42.
  LK_Boxed,      ///< Boxed expression, e.g. @( ... ).
  LK_String,     ///< String literal, e.g. @"...".
  LK_Block,      ///< A block expression.
  LK_None        ///< Not an Objective-C literal.
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
using ADLCallKind = CallExpr::ADLCallKind;
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = true,
bool AllowExplicitConversion = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false,
OverloadCandidateParamOrder PO = {});
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
OverloadCandidateParamOrder PO = {});
void AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false, bool AllowExplicit = true,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
OverloadCandidateParamOrder PO = {});
bool CheckNonDependentConversions(
FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions, bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(),
Expr::Classification ObjectClassification = {},
OverloadCandidateParamOrder PO = {});
void AddConversionCandidate(
CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddNonMemberOperatorCandidates(
const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
OverloadCandidateParamOrder PO = {});
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(
NamedDecl *Found, FunctionDecl *Fn,
OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(),
QualType DestType = QualType(), bool TakingAddress = false);
// Emit as a series of 'note's all template and non-templates identified by
// the expression Expr
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, SourceLocation CallLoc,
ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfSingleOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConverion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
void AddOverloadedCallCandidates(
LookupResult &R, TemplateArgumentListInfo *ExplicitTemplateArgs,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
  FRS_Success,          ///< The begin/end call was built successfully.
  FRS_NoViableFunction, ///< No viable function was found.
  FRS_DiagnosticIssued  ///< A diagnostic has already been emitted.
};
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateUnresolvedLookupExpr(CXXRecordDecl *NamingClass,
NestedNameSpecifierLoc NNSLoc,
DeclarationNameInfo DNI,
const UnresolvedSetImpl &Fns,
bool PerformADL = true);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet,
OverloadedOperatorKind Op,
const UnresolvedSetImpl &Fns,
ArrayRef<Expr *> Args, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true,
bool AllowRewrittenCandidates = true,
FunctionDecl *DefaultedFn = nullptr);
ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
FunctionDecl *DefaultedFn);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false,
bool AllowRecovery = false);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// Describes the kind of name lookup to perform.
enum LookupNameKind {
  /// Ordinary name lookup, which finds ordinary names (functions,
  /// variables, typedefs, etc.) in C and most kinds of names
  /// (functions, variables, members, types, etc.) in C++.
  LookupOrdinaryName = 0,
  /// Tag name lookup, which finds the names of enums, classes,
  /// structs, and unions.
  LookupTagName,
  /// Label name lookup.
  LookupLabel,
  /// Member name lookup, which finds the names of
  /// class/struct/union members.
  LookupMemberName,
  /// Lookup of an operator name (e.g., operator+) for use with
  /// operator overloading. This lookup is similar to ordinary name
  /// lookup, but will ignore any declarations that are class members.
  LookupOperatorName,
  /// Look up a name following ~ in a destructor name. This is an ordinary
  /// lookup, but prefers tags to typedefs.
  LookupDestructorName,
  /// Lookup of a name that precedes the '::' scope resolution
  /// operator in C++. This lookup completely ignores operator, object,
  /// function, and enumerator names (C++ [basic.lookup.qual]p1).
  LookupNestedNameSpecifierName,
  /// Look up a namespace name within a C++ using directive or
  /// namespace alias definition, ignoring non-namespace names (C++
  /// [basic.lookup.udir]p1).
  LookupNamespaceName,
  /// Look up all declarations in a scope with the given name,
  /// including resolved using declarations. This is appropriate
  /// for checking redeclarations for a using declaration.
  LookupUsingDeclName,
  /// Look up an ordinary name that is going to be redeclared as a
  /// name with linkage. This lookup ignores any declarations that
  /// are outside of the current scope unless they have linkage. See
  /// C99 6.2.2p4-5 and C++ [basic.link]p6.
  LookupRedeclarationWithLinkage,
  /// Look up a friend of a local class. This lookup does not look
  /// outside the innermost non-class scope. See C++11 [class.friend]p11.
  LookupLocalFriendName,
  /// Look up the name of an Objective-C protocol.
  LookupObjCProtocolName,
  /// Look up the implicit 'self' parameter of an Objective-C method.
  LookupObjCImplicitSelfParam,
  /// Look up the name of an OpenMP user-defined reduction operation.
  LookupOMPReductionName,
  /// Look up the name of an OpenMP user-defined mapper.
  LookupOMPMapperName,
  /// Look up any declaration with any name.
  LookupAnyName
};
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
// Use forRedeclarationInCurContext() (below) to select the appropriate kind
// for the current declaration context.
enum RedeclarationKind {
  /// The lookup is a reference to this name that is not for the
  /// purpose of redeclaring the name.
  NotForRedeclaration = 0,
  /// The lookup results will be used for redeclaration of a name,
  /// if an entity by that name already exists and is visible.
  ForVisibleRedeclaration,
  /// The lookup results will be used for redeclaration of a name
  /// with external linkage; non-visible lookup results with external linkage
  /// may also be found.
  ForExternalRedeclaration
};
/// Select the RedeclarationKind appropriate for a redeclaration lookup
/// performed in the current declaration context.
RedeclarationKind forRedeclarationInCurContext() {
  // A declaration with an owning module for linkage can never link against
  // anything that is not visible. We don't need to check linkage here; if
  // the context has internal linkage, redeclaration lookup won't find things
  // from other TUs, and we can't safely compute linkage yet in general.
  Decl *ContextDecl = cast<Decl>(CurContext);
  bool HasOwningModule =
      ContextDecl->getOwningModuleForLinkage(/*IgnoreLinkage=*/true) != nullptr;
  return HasOwningModule ? ForVisibleRedeclaration : ForExternalRedeclaration;
}
/// The possible outcomes of name lookup for a literal operator.
// Returned by LookupLiteralOperator (below).
enum LiteralOperatorLookupResult {
  /// The lookup resulted in an error.
  LOLR_Error,
  /// The lookup found no match but no diagnostic was issued.
  LOLR_ErrorNoDiagnostic,
  /// The lookup found a single 'cooked' literal operator, which
  /// expects a normal literal to be built and passed to it.
  LOLR_Cooked,
  /// The lookup found a single 'raw' literal operator, which expects
  /// a string literal containing the spelling of the literal token.
  LOLR_Raw,
  /// The lookup found an overload set of literal operator templates,
  /// which expect the characters of the spelling of the literal token to be
  /// passed as a non-type template argument pack.
  LOLR_Template,
  /// The lookup found an overload set of literal operator templates,
  /// which expect the character type and characters of the spelling of the
  /// string literal token to be passed as template arguments.
  LOLR_StringTemplatePack,
};
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
/// Bookkeeping kept for a single delayed TypoExpr: the correction
/// candidates plus the callbacks used to diagnose the typo and to rebuild
/// the expression once a correction is chosen.
struct TypoExprState {
  /// Produces candidate corrections for the typo.
  std::unique_ptr<TypoCorrectionConsumer> Consumer;
  /// Callback that emits the diagnostic for a chosen correction.
  TypoDiagnosticGenerator DiagHandler;
  /// Callback that rebuilds an expression from a correction.
  TypoRecoveryCallback RecoveryHandler;
  // Move-only: Consumer is a unique_ptr, so copying is implicitly deleted.
  TypoExprState();
  TypoExprState(TypoExprState &&other) noexcept;
  TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, SourceLocation TypoLoc);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupBuiltin(LookupResult &R);
void LookupNecessaryTypesForBuiltin(Scope *S, unsigned ID);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id,
bool IsUDSuffix);
LiteralOperatorLookupResult
LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys,
bool AllowRaw, bool AllowTemplate,
bool AllowStringTemplate, bool DiagnoseMissing,
StringLiteral *StringLit = nullptr);
bool isKnownName(StringRef name);
/// Status of a function's emission with respect to CUDA/HIP/OpenMP
/// host/device attributes.
enum class FunctionEmissionStatus {
  Emitted,           ///< The function is (or will be) emitted.
  CUDADiscarded,     ///< Discarded due to CUDA/HIP hostness.
  OMPDiscarded,      ///< Discarded due to OpenMP hostness.
  TemplateDiscarded, ///< Discarded due to uninstantiated templates.
  Unknown,           ///< Emission status cannot yet be determined.
};
FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl,
bool Final = false);
DeviceDiagnosticReason getEmissionReason(const FunctionDecl *Decl);
// Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check.
bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
enum CorrectTypoKind {
  CTK_NonError,     ///< CorrectTypo used in a non-error-recovery situation.
  CTK_ErrorRecovery ///< CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param RecoverUncorrectedTypos If true, when typo correction fails, it
/// will rebuild the given Expr with all TypoExprs degraded to RecoveryExprs.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult CorrectDelayedTyposInExpr(
Expr *E, VarDecl *InitDecl = nullptr,
bool RecoverUncorrectedTypos = false,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
/// Convenience overload of CorrectDelayedTyposInExpr taking an ExprResult:
/// an invalid result is passed through untouched; a valid expression is
/// forwarded to the Expr* overload with the same arguments.
ExprResult CorrectDelayedTyposInExpr(
    ExprResult ER, VarDecl *InitDecl = nullptr,
    bool RecoverUncorrectedTypos = false,
    llvm::function_ref<ExprResult(Expr *)> Filter =
        [](Expr *E) -> ExprResult { return E; }) {
  if (ER.isInvalid())
    return ER;
  return CorrectDelayedTyposInExpr(ER.get(), InitDecl, RecoverUncorrectedTypos,
                                   Filter);
}
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
/// Attempts to produce a RecoveryExpr after some AST node cannot be created.
ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End,
ArrayRef<Expr *> SubExprs,
QualType T = QualType());
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
FunctionDecl *CreateBuiltin(IdentifierInfo *II, QualType Type, unsigned ID,
SourceLocation Loc);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(
FunctionDecl *FD);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Handles semantic checking for features that are common to all attributes,
/// such as checking whether a parameter was properly specified, or the
/// correct number of arguments were passed, etc. Returns true if the
/// attribute has been diagnosed.
bool checkCommonAttributeFeatures(const Decl *D, const ParsedAttr &A);
bool checkCommonAttributeFeatures(const Stmt *S, const ParsedAttr &A);
/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
llvm::Error isValidSectionSpecifier(StringRef Str);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceModel SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Process the attributes before creating an attributed statement. Returns
/// the semantic attributes that have been processed.
void ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesWithRange &InAttrs,
SmallVectorImpl<const Attr *> &OutAttrs);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which
/// backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If method is a property setter/getter and
/// its property has a backing ivar, returns this ivar; otherwise, returns NULL.
/// It also returns ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjcPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
/// How strictly two method declarations' types must match when compared.
enum MethodMatchStrategy {
  MMS_loose,  // Looser matching; see MatchTwoMethodDeclarations for use.
  MMS_strict  // Exact matching (the default for MatchTwoMethodDeclarations).
};
/// MatchTwoMethodDeclarations - Checks if two methods' type match and returns
/// true, or false, accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface
/// or protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
/// category matches with those implemented in its primary class and
/// warns each time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
/// Returns default addr space for method qualifiers.
LangAS getDefaultCXXMethodAddrSpace() const;
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// - Returns instance or factory methods in global method pool for
/// given selector. It checks the desired kind first, if none is found, and
/// parameter checkTheOther is set, it then checks the other kind. If no such
/// method or only one method is found, function returns false; otherwise, it
/// returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// - Returns a selector which best matches given argument list or
/// nullptr if none could be found
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                bool RecordFailure = true) {
  // Optionally record the location at which correcting \p Typo failed.
  if (RecordFailure) {
    auto &FailedLocs = TypoCorrectionFailures[Typo];
    FailedLocs.insert(TypoLoc);
  }
  // An empty TypoCorrection signals "no correction found".
  return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  // Forward to the private worker, tagging this as an instance method.
  AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  // Forward to the private worker, tagging this as a factory (class) method.
  AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                                 bool receiverIdOrClass=false) {
  // Delegate to the shared lookup, requesting the instance-method pool.
  const bool WantInstance = true;
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, WantInstance);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                                bool receiverIdOrClass=false) {
  // Delegate to the shared lookup, requesting the factory-method pool.
  const bool WantInstance = false;
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, WantInstance);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
// Thin wrapper around an Expr* representing a finished full-expression.
// Only Sema::MakeFullExpr can build a non-null one (private constructor).
class FullExprArg {
public:
  FullExprArg() : E(nullptr) { }
  // NOTE(review): 'actions' is unused; this overload also yields a null arg.
  FullExprArg(Sema &actions) : E(nullptr) { }

  // Hand the expression back to the caller as an ExprResult.
  ExprResult release() {
    return E;
  }

  Expr *get() const { return E; }

  Expr *operator->() {
    return E;
  }

private:
  // FIXME: No need to make the entire Sema class a friend when it's just
  // Sema::MakeFullExpr that needs access to the constructor below.
  friend class Sema;

  explicit FullExprArg(Expr *expr) : E(expr) {}

  Expr *E;
};
FullExprArg MakeFullExpr(Expr *Arg) {
  // Use the expression's own location when one is available; a null
  // expression gets an invalid (default) location.
  SourceLocation Loc;
  if (Arg)
    Loc = Arg->getExprLoc();
  return MakeFullExpr(Arg, Loc);
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
  // Finish the full-expression; its value is used, not discarded.
  ExprResult Full = ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false);
  return FullExprArg(Full.get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
  // Same as MakeFullExpr, but the full-expression's value is discarded.
  SourceLocation Loc;
  if (Arg)
    Loc = Arg->getExprLoc();
  ExprResult FE = ActOnFinishFullExpr(Arg, Loc, /*DiscardedValue*/ true);
  return FullExprArg(FE.get());
}
// Expression statements.
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();

// Null statement (';'); HasLeadingEmptyMacro notes an empty macro before it.
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
                         bool HasLeadingEmptyMacro = false);

// Compound statement ('{' ... '}') bracketing callbacks; see also
// CompoundScopeRAII below, which pairs the start/finish calls.
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnAfterCompoundStatementLeadingPragmas();
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
                             ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
  // Entering the scope notifies Sema that a compound statement starts.
  CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
    S.ActOnStartOfCompoundStmt(IsStmtExpr);
  }

  // Leaving the scope (including via early return) closes it again.
  ~CompoundScopeRAII() {
    S.ActOnFinishOfCompoundStmt();
  }

private:
  Sema &S;
};
/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
  Sema &S;
  bool Active; // When false, the destructor is a no-op.
  FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
  ~FunctionScopeRAII() {
    if (Active)
      S.PopFunctionScopeInfo();
  }
  // Call when the function scope has been (or will be) popped by other means.
  void disable() { Active = false; }
};
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult BuildAttributedStmt(SourceLocation AttrsLoc,
ArrayRef<const Attr *> Attrs, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(const ParsedAttributesWithRange &AttrList,
Stmt *SubStmt);
bool CheckRebuiltAttributedStmtAttributes(ArrayRef<const Attr *> Attrs);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, IfStatementKind StatementKind,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, IfStatementKind StatementKind,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond,
SourceLocation RParenLoc);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, SourceLocation LParenLoc,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params,
unsigned OpenMPCaptureLevel = 0);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
/// Information about a named return value: whether it is eligible for
/// implicit move and/or copy elision (NRVO) on a return statement.
struct NamedReturnInfo {
  const VarDecl *Candidate; // The returned variable, if any.

  enum Status : uint8_t { None, MoveEligible, MoveEligibleAndCopyElidable };
  Status S;

  // Fixed: removed a stray ';' after the member function body (a redundant
  // empty member declaration).
  bool isMoveEligible() const { return S != None; }
  bool isCopyElidable() const { return S == MoveEligibleAndCopyElidable; }
};
enum class SimplerImplicitMoveMode { ForceOff, Normal, ForceOn };
NamedReturnInfo getNamedReturnInfo(
Expr *&E, SimplerImplicitMoveMode Mode = SimplerImplicitMoveMode::Normal);
NamedReturnInfo getNamedReturnInfo(const VarDecl *VD);
const VarDecl *getCopyElisionCandidate(NamedReturnInfo &Info,
QualType ReturnType);
// Initialize \p Entity from \p Value, preferring move over copy where the
// named-return-value analysis (\p NRInfo) allows it.
// Fixed: parameter name spelling "Supress" -> "Suppress" (declaration-only
// rename; C++ callers are unaffected by parameter names).
ExprResult
PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
                                const NamedReturnInfo &NRInfo, Expr *Value,
                                bool SuppressSimplerImplicitMoves = false);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
// Build a return statement inside a captured region (block/lambda/captured
// statement) scope.
// Fixed: parameter name spelling "Supress" -> "Suppress" (declaration-only
// rename; C++ callers are unaffected by parameter names).
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
                                   NamedReturnInfo &NRInfo,
                                   bool SuppressSimplerImplicitMoves);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
unsigned NumLabels,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S, unsigned DiagID);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// If VD is set but not otherwise used, diagnose, for a parameter or a
/// variable.
void DiagnoseUnusedButSetDecl(const VarDecl *VD);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
  // Route delayed diagnostics into \p pool; the returned state is handed
  // back to PopParsingDeclaration to restore the previous pool.
  return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
  // Track the nesting depth of classes currently being parsed, and push an
  // undelayed diagnostic state for the class body.
  ++ParsingClassDepth;
  return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
  // Mirror of PushParsingClass: unwind the depth counter and restore the
  // saved diagnostic state.
  --ParsingClassDepth;
  DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReciever = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult CheckUnevaluatedOperand(Expr *E);
void CheckUnusedVolatileAssignment(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
unsigned CapturingScopeIndex);
ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Try to convert an expression \p E to type \p Ty. Returns the result of the
/// conversion.
ExprResult tryConvertExprToType(Expr *E, QualType Ty);
/// Conditionally issue a diagnostic based on the statements's reachability
/// analysis.
///
/// \param Stmts If Stmts is non-empty, delay reporting the diagnostic until
/// the function body is parsed, and then do a basic reachability analysis to
/// determine if the statement is reachable. If it is unreachable, the
/// diagnostic will not be emitted.
bool DiagIfReachable(SourceLocation Loc, ArrayRef<const Stmt *> Stmts,
const PartialDiagnostic &PD);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
/// Similar, but diagnostic is only produced if all the specified statements
/// are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool DiagnoseDependentMemberLookup(LookupResult &R);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S,
IdentifierInfo *II);
ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
/// If \p D cannot be odr-used in the current expression evaluation context,
/// return a reason explaining why. Otherwise, return NOUR_None.
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D);
DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
NestedNameSpecifierLoc NNS,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(
const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs, const Scope *S,
UnresolvedLookupExpr *AsULE = nullptr);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
ExprResult BuildSYCLUniqueStableNameExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen,
TypeSourceInfo *TSI);
ExprResult ActOnSYCLUniqueStableNameExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen,
ParsedType ParsedTy);
ExprResult BuildSYCLUniqueStableIdExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen, Expr *E);
ExprResult ActOnSYCLUniqueStableIdExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen, Expr *E);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx,
Expr *ColumnIdx,
SourceLocation RBLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound,
SourceLocation ColonLocFirst,
SourceLocation ColonLocSecond,
Expr *Length, Expr *Stride,
SourceLocation RBLoc);
ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc,
SourceLocation RParenLoc,
ArrayRef<Expr *> Dims,
ArrayRef<SourceRange> Brackets);
/// Data structure for one iterator definition parsed from an OpenMP
/// 'iterator' expression; a list of these is consumed by
/// ActOnOMPIteratorExpr.
struct OMPIteratorData {
  /// Name of the declared iterator variable (null if absent/invalid).
  IdentifierInfo *DeclIdent = nullptr;
  /// Location of the iterator variable name.
  SourceLocation DeclIdentLoc;
  /// Parsed type of the iterator variable.
  ParsedType Type;
  /// The begin/end/step range expressions of the iterator.
  OMPIteratorExpr::IteratorRange Range;
  /// Location of the '=' between the declarator and the range.
  SourceLocation AssignLoc;
  /// Location of the first ':' in the range.
  SourceLocation ColonLoc;
  /// Location of the second ':' in the range, if present.
  SourceLocation SecColonLoc;
};
ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc,
SourceLocation LLoc, SourceLocation RLoc,
ArrayRef<OMPIteratorData> Data);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
/// Extra state captured so BuildMemberReferenceExpr can reinvoke
/// ActOnMemberAccessExpr (see the comment above on retrying '.' as '->').
struct ActOnMemberAccessExtraArgs {
  /// Scope in which the original member access appeared.
  Scope *S;
  /// The member name as written, forwarded back to ActOnMemberAccessExpr.
  UnqualifiedId &Id;
  /// Objective-C implementation decl forwarded back to
  /// ActOnMemberAccessExpr.
  Decl *ObjCImpDecl;
};
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec *SS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false,
bool AllowRecovery = false);
Expr *BuildBuiltinCallExpr(SourceLocation Loc, Builtin::ID Id,
MultiExprArg CallArgs);
/// Argument-ordering convention accepted by BuildAtomicExpr: arguments in
/// the builtin-API order or in AST order (default is API; see
/// BuildAtomicExpr below).
enum class AtomicArgumentOrder { API, AST };
ExprResult
BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
SourceLocation RParenLoc, MultiExprArg Args,
AtomicExpr::AtomicOp Op,
AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
Expr *Config = nullptr, bool IsExecConfig = false,
ADLCallKind UsesADL = ADLCallKind::NotADL);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult BuildInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation EqualOrColonLoc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void LookupBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc,
UnresolvedSetImpl &Functions);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc);
ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc, unsigned TemplateDepth);
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
/// One component of a __builtin_offsetof designator: either a member
/// access ('.ident') or an array subscript ('[expr]').
struct OffsetOfComponent {
  /// Source range covered by this component.
  SourceLocation LocStart, LocEnd;
  bool isBrackets; // true if [expr], false if .ident
  /// The component payload, selected by isBrackets: IdentInfo for
  /// '.ident', E for '[expr]'.
  union {
    IdentifierInfo *IdentInfo;
    Expr *E;
  } U;
};
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc);
// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc, SourceLocation RPLoc,
DeclContext *ParentContext);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check, as produced by
/// CheckMicrosoftIfExistsSymbol for the Microsoft __if_exists /
/// __if_not_exists extension.
enum IfExistsResult {
  /// The symbol exists.
  IER_Exists,

  /// The symbol does not exist.
  IER_DoesNotExist,

  /// The name is a dependent name, so the results will differ
  /// from one instantiation to the next.
  IER_Dependent,

  /// An error occurred.
  IER_Error
};
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
ExprResult BuildAsTypeExpr(Expr *E, QualType DestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache representing if we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
/// The reason a comparison category type was required; passed to
/// CheckComparisonCategoryType.
enum class ComparisonCategoryUsage {
  /// The '<=>' operator was used in an expression and a builtin operator
  /// was selected.
  OperatorInExpression,

  /// A defaulted 'operator<=>' needed the comparison category. This
  /// typically only applies to 'std::strong_ordering', due to the implicit
  /// fallback return value.
  DefaultedOperator,
};
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc,
ComparisonCategoryUsage Usage);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void FilterUsingLookup(Scope *S, LookupResult &lookup);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(BaseUsingDecl *BUD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, BaseUsingDecl *BUD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc,
const LookupResult *R = nullptr,
const UsingDecl *UD = nullptr);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation,
bool IsUsingIfExists);
NamedDecl *BuildUsingEnumDeclaration(Scope *S, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation EnumLoc,
SourceLocation NameLoc, EnumDecl *ED);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnUsingEnumDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation EnumLoc, const DeclSpec &);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
  // Pointer to allow copying
  Sema *Self;

  // We order exception specifications thus:
  // noexcept is the most restrictive, but is only used in C++11.
  // throw() comes next.
  // Then a throw(collected exceptions)
  // Finally no specification, which is expressed as noexcept(false).
  // throw(...) is used instead if any called function uses it.
  //
  // The specification computed so far under that ordering; starts at
  // EST_BasicNoexcept (EST_DynamicNone before C++11) and is updated as
  // called declarations/statements are integrated via CalledDecl/CalledStmt.
  ExceptionSpecificationType ComputedEST;
  // Canonical types already recorded, used to deduplicate Exceptions.
  llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
  // The collected dynamic-exception-specification types, in order seen.
  SmallVector<QualType, 4> Exceptions;

  // Drop all collected exception types (both the list and the dedup set).
  void ClearExceptions() {
    ExceptionsSeen.clear();
    Exceptions.clear();
  }

public:
  explicit ImplicitExceptionSpecification(Sema &Self)
      : Self(&Self), ComputedEST(EST_BasicNoexcept) {
    // Before C++11 there is no noexcept; start from throw() instead.
    if (!Self.getLangOpts().CPlusPlus11)
      ComputedEST = EST_DynamicNone;
  }

  /// Get the computed exception specification type.
  ExceptionSpecificationType getExceptionSpecType() const {
    assert(!isComputedNoexcept(ComputedEST) &&
           "noexcept(expr) should not be a possible result");
    return ComputedEST;
  }

  /// The number of exceptions in the exception specification.
  unsigned size() const { return Exceptions.size(); }

  /// The set of exceptions in the exception specification.
  const QualType *data() const { return Exceptions.data(); }

  /// Integrate another called method into the collected data.
  void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);

  /// Integrate an invoked expression into the collected data.
  void CalledExpr(Expr *E) { CalledStmt(E); }

  /// Integrate an invoked statement into the collected data.
  void CalledStmt(Stmt *S);

  /// Overwrite an EPI's exception specification with this
  /// computed exception specification.
  FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
    FunctionProtoType::ExceptionSpecInfo ESI;
    ESI.Type = getExceptionSpecType();
    if (ESI.Type == EST_Dynamic) {
      ESI.Exceptions = Exceptions;
    } else if (ESI.Type == EST_None) {
      /// C++11 [except.spec]p14:
      ///   The exception-specification is noexcept(false) if the set of
      ///   potential exceptions of the special member function contains
      ///   "any".
      ESI.Type = EST_NoexceptFalse;
      ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
                                                   tok::kw_false).get();
    }
    return ESI;
  }
};
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Produce notes explaining why a defaulted function was defined as deleted.
void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
/// Wrap the expression in a ConstantExpr if it is a potential immediate
/// invocation.
ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
QualType DeclInitType, MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr *> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
// Checks that the vector type should be initialized from a scalar
// by splatting the value rather than populating a single element.
// This is the case for AltiVecVector types as well as with
// AltiVecPixel and AltiVecBool when -faltivec-src-compat=xl is specified.
bool ShouldSplatAltivecScalarInCast(const VectorType *VecTy);
// Checks if the -faltivec-src-compat=gcc option is specified.
// If so, AltiVecVector, AltiVecBool and AltiVecPixel types are
// treated the same way as they are when trying to initialize
// these vectors on gcc (an error is emitted).
bool CheckAltivecInitFromScalar(SourceRange R, QualType VecTy,
QualType SrcTy);
/// ActOnCXXNamedCast - Parse
/// {dynamic,static,reinterpret,const,addrspace}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
ExprResult Operand,
SourceLocation RParenLoc);
ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
Expr *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(Scope *S, SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(UnresolvedLookupExpr *Callee,
SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
// Saved value of S.CXXThisTypeOverride; restored by the destructor.
QualType OldCXXThisTypeOverride;
// Whether this scope actually installed an override (mirrors the ctor's
// Enabled argument); when false, the destructor has nothing to restore.
bool Enabled;
public:
/// Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class) along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \return returns 'true' if failed, 'false' if success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions (operator new/delete).
/// Passed to FindAllocationFunctions to control name lookup for a
/// new/delete expression.
enum AllocationFunctionScope {
/// Only look for allocation functions in the global scope.
AFS_Global,
/// Only look for allocation functions in the scope of the
/// allocated class.
AFS_Class,
/// Look for allocation functions in both the global scope
/// and in the scope of the allocated class.
AFS_Both
};
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
/// Convenience overload: finish a full-expression using the expression's
/// own location as the context location (or an invalid location when the
/// expression is null). Forwards to the four-argument overload below.
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
SourceLocation CC = Expr ? Expr->getExprLoc() : SourceLocation();
return ActOnFinishFullExpr(Expr, CC, DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
// Complete an enum decl, maybe without a scope spec.
bool RequireCompleteEnumDecl(EnumDecl *D, SourceLocation L,
CXXScopeSpec *SS = nullptr);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
/// The type of the object, if we're parsing nested-name-specifier in
/// a member access expression.
ParsedType ObjectType;
/// The identifier preceding the '::'.
IdentifierInfo *Identifier;
/// The location of the identifier.
SourceLocation IdentifierLoc;
/// The location of the '::'.
SourceLocation CCLoc;
/// Creates info object for the most typical case.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
: ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
CCLoc(ColonColonLoc) {
}
/// Creates info object when the object type is available as a QualType;
/// wraps it into an opaque ParsedType for storage.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, QualType ObjectType)
: ObjectType(ParsedType::make(ObjectType)), Identifier(II),
IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
}
};
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case do not emit error message.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
ConstexprSpecKind ConstexprKind,
Expr *TrailingRequiresClause);
/// Number lambda for linkage purposes if necessary.
void handleLambdaNumbering(
CXXRecordDecl *Class, CXXMethodDecl *Method,
Optional<std::tuple<bool, unsigned, unsigned, Decl *>> Mangling = None);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
///
/// Thin wrapper over buildLambdaInitCaptureInitialization that maps the
/// parser's InitKind to a direct-init flag and wraps the resulting
/// QualType as a ParsedType.
ParsedType actOnLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
// Copy-init ("[x = e]") is the only non-direct form; everything else
// (paren/brace init) is treated as direct initialization.
const bool DirectInit = InitKind != LambdaCaptureInitKind::CopyInit;
QualType CaptureType = buildLambdaInitCaptureInitialization(
Loc, ByRef, EllipsisLoc, None, Id, DirectInit, Init);
return ParsedType::make(CaptureType);
}
QualType buildLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
Expr *&Init);
/// Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
SourceLocation EllipsisLoc,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
SourceLocation RAngleLoc,
ExprResult RequiresClause);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Build a FieldDecl suitable to hold the given capture.
FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture);
/// Initialize the given capture with a suitable expression.
ExprResult BuildCaptureInit(const sema::Capture &Capture,
SourceLocation ImplicitCaptureLoc,
bool IsOpenMPMapping = false);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType,
CallingConv CC);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
/// Check whether the given expression is a valid constraint expression.
/// A diagnostic is emitted if it is not, false is returned, and
/// PossibleNonPrimary will be set to true if the failure might be due to a
/// non-primary expression being used as an atomic constraint.
bool CheckConstraintExpression(const Expr *CE, Token NextToken = Token(),
bool *PossibleNonPrimary = nullptr,
bool IsTrailingRequiresClause = false);
private:
/// Caches pairs of template-like decls whose associated constraints were
/// checked for subsumption and whether or not the first's constraints did in
/// fact subsume the second's.
llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache;
/// Caches the normalized associated constraints of declarations (concepts or
/// constrained declarations). If an error occurred while normalizing the
/// associated constraints of the template or concept, nullptr will be cached
/// here.
llvm::DenseMap<NamedDecl *, NormalizedConstraint *>
NormalizationCache;
llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &>
SatisfactionCache;
public:
const NormalizedConstraint *
getNormalizedAssociatedConstraints(
NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints);
/// \brief Check whether the given declaration's associated constraints are
/// at least as constrained as another declaration's according to the
/// partial ordering of constraints.
///
/// \param Result If no error occurred, receives the result of true if D1 is
/// at least as constrained as D2, and false otherwise.
///
/// \returns true if an error occurred, false otherwise.
bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1,
NamedDecl *D2, ArrayRef<const Expr *> AC2,
bool &Result);
/// If D1 was not at least as constrained as D2, but would've been if a pair
/// of atomic constraints involved had been declared in a concept and not
/// repeated in two separate places in code.
/// \returns true if such a diagnostic was emitted, false otherwise.
bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1,
ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2);
/// \brief Check whether the given list of constraint expressions are
/// satisfied (as if in a 'conjunction') given template arguments.
/// \param Template the template-like entity that triggered the constraints
/// check (either a concept or a constrained entity).
/// \param ConstraintExprs a list of constraint expressions, treated as if
/// they were 'AND'ed together.
/// \param TemplateArgs the list of template arguments to substitute into the
/// constraint expression.
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
/// \param Satisfaction if true is returned, will contain details of the
/// satisfaction, with enough information to diagnose an unsatisfied
/// expression.
/// \returns true if an error occurred and satisfaction could not be checked,
/// false otherwise.
bool CheckConstraintSatisfaction(
const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction);
/// \brief Check whether the given non-dependent constraint expression is
/// satisfied. Returns false and updates Satisfaction with the satisfaction
/// verdict if successful, emits a diagnostic and returns true if an error
/// occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckConstraintSatisfaction(const Expr *ConstraintExpr,
ConstraintSatisfaction &Satisfaction);
/// Check whether the given function decl's trailing requires clause is
/// satisfied, if any. Returns false and updates Satisfaction with the
/// satisfaction verdict if successful, emits a diagnostic and returns true if
/// an error occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckFunctionConstraints(const FunctionDecl *FD,
ConstraintSatisfaction &Satisfaction,
SourceLocation UsageLoc = SourceLocation());
/// \brief Ensure that the given template arguments satisfy the constraints
/// associated with the given template, emitting a diagnostic if they do not.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateArgs The converted, canonicalized template arguments.
///
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
///
/// \returns true if the constrains are not satisfied or could not be checked
/// for satisfaction, false if the constraints are satisfied.
bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
/// \param First whether this is the first time an unsatisfied constraint is
/// diagnosed for this error.
void
DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction,
bool First = true);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
void
DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction,
bool First = true);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// Mark destructors of virtual bases of this class referenced. In the Itanium
/// C++ ABI, this is done when emitting a destructor for any non-abstract
/// class. In the Microsoft C++ ABI, this is done any time a class's
/// destructor is referenced.
void MarkVirtualBaseDestructorsReferenced(
SourceLocation Location, CXXRecordDecl *ClassDecl,
llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases = nullptr);
/// Do semantic checks to allow the complete destructor variant to be emitted
/// when the destructor is defined in another translation unit. In the Itanium
/// C++ ABI, destructor variants are emitted together. In the MS C++ ABI, they
/// can be emitted in separate TUs. To emit the complete variant, run a subset
/// of the checks performed when emitting a regular destructor.
void CheckCompleteDestructorVariant(SourceLocation CurrentLocation,
CXXDestructorDecl *Dtor);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD,
bool ConstexprOnly = false);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
/// Add gsl::Pointer attribute to std::container::iterator
/// \param ND The declaration that introduces the name
/// std::container::iterator. \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);
/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);
/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);
void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record);
/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass();
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Decl *Template,
llvm::function_ref<Scope *()> EnterScope);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD);
bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
CXXSpecialMember CSM);
void CheckDelayedMemberExceptionSpecs();
bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD,
DefaultedComparisonKind DCK);
void DeclareImplicitEqualityComparison(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD,
DefaultedComparisonKind DCK);
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbiguousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D, bool Inconsistent);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
/// Outcome of a C++ access-control check.
enum AccessResult {
  AR_accessible,   // access is permitted
  AR_inaccessible, // access is not permitted
  AR_dependent,    // result depends on template instantiation
  AR_delayed       // the check was deferred for later processing
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
QualType BaseType);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found, QualType ObjectType,
SourceLocation Loc,
const PartialDiagnostic &Diag);
/// Convenience overload: performs the same accessibility check with an
/// invalid source location and an empty diagnostic, so no note about the
/// access failure is attached.
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
                                   DeclAccessPair Found,
                                   QualType ObjectType) {
  return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType,
                                       SourceLocation(), PDiag());
}
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
/// Selector for diagnostics about uses of abstract class types; the value
/// chooses which %select alternative of the diagnostic text is emitted.
enum AbstractDiagSelID {
  AbstractNone = -1, // no selector: emit the diagnostic without a %select
  AbstractReturnType,
  AbstractParamType,
  AbstractVariableType,
  AbstractFieldType,
  AbstractIvarType,
  AbstractSynthesizedIvarType,
  AbstractArrayType
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
/// Diagnose if \p T is an abstract class type, formatting the diagnostic
/// \p DiagID with the given arguments. Returns true if \p T is abstract.
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
                            const Ts &...Args) {
  // Bundle the diagnostic ID and its arguments into a diagnoser object and
  // defer to the TypeDiagnoser-based overload.
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireNonAbstractType(Loc, T, Diagnoser);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true,
bool AllowNonTemplateFunctions = false);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
static NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
enum TemplateNameIsRequiredTag { TemplateNameIsRequired };
/// Whether and why a template name is required in this lookup.
class RequiredTemplateKind {
public:
  /// Template name is required if TemplateKWLoc is valid.
  RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation())
      : TemplateKW(TemplateKWLoc) {}
  /// Template name is unconditionally required.
  RequiredTemplateKind(TemplateNameIsRequiredTag) : TemplateKW() {}

  /// Location of the 'template' keyword, or an invalid location if there
  /// was none (including the unconditionally-required case, where the
  /// Optional is empty).
  SourceLocation getTemplateKeywordLoc() const {
    return TemplateKW.getValueOr(SourceLocation());
  }
  /// True only when an actual 'template' keyword with a valid location
  /// triggered the requirement.
  bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); }
  /// True when a template name is required: either the keyword location is
  /// valid, or TemplateKW is empty (the unconditional case), since an empty
  /// Optional compares unequal to SourceLocation().
  bool isRequired() const { return TemplateKW != SourceLocation(); }
  explicit operator bool() const { return isRequired(); }

private:
  llvm::Optional<SourceLocation> TemplateKW;
};
enum class AssumedTemplateKind {
/// This is not assumed to be a template name.
None,
/// This is assumed to be a template name because lookup found nothing.
FoundNothing,
/// This is assumed to be a template name because lookup found one or more
/// functions (but no function templates).
FoundFunctions,
};
bool LookupTemplateName(
LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType,
bool EnteringContext, bool &MemberOfUnknownSpecialization,
RequiredTemplateKind RequiredTemplate = SourceLocation(),
AssumedTemplateKind *ATK = nullptr, bool AllowTypoCorrection = true);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization,
bool Disambiguation = false);
/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
TemplateNameKind &TNK,
SourceLocation NameLoc,
IdentifierInfo *&II);
bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
SourceLocation NameLoc,
bool Diagnose = true);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg, bool HasTypeConstraint);
bool ActOnTypeConstraint(const CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool BuildTypeConstraint(const CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc,
bool AllowUnexpandedPack);
bool AttachTypeConstraint(NestedNameSpecifierLoc NS,
DeclarationNameInfo NameInfo,
ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool AttachTypeConstraint(AutoTypeLoc TL,
NonTypeTemplateParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool RequireStructuralType(QualType T, SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
// NOTE: kind of template declaration whose parameter list is being checked;
// this affects which defaults/constraints are permitted on the parameters.
enum TemplateParamListContext {
  TPC_ClassTemplate,
  TPC_VarTemplate,
  TPC_FunctionTemplate,
  TPC_ClassTemplateMember,
  TPC_FriendClassTemplate,
  TPC_FriendFunctionTemplate,
  TPC_FriendFunctionTemplateDefinition,
  TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid,
bool SuppressDiagnostic = false);
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
/// Get a template argument mapping the given template parameter to itself,
/// e.g. for X in \c template<int X>, this would return an expression template
/// argument referencing X.
TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param,
SourceLocation Location);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false, bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
/// Get the specialization of the given variable template corresponding to
/// the specified argument list, or a null-but-valid result if the arguments
/// are dependent.
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
/// Form a reference to the specialization of the given variable template
/// corresponding to the specified argument list, or a null-but-valid result
/// if the arguments are dependent.
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult
CheckConceptTemplateId(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &ConceptNameInfo,
NamedDecl *FoundDecl, ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, CXXScopeSpec &SS,
TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend = false);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \param ConstraintsNotSatisfied If provided, and an error occurred, will
/// receive true if the cause for the error is the associated constraints of
/// the template not being satisfied by the template arguments.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true,
bool *ConstraintsNotSatisfied = nullptr);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param,
TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
  /// Enumeration describing how template parameter lists are compared
  /// for equality.
  enum TemplateParameterListEqualKind {
    /// We are matching the template parameter lists of two templates
    /// that might be redeclarations.
    ///
    /// \code
    /// template<typename T> struct X;
    /// template<typename T> struct X;
    /// \endcode
    TPL_TemplateMatch,
    /// We are matching the template parameter lists of two template
    /// template parameters as part of matching the template parameter lists
    /// of two templates that might be redeclarations.
    ///
    /// \code
    /// template<template<int I> class TT> struct X;
    /// template<template<int Value> class Other> struct X;
    /// \endcode
    TPL_TemplateTemplateParmMatch,
    /// We are matching the template parameter lists of a template
    /// template argument against the template parameter lists of a template
    /// template parameter.
    ///
    /// \code
    /// template<template<int Value> class Metafun> struct X;
    /// template<int Value> struct integer_c;
    /// X<integer_c> xic;
    /// \endcode
    TPL_TemplateTemplateArgumentMatch
  };
  /// Determine whether the template parameter lists \p New and \p Old are
  /// equivalent under the comparison rules selected by \p Kind.
  ///
  /// \param Complain If \c true, emit diagnostics describing any mismatch.
  ///
  /// \param TemplateArgLoc If valid, the location of the template argument
  /// that prompted the comparison, used to anchor diagnostics.
  bool TemplateParameterListsAreEqual(TemplateParameterList *New,
                                      TemplateParameterList *Old,
                                      bool Complain,
                                      TemplateParameterListEqualKind Kind,
                                      SourceLocation TemplateArgLoc
                                        = SourceLocation());
  /// Check whether a template may be declared within scope \p S.
  ///
  /// \returns true if an error occurred (and was diagnosed), false if the
  /// declaration scope is acceptable.
  bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
  /// Called when the parser has parsed a C++ typename
  /// specifier, e.g., "typename T::type".
  ///
  /// \param S The scope in which this typename type occurs.
  /// \param TypenameLoc the location of the 'typename' keyword
  /// \param SS the nested-name-specifier following the typename (e.g., 'T::').
  /// \param II the identifier we're retrieving (e.g., 'type' in the example).
  /// \param IdLoc the location of the identifier.
  TypeResult
  ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
                    const CXXScopeSpec &SS, const IdentifierInfo &II,
                    SourceLocation IdLoc);
  /// Called when the parser has parsed a C++ typename
  /// specifier that ends in a template-id, e.g.,
  /// "typename MetaFun::template apply<T1, T2>".
  ///
  /// \param S The scope in which this typename type occurs.
  /// \param TypenameLoc the location of the 'typename' keyword
  /// \param SS the nested-name-specifier following the typename (e.g., 'T::').
  /// \param TemplateLoc the location of the 'template' keyword, if any.
  /// \param TemplateName The template name.
  /// \param TemplateII The identifier used to name the template.
  /// \param TemplateIILoc The location of the template name.
  /// \param LAngleLoc The location of the opening angle bracket ('<').
  /// \param TemplateArgs The template arguments.
  /// \param RAngleLoc The location of the closing angle bracket ('>').
  TypeResult
  ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
                    const CXXScopeSpec &SS,
                    SourceLocation TemplateLoc,
                    TemplateTy TemplateName,
                    IdentifierInfo *TemplateII,
                    SourceLocation TemplateIILoc,
                    SourceLocation LAngleLoc,
                    ASTTemplateArgsPtr TemplateArgs,
                    SourceLocation RAngleLoc);
  /// Build the type named by a dependent typename specifier
  /// ("typename T::type" and friends). This overload additionally reports
  /// the type's source information through \p TSI.
  QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
                             SourceLocation KeywordLoc,
                             NestedNameSpecifierLoc QualifierLoc,
                             const IdentifierInfo &II,
                             SourceLocation IILoc,
                             TypeSourceInfo **TSI,
                             bool DeducedTSTContext);
  /// Build the type named by a dependent typename specifier
  /// ("typename T::type" and friends).
  QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
                             SourceLocation KeywordLoc,
                             NestedNameSpecifierLoc QualifierLoc,
                             const IdentifierInfo &II,
                             SourceLocation IILoc,
                             bool DeducedTSTContext = true);
  /// Rebuild the type \p T within the context of the current
  /// instantiation (e.g., so member references resolve against the
  /// instantiation rather than the template pattern).
  TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
                                                    SourceLocation Loc,
                                                    DeclarationName Name);
  /// Rebuild the nested-name-specifier \p SS within the context of the
  /// current instantiation. \returns true if an error occurred.
  bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
  /// Rebuild the expression \p E within the context of the current
  /// instantiation.
  ExprResult RebuildExprInCurrentInstantiation(Expr *E);
  /// Rebuild the template parameter list \p Params within the context of
  /// the current instantiation. \returns true if an error occurred.
  bool RebuildTemplateParamsInCurrentInstantiation(
                                                TemplateParameterList *Params);
  /// Produce a human-readable string showing the bindings of template
  /// parameters in \p Params to the template arguments in \p Args, for
  /// use in diagnostics.
  std::string
  getTemplateArgumentBindingsText(const TemplateParameterList *Params,
                                  const TemplateArgumentList &Args);
  /// Produce a human-readable string showing the bindings of template
  /// parameters in \p Params to the first \p NumArgs arguments in \p Args,
  /// for use in diagnostics.
  std::string
  getTemplateArgumentBindingsText(const TemplateParameterList *Params,
                                  const TemplateArgument *Args,
                                  unsigned NumArgs);
  //===--------------------------------------------------------------------===//
  // C++ Concepts
  //===--------------------------------------------------------------------===//
  /// Called when the parser has parsed a C++20 concept definition
  /// ("template<...> concept Name = ConstraintExpr;").
  Decl *ActOnConceptDefinition(
      Scope *S, MultiTemplateParamsArg TemplateParameterLists,
      IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);
  /// Called when the parser begins a requires-expression; creates the
  /// body declaration that owns the (optional) local parameters.
  RequiresExprBodyDecl *
  ActOnStartRequiresExpr(SourceLocation RequiresKWLoc,
                         ArrayRef<ParmVarDecl *> LocalParameters,
                         Scope *BodyScope);
  /// Called when the parser finishes the body of a requires-expression.
  void ActOnFinishRequiresExpr();
  /// Called for a simple requirement (a bare expression) within a
  /// requires-expression.
  concepts::Requirement *ActOnSimpleRequirement(Expr *E);
  /// Called for a type requirement ("typename T::type;") within a
  /// requires-expression.
  concepts::Requirement *ActOnTypeRequirement(
      SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc,
      IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId);
  /// Called for a compound requirement ("{ E } noexcept;") with no
  /// return-type-requirement.
  concepts::Requirement *ActOnCompoundRequirement(Expr *E,
                                                  SourceLocation NoexceptLoc);
  /// Called for a compound requirement with a return-type-requirement
  /// ("{ E } noexcept -> TypeConstraint;").
  concepts::Requirement *
  ActOnCompoundRequirement(
      Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS,
      TemplateIdAnnotation *TypeConstraint, unsigned Depth);
  /// Called for a nested requirement ("requires Constraint;") within a
  /// requires-expression.
  concepts::Requirement *ActOnNestedRequirement(Expr *Constraint);
  /// Build an expression requirement from a successfully-parsed
  /// expression \p E.
  concepts::ExprRequirement *
  BuildExprRequirement(
      Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc,
      concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
  /// Build an expression requirement whose expression failed to be
  /// substituted; \p ExprSubstDiag records the substitution failure.
  concepts::ExprRequirement *
  BuildExprRequirement(
      concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag,
      bool IsSatisfied, SourceLocation NoexceptLoc,
      concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
  /// Build a type requirement from a successfully-built type.
  concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type);
  /// Build a type requirement whose type failed to be substituted.
  concepts::TypeRequirement *
  BuildTypeRequirement(
      concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
  /// Build a nested requirement from a constraint expression.
  concepts::NestedRequirement *BuildNestedRequirement(Expr *E);
  /// Build a nested requirement whose constraint failed to be substituted.
  concepts::NestedRequirement *
  BuildNestedRequirement(
      concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
  /// Called when the parser has finished a complete requires-expression;
  /// builds the RequiresExpr from its parameters and requirements.
  ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc,
                               RequiresExprBodyDecl *Body,
                               ArrayRef<ParmVarDecl *> LocalParameters,
                               ArrayRef<concepts::Requirement *> Requirements,
                               SourceLocation ClosingBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// Block expression.
UPPC_Block,
/// A type constraint.
UPPC_TypeConstraint,
// A requirement in a requires-expression.
UPPC_Requirement,
// A requires-clause.
UPPC_RequiresClause,
};
  /// Diagnose unexpanded parameter packs.
  ///
  /// \param Loc The location at which we should emit the diagnostic.
  ///
  /// \param UPPC The context in which we are diagnosing unexpanded
  /// parameter packs.
  ///
  /// \param Unexpanded the set of unexpanded parameter packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
                                        UnexpandedParameterPackContext UPPC,
                                        ArrayRef<UnexpandedParameterPack> Unexpanded);
  /// If the given type contains an unexpanded parameter pack,
  /// diagnose the error.
  ///
  /// \param Loc The source location where a diagnostic should be emitted.
  ///
  /// \param T The type that is being checked for unexpanded parameter
  /// packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
                                       UnexpandedParameterPackContext UPPC);
  /// If the given expression contains an unexpanded parameter
  /// pack, diagnose the error.
  ///
  /// \param E The expression that is being checked for unexpanded
  /// parameter packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPack(Expr *E,
                       UnexpandedParameterPackContext UPPC = UPPC_Expression);
  /// If the given requires-expression contains an unexpanded reference to one
  /// of its own parameter packs, diagnose the error.
  ///
  /// \param RE The requires-expression that is being checked for unexpanded
  /// parameter packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPackInRequiresExpr(RequiresExpr *RE);
  /// If the given nested-name-specifier contains an unexpanded
  /// parameter pack, diagnose the error.
  ///
  /// \param SS The nested-name-specifier that is being checked for
  /// unexpanded parameter packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
                                       UnexpandedParameterPackContext UPPC);
  /// If the given name contains an unexpanded parameter pack,
  /// diagnose the error.
  ///
  /// \param NameInfo The name (with source location information) that
  /// is being checked for unexpanded parameter packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
                                       UnexpandedParameterPackContext UPPC);
  /// If the given template name contains an unexpanded parameter pack,
  /// diagnose the error.
  ///
  /// \param Loc The location of the template name.
  ///
  /// \param Template The template name that is being checked for unexpanded
  /// parameter packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
                                       TemplateName Template,
                                       UnexpandedParameterPackContext UPPC);
  /// If the given template argument contains an unexpanded parameter
  /// pack, diagnose the error.
  ///
  /// \param Arg The template argument that is being checked for unexpanded
  /// parameter packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
                                       UnexpandedParameterPackContext UPPC);
  /// Collect the set of unexpanded parameter packs within the given
  /// template argument.
  ///
  /// \param Arg The template argument that will be traversed to find
  /// unexpanded parameter packs.
  void collectUnexpandedParameterPacks(TemplateArgument Arg,
                   SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
  /// Collect the set of unexpanded parameter packs within the given
  /// template argument (with source-location information).
  ///
  /// \param Arg The template argument that will be traversed to find
  /// unexpanded parameter packs.
  void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
                   SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
  /// Collect the set of unexpanded parameter packs within the given
  /// type.
  ///
  /// \param T The type that will be traversed to find
  /// unexpanded parameter packs.
  void collectUnexpandedParameterPacks(QualType T,
                   SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
  /// Collect the set of unexpanded parameter packs within the given
  /// type (with source-location information).
  ///
  /// \param TL The type that will be traversed to find
  /// unexpanded parameter packs.
  void collectUnexpandedParameterPacks(TypeLoc TL,
                   SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
  /// Collect the set of unexpanded parameter packs within the given
  /// nested-name-specifier.
  ///
  /// \param NNS The nested-name-specifier that will be traversed to find
  /// unexpanded parameter packs.
  void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
                   SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
  /// Collect the set of unexpanded parameter packs within the given
  /// name.
  ///
  /// \param NameInfo The name that will be traversed to find
  /// unexpanded parameter packs.
  void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
                   SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
  /// Invoked when parsing a template argument followed by an
  /// ellipsis, which creates a pack expansion.
  ///
  /// \param Arg The template argument preceding the ellipsis, which
  /// may already be invalid.
  ///
  /// \param EllipsisLoc The location of the ellipsis.
  ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
                                            SourceLocation EllipsisLoc);
  /// Invoked when parsing a type followed by an ellipsis, which
  /// creates a pack expansion.
  ///
  /// \param Type The type preceding the ellipsis, which will become
  /// the pattern of the pack expansion.
  ///
  /// \param EllipsisLoc The location of the ellipsis.
  TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
  /// Construct a pack expansion type from the pattern of the pack
  /// expansion.
  ///
  /// \param NumExpansions The number of expansions this pack expansion will
  /// produce, if already known.
  TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
                                     SourceLocation EllipsisLoc,
                                     Optional<unsigned> NumExpansions);
  /// Construct a pack expansion type from the pattern of the pack
  /// expansion.
  ///
  /// \param PatternRange The source range covering the pattern type.
  QualType CheckPackExpansion(QualType Pattern,
                              SourceRange PatternRange,
                              SourceLocation EllipsisLoc,
                              Optional<unsigned> NumExpansions);
  /// Invoked when parsing an expression followed by an ellipsis, which
  /// creates a pack expansion.
  ///
  /// \param Pattern The expression preceding the ellipsis, which will become
  /// the pattern of the pack expansion.
  ///
  /// \param EllipsisLoc The location of the ellipsis.
  ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
  /// Construct a pack expansion expression from the pattern of the pack
  /// expansion.
  ///
  /// \param Pattern The expression preceding the ellipsis, which will become
  /// the pattern of the pack expansion.
  ///
  /// \param EllipsisLoc The location of the ellipsis.
  ///
  /// \param NumExpansions The number of expansions this pack expansion will
  /// produce, if already known.
  ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
                                Optional<unsigned> NumExpansions);
  /// Determine whether we could expand a pack expansion with the
  /// given set of parameter packs into separate arguments by repeatedly
  /// transforming the pattern.
  ///
  /// \param EllipsisLoc The location of the ellipsis that identifies the
  /// pack expansion.
  ///
  /// \param PatternRange The source range that covers the entire pattern of
  /// the pack expansion.
  ///
  /// \param Unexpanded The set of unexpanded parameter packs within the
  /// pattern.
  ///
  /// \param TemplateArgs The template arguments that will be substituted
  /// into the pattern.
  ///
  /// \param ShouldExpand Will be set to \c true if the transformer should
  /// expand the corresponding pack expansions into separate arguments. When
  /// set, \c NumExpansions must also be set.
  ///
  /// \param RetainExpansion Whether the caller should add an unexpanded
  /// pack expansion after all of the expanded arguments. This is used
  /// when extending explicitly-specified template argument packs per
  /// C++0x [temp.arg.explicit]p9.
  ///
  /// \param NumExpansions The number of separate arguments that will be in
  /// the expanded form of the corresponding pack expansion. This is both an
  /// input and an output parameter, which can be set by the caller if the
  /// number of expansions is known a priori (e.g., due to a prior substitution)
  /// and will be set by the callee when the number of expansions is known.
  /// The callee must set this value when \c ShouldExpand is \c true; it may
  /// set this value in other cases.
  ///
  /// \returns true if an error occurred (e.g., because the parameter packs
  /// are to be instantiated with arguments of different lengths), false
  /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
  /// must be set.
  bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
                                       SourceRange PatternRange,
                             ArrayRef<UnexpandedParameterPack> Unexpanded,
                             const MultiLevelTemplateArgumentList &TemplateArgs,
                                       bool &ShouldExpand,
                                       bool &RetainExpansion,
                                       Optional<unsigned> &NumExpansions);
  /// Determine the number of arguments in the given pack expansion
  /// type.
  ///
  /// This routine assumes that the number of arguments in the expansion is
  /// consistent across all of the unexpanded parameter packs in its pattern.
  ///
  /// Returns an empty Optional if the type can't be expanded.
  Optional<unsigned> getNumArgumentsInExpansion(QualType T,
      const MultiLevelTemplateArgumentList &TemplateArgs);
  /// Determine whether the given declarator contains any unexpanded
  /// parameter packs.
  ///
  /// This routine is used by the parser to disambiguate function declarators
  /// with an ellipsis prior to the ')', e.g.,
  ///
  /// \code
  ///   void f(T...);
  /// \endcode
  ///
  /// To determine whether we have an (unnamed) function parameter pack or
  /// a variadic function.
  ///
  /// \returns true if the declarator contains any unexpanded parameter packs,
  /// false otherwise.
  bool containsUnexpandedParameterPacks(Declarator &D);
  /// Returns the pattern of the pack expansion for a template argument.
  ///
  /// \param OrigLoc The template argument to expand.
  ///
  /// \param Ellipsis Will be set to the location of the ellipsis.
  ///
  /// \param NumExpansions Will be set to the number of expansions that will
  /// be generated from this pack expansion, if known a priori.
  TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
      TemplateArgumentLoc OrigLoc,
      SourceLocation &Ellipsis,
      Optional<unsigned> &NumExpansions) const;
  /// Given a template argument that contains an unexpanded parameter pack, but
  /// which has already been substituted, attempt to determine the number of
  /// elements that will be produced once this argument is fully-expanded.
  ///
  /// This is intended for use when transforming 'sizeof...(Arg)' in order to
  /// avoid actually expanding the pack where possible.
  Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
  //===--------------------------------------------------------------------===//
  // C++ Template Argument Deduction (C++ [temp.deduct])
  //===--------------------------------------------------------------------===//
  /// Adjust the type \p ArgFunctionType to match the calling convention,
  /// noreturn, and optionally the exception specification of \p FunctionType.
  /// Deduction often wants to ignore these properties when matching function
  /// types.
  QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
                               bool AdjustExceptionSpec = false);
  /// Describes the result of template argument deduction.
  ///
  /// The TemplateDeductionResult enumeration describes the result of
  /// template argument deduction, as returned from
  /// DeduceTemplateArguments(). The separate TemplateDeductionInfo
  /// structure provides additional information about the results of
  /// template argument deduction, e.g., the deduced template argument
  /// list (if successful) or the specific template parameters or
  /// deduced arguments that were involved in the failure.
  enum TemplateDeductionResult {
    /// Template argument deduction was successful.
    TDK_Success = 0,
    /// The declaration was invalid; do nothing.
    TDK_Invalid,
    /// Template argument deduction exceeded the maximum template
    /// instantiation depth (which has already been diagnosed).
    TDK_InstantiationDepth,
    /// Template argument deduction did not deduce a value
    /// for every template parameter.
    TDK_Incomplete,
    /// Template argument deduction did not deduce a value for every
    /// expansion of an expanded template parameter pack.
    TDK_IncompletePack,
    /// Template argument deduction produced inconsistent
    /// deduced values for the given template parameter.
    TDK_Inconsistent,
    /// Template argument deduction failed due to inconsistent
    /// cv-qualifiers on a template parameter type that would
    /// otherwise be deduced, e.g., we tried to deduce T in "const T"
    /// but were given a non-const "X".
    TDK_Underqualified,
    /// Substitution of the deduced template argument values
    /// resulted in an error.
    TDK_SubstitutionFailure,
    /// After substituting deduced template arguments, a dependent
    /// parameter type did not match the corresponding argument.
    TDK_DeducedMismatch,
    /// After substituting deduced template arguments, an element of
    /// a dependent parameter type did not match the corresponding element
    /// of the corresponding argument (when deducing from an initializer list).
    TDK_DeducedMismatchNested,
    /// A non-dependent component of the parameter did not match the
    /// corresponding component of the argument.
    TDK_NonDeducedMismatch,
    /// When performing template argument deduction for a function
    /// template, there were too many call arguments.
    TDK_TooManyArguments,
    /// When performing template argument deduction for a function
    /// template, there were too few call arguments.
    TDK_TooFewArguments,
    /// The explicitly-specified template arguments were not valid
    /// template arguments for the given template.
    TDK_InvalidExplicitArguments,
    /// Checking non-dependent argument conversions failed.
    TDK_NonDependentConversionFailure,
    /// The deduced arguments did not satisfy the constraints associated
    /// with the template.
    TDK_ConstraintsNotSatisfied,
    /// Deduction failed; that's all we know.
    TDK_MiscellaneousDeductionFailure,
    /// CUDA Target attributes do not match.
    TDK_CUDATargetMismatch
  };
  /// Deduce the template arguments of the given class template partial
  /// specialization from the actual template argument list \p TemplateArgs.
  TemplateDeductionResult
  DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
                          const TemplateArgumentList &TemplateArgs,
                          sema::TemplateDeductionInfo &Info);
  /// Deduce the template arguments of the given variable template partial
  /// specialization from the actual template argument list \p TemplateArgs.
  TemplateDeductionResult
  DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
                          const TemplateArgumentList &TemplateArgs,
                          sema::TemplateDeductionInfo &Info);
  /// Substitute the explicitly-specified template arguments
  /// \p ExplicitTemplateArgs into the function template's parameter types
  /// (reported via \p ParamTypes) and, if \p FunctionType is non-null, its
  /// function type, prior to performing deduction for the remaining
  /// parameters.
  TemplateDeductionResult SubstituteExplicitTemplateArguments(
      FunctionTemplateDecl *FunctionTemplate,
      TemplateArgumentListInfo &ExplicitTemplateArgs,
      SmallVectorImpl<DeducedTemplateArgument> &Deduced,
      SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
      sema::TemplateDeductionInfo &Info);
  /// A function argument from which we performed template argument
  /// deduction for a call.
  struct OriginalCallArg {
    OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
                    unsigned ArgIdx, QualType OriginalArgType)
        : OriginalParamType(OriginalParamType),
          DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
          OriginalArgType(OriginalArgType) {}
    QualType OriginalParamType;
    bool DecomposedParam;
    unsigned ArgIdx;
    QualType OriginalArgType;
  };
  /// Finish template argument deduction for a function template: check the
  /// deduced arguments for completeness/consistency and, on success, form
  /// the function template specialization in \p Specialization.
  TemplateDeductionResult FinishTemplateArgumentDeduction(
      FunctionTemplateDecl *FunctionTemplate,
      SmallVectorImpl<DeducedTemplateArgument> &Deduced,
      unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
      sema::TemplateDeductionInfo &Info,
      SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
      bool PartialOverloading = false,
      llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
  /// Perform template argument deduction for a function template from the
  /// arguments of a function call (plus any explicitly-specified template
  /// arguments).
  TemplateDeductionResult DeduceTemplateArguments(
      FunctionTemplateDecl *FunctionTemplate,
      TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
      FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
      bool PartialOverloading,
      llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
  /// Perform template argument deduction for a function template against a
  /// target function type (e.g., when taking the address of a function
  /// template).
  TemplateDeductionResult
  DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                          TemplateArgumentListInfo *ExplicitTemplateArgs,
                          QualType ArgFunctionType,
                          FunctionDecl *&Specialization,
                          sema::TemplateDeductionInfo &Info,
                          bool IsAddressOfFunction = false);
  /// Perform template argument deduction for a templated conversion
  /// function, deducing against the target type \p ToType.
  TemplateDeductionResult
  DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                          QualType ToType,
                          CXXConversionDecl *&Specialization,
                          sema::TemplateDeductionInfo &Info);
  /// Perform template argument deduction for a function template when
  /// only explicitly-specified template arguments are available (no call
  /// arguments or target type).
  TemplateDeductionResult
  DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                          TemplateArgumentListInfo *ExplicitTemplateArgs,
                          FunctionDecl *&Specialization,
                          sema::TemplateDeductionInfo &Info,
                          bool IsAddressOfFunction = false);
  /// Substitute Replacement for \p auto in \p TypeWithAuto
  QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
  /// Substitute Replacement for auto in TypeWithAuto
  TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
                                          QualType Replacement);
  /// Completely replace the \c auto in \p TypeWithAuto by
  /// \p Replacement. This does not retain any \c auto type sugar.
  QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
  TypeSourceInfo *ReplaceAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
                                            QualType Replacement);
  /// Result type of DeduceAutoType.
  enum DeduceAutoResult {
    DAR_Succeeded,
    DAR_Failed,
    DAR_FailedAlreadyDiagnosed
  };
  /// Deduce the concrete type of a declaration written with 'auto' (or a
  /// deduced return type) from its initializer \p Initializer, storing the
  /// deduced type in \p Result.
  ///
  /// NOTE(review): semantics of DependentDeductionDepth and IgnoreConstraints
  /// inferred from their names; confirm against SemaTemplateDeduction.cpp.
  DeduceAutoResult
  DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
                 Optional<unsigned> DependentDeductionDepth = None,
                 bool IgnoreConstraints = false);
  /// Overload of the above taking the 'auto' type as a TypeLoc.
  DeduceAutoResult
  DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
                 Optional<unsigned> DependentDeductionDepth = None,
                 bool IgnoreConstraints = false);
  /// Emit a diagnostic explaining why 'auto' deduction failed for the
  /// given variable's initializer.
  void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
  /// Deduce the deferred (auto) return type of \p FD at the point of use.
  ///
  /// \returns true if an error occurred; diagnostics are emitted only when
  /// \p Diagnose is \c true.
  bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
                        bool Diagnose = true);
  /// Declare implicit deduction guides for a class template if we've
  /// not already done so.
  void DeclareImplicitDeductionGuides(TemplateDecl *Template,
                                      SourceLocation Loc);
  /// Deduce the class template specialization named by \p TInfo from the
  /// given initializer (class template argument deduction).
  QualType DeduceTemplateSpecializationFromInitializer(
      TypeSourceInfo *TInfo, const InitializedEntity &Entity,
      const InitializationKind &Kind, MultiExprArg Init);
  /// Deduce the type of the variable \p VDecl (declared with a deduced
  /// type) from its initializer \p Init.
  QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
                                        QualType Type, TypeSourceInfo *TSI,
                                        SourceRange Range, bool DirectInit,
                                        Expr *Init);
  /// Retrieve the TypeLoc of \p FD's return type.
  TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
  /// Deduce the deduced return type of \p FD from the expression of a
  /// return statement.
  bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
                                        SourceLocation ReturnLoc,
                                        Expr *&RetExpr, AutoType *AT);
  /// Determine which of two function templates is more specialized under
  /// the partial ordering rules, or null if neither is.
  FunctionTemplateDecl *getMoreSpecializedTemplate(
      FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc,
      TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1,
      unsigned NumCallArguments2, bool Reversed = false);
  /// Select the single most specialized function template from the range
  /// [SBegin, SEnd), diagnosing with the given PartialDiagnostics when there
  /// is none or the choice is ambiguous (if \p Complain).
  UnresolvedSetIterator
  getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
                     TemplateSpecCandidateSet &FailedCandidates,
                     SourceLocation Loc,
                     const PartialDiagnostic &NoneDiag,
                     const PartialDiagnostic &AmbigDiag,
                     const PartialDiagnostic &CandidateDiag,
                     bool Complain = true, QualType TargetType = QualType());
  /// Determine which of two class template partial specializations is more
  /// specialized, or null if neither is.
  ClassTemplatePartialSpecializationDecl *
  getMoreSpecializedPartialSpecialization(
                                  ClassTemplatePartialSpecializationDecl *PS1,
                                  ClassTemplatePartialSpecializationDecl *PS2,
                                  SourceLocation Loc);
  /// Determine whether the given class template partial specialization is
  /// more specialized than the primary template.
  bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
                                    sema::TemplateDeductionInfo &Info);
  /// Determine which of two variable template partial specializations is
  /// more specialized, or null if neither is.
  VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
      VarTemplatePartialSpecializationDecl *PS1,
      VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
  /// Determine whether the given variable template partial specialization
  /// is more specialized than the primary template.
  bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
                                    sema::TemplateDeductionInfo &Info);
  /// Determine whether a template template parameter (with parameter list
  /// \p PParam) is at least as specialized as the template argument
  /// \p AArg (per C++ [temp.arg.template]).
  bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
      TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc);
  /// Mark which template parameters at the given \p Depth are used (or,
  /// if \p OnlyDeduced, deducible) within the expression \p E, setting the
  /// corresponding bits in \p Used.
  void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced,
                                  unsigned Depth, llvm::SmallBitVector &Used);
  /// Mark which template parameters at the given \p Depth are used (or,
  /// if \p OnlyDeduced, deducible) within the given template argument list,
  /// setting the corresponding bits in \p Used.
  void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
                                  bool OnlyDeduced,
                                  unsigned Depth,
                                  llvm::SmallBitVector &Used);
  /// Mark which template parameters can be deduced from the function
  /// parameters of the given function template. Convenience wrapper that
  /// forwards to the static overload with this Sema's ASTContext.
  void MarkDeducedTemplateParameters(
                                  const FunctionTemplateDecl *FunctionTemplate,
                                  llvm::SmallBitVector &Deduced) {
    return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
  }
  /// Mark which template parameters can be deduced from the function
  /// parameters of the given function template, setting the corresponding
  /// bits in \p Deduced.
  static void MarkDeducedTemplateParameters(ASTContext &Ctx,
                                  const FunctionTemplateDecl *FunctionTemplate,
                                  llvm::SmallBitVector &Deduced);
  //===--------------------------------------------------------------------===//
  // C++ Template Instantiation
  //
  /// Retrieve the template argument lists that should be used when
  /// instantiating (the definition of) the given declaration \p D,
  /// from the innermost level outward.
  ///
  /// \param Innermost If non-null, an extra innermost argument list to
  /// prepend.
  ///
  /// NOTE(review): RelativeToPrimary/Pattern semantics inferred from names;
  /// confirm against the definition in SemaTemplateInstantiate.cpp.
  MultiLevelTemplateArgumentList
  getTemplateInstantiationArgs(NamedDecl *D,
                               const TemplateArgumentList *Innermost = nullptr,
                               bool RelativeToPrimary = false,
                               const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
/// The kind of template instantiation we are performing
enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArguments provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template argument determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are computing the exception specification for a defaulted special
/// member function.
ExceptionSpecEvaluation,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are instantiating a requirement of a requires expression.
RequirementInstantiation,
/// We are checking the satisfaction of a nested requirement of a requires
/// expression.
NestedRequirementConstraintsCheck,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
/// We are declaring an implicit 'operator==' for a defaulted
/// 'operator<=>'.
DeclaringImplicitEqualityComparison,
/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,
// We are checking the constraints associated with a constrained entity or
// the constraint expression of a concept. This includes the checks that
// atomic constraints have the type 'bool' and that they can be constant
// evaluated.
ConstraintsCheck,
// We are substituting template arguments into a constraint expression.
ConstraintSubstitution,
// We are normalizing a constraint expression.
ConstraintNormalization,
// We are substituting into the parameter mapping of an atomic constraint
// during normalization.
ParameterMappingSubstitution,
/// We are rewriting a comparison operator in terms of an operator<=>.
RewritingOperatorAsSpaceship,
/// We are initializing a structured binding.
InitializingStructuredBinding,
/// We are marking a class as __dllexport.
MarkingClassDllexported,
/// Added for Template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to if it was not already instantiated).
Memoization
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// The entity that is being synthesized.
Decl *Entity;
/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
ArrayRef<TemplateArgument> template_arguments() const {
assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}
/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// The source range that covers the construct that causes
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
CodeSynthesisContext()
: Kind(TemplateInstantiation),
SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
DeductionInfo(nullptr) {}
/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating
/// a template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace, or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
friend class ArgumentPackSubstitutionRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and evaluates true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
  /// Note that we are instantiating a class template,
  /// function template, variable template, alias template,
  /// or a member thereof.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        Decl *Entity,
                        SourceRange InstantiationRange = SourceRange());

  /// Tag type selecting the exception-specification constructor below.
  struct ExceptionSpecification {};

  /// Note that we are instantiating an exception specification
  /// of a function template.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionDecl *Entity, ExceptionSpecification,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating a default argument in a
  /// template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateParameter Param, TemplateDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are substituting either explicitly-specified or
  /// deduced template arguments during function template argument deduction.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionTemplateDecl *FunctionTemplate,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        CodeSynthesisContext::SynthesisKind Kind,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating as part of template
  /// argument deduction for a class template declaration.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating as part of template
  /// argument deduction for a class template partial
  /// specialization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ClassTemplatePartialSpecializationDecl *PartialSpec,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating as part of template
  /// argument deduction for a variable template partial
  /// specialization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        VarTemplatePartialSpecializationDecl *PartialSpec,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating a default argument for a function
  /// parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ParmVarDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are substituting prior template arguments into a
  /// non-type parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        NamedDecl *Template,
                        NonTypeTemplateParmDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Note that we are substituting prior template arguments into a
  /// template template parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        NamedDecl *Template,
                        TemplateTemplateParmDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Note that we are checking the default template argument
  /// against the template parameter for a given template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateDecl *Template,
                        NamedDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Tag type selecting the constraints-checking constructor below.
  struct ConstraintsCheck {};

  /// \brief Note that we are checking the constraints associated with some
  /// constrained entity (a concept declaration or a template with associated
  /// constraints).
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ConstraintsCheck, NamedDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Tag type selecting the constraint-substitution constructor below.
  struct ConstraintSubstitution {};

  /// \brief Note that we are checking a constraint expression associated
  /// with a template declaration or as part of the satisfaction check of a
  /// concept.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ConstraintSubstitution, NamedDecl *Template,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange);

  /// Tag type selecting the constraint-normalization constructor below.
  struct ConstraintNormalization {};

  /// \brief Note that we are normalizing a constraint expression.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ConstraintNormalization, NamedDecl *Template,
                        SourceRange InstantiationRange);

  /// Tag type selecting the parameter-mapping constructor below.
  struct ParameterMappingSubstitution {};

  /// \brief Note that we are substituting into the parameter mapping of an
  /// atomic constraint during constraint normalization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ParameterMappingSubstitution, NamedDecl *Template,
                        SourceRange InstantiationRange);

  /// \brief Note that we are substituting template arguments into a part of
  /// a requirement of a requires expression.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        concepts::Requirement *Req,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// \brief Note that we are checking the satisfaction of the constraint
  /// expression inside of a nested requirement.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        concepts::NestedRequirement *Req, ConstraintsCheck,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we have finished instantiating this template.
  void Clear();

  ~InstantiatingTemplate() { Clear(); }

  /// Determines whether we have exceeded the maximum
  /// recursive template instantiations.
  bool isInvalid() const { return Invalid; }

  /// Determine whether we are already instantiating this
  /// specialization in some surrounding active instantiation.
  bool isAlreadyInstantiating() const { return AlreadyInstantiating; }

private:
  Sema &SemaRef;
  bool Invalid;
  bool AlreadyInstantiating;
  bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
                               SourceRange InstantiationRange);

  /// Common implementation used by the public tag-dispatched constructors.
  InstantiatingTemplate(
      Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
      SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
      Decl *Entity, NamedDecl *Template = nullptr,
      ArrayRef<TemplateArgument> TemplateArgs = None,
      sema::TemplateDeductionInfo *DeductionInfo = nullptr);

  // Non-copyable: destruction pops exactly one context off the stack.
  InstantiatingTemplate(const InstantiatingTemplate&) = delete;

  InstantiatingTemplate&
  operator=(const InstantiatingTemplate&) = delete;
};
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
  // Anything on the synthesis-context stack beyond the non-instantiation
  // entries is a real template instantiation.
  return NonInstantiationEntries < CodeSynthesisContexts.size();
}
void PrintContextStack() {
if (!CodeSynthesisContexts.empty() &&
CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
PrintInstantiationStack();
LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
}
if (PragmaAttributeCurrentTargetDecl)
PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
bool isImmediateFunctionContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
for (const ExpressionEvaluationContextRecord &context :
llvm::reverse(ExprEvalContexts)) {
if (context.isImmediateFunctionContext())
return true;
if (context.isUnevaluated())
return false;
}
return false;
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
  Sema &SemaRef;
  // State captured at construction and restored on destruction.
  unsigned SavedSFINAEErrors;
  bool SavedInNonInstantiationSFINAEContext;
  bool SavedAccessCheckingSFINAE;
  bool SavedLastDiagnosticIgnored;

public:
  explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
      : SemaRef(SemaRef), SavedSFINAEErrors(SemaRef.NumSFINAEErrors),
        SavedInNonInstantiationSFINAEContext(
            SemaRef.InNonInstantiationSFINAEContext),
        SavedAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
        SavedLastDiagnosticIgnored(
            SemaRef.getDiagnostics().isLastDiagnosticIgnored()) {
    // If no SFINAE context is active, this trap itself becomes the
    // non-instantiation SFINAE context.
    if (!SemaRef.isSFINAEContext())
      SemaRef.InNonInstantiationSFINAEContext = true;
    SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
  }

  ~SFINAETrap() {
    // Restore all of the state captured at construction.
    SemaRef.NumSFINAEErrors = SavedSFINAEErrors;
    SemaRef.InNonInstantiationSFINAEContext =
        SavedInNonInstantiationSFINAEContext;
    SemaRef.AccessCheckingSFINAE = SavedAccessCheckingSFINAE;
    SemaRef.getDiagnostics().setLastDiagnosticIgnored(
        SavedLastDiagnosticIgnored);
  }

  /// Determine whether any SFINAE errors have been trapped.
  bool hasErrorOccurred() const {
    return SemaRef.NumSFINAEErrors > SavedSFINAEErrors;
  }
};
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
  Sema &SemaRef;
  // FIXME: Using a SFINAETrap for this is a hack.
  SFINAETrap Trap;
  // Previous value of Sema::DisableTypoCorrection, restored on exit.
  bool PrevDisableTypoCorrection;
public:
  explicit TentativeAnalysisScope(Sema &SemaRef)
      : SemaRef(SemaRef), Trap(SemaRef, true),
        PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
    // Typo correction could commit us to an interpretation of the code
    // we might otherwise reject, so disable it while analyzing tentatively.
    SemaRef.DisableTypoCorrection = true;
  }
  ~TentativeAnalysisScope() {
    SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
  }
};
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
/// RAII scope that collects the implicit instantiations and vtable uses
/// triggered within it and, via perform(), carries them out before the
/// enclosing state is restored on destruction.
class GlobalEagerInstantiationScope {
public:
  GlobalEagerInstantiationScope(Sema &S, bool Enabled)
      : S(S), Enabled(Enabled) {
    if (!Enabled) return;

    // Stash the enclosing queues so this scope starts empty.
    SavedPendingInstantiations.swap(S.PendingInstantiations);
    SavedVTableUses.swap(S.VTableUses);
  }

  /// Perform the instantiations and vtable definitions accumulated
  /// since this scope was entered.
  void perform() {
    if (Enabled) {
      S.DefineUsedVTables();
      S.PerformPendingInstantiations();
    }
  }

  ~GlobalEagerInstantiationScope() {
    if (!Enabled) return;

    // Restore the set of pending vtables.
    assert(S.VTableUses.empty() &&
           "VTableUses should be empty before it is discarded.");
    S.VTableUses.swap(SavedVTableUses);

    // Restore the set of pending implicit instantiations.
    if (S.TUKind != TU_Prefix || !S.LangOpts.PCHInstantiateTemplates) {
      assert(S.PendingInstantiations.empty() &&
             "PendingInstantiations should be empty before it is discarded.");
      S.PendingInstantiations.swap(SavedPendingInstantiations);
    } else {
      // Template instantiations in the PCH may be delayed until the TU,
      // so keep this scope's leftovers and append the saved ones.
      S.PendingInstantiations.swap(SavedPendingInstantiations);
      S.PendingInstantiations.insert(S.PendingInstantiations.end(),
                                     SavedPendingInstantiations.begin(),
                                     SavedPendingInstantiations.end());
    }
  }

private:
  Sema &S;
  SmallVector<VTableUse, 16> SavedVTableUses;
  std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
  bool Enabled;
};
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
/// RAII scope that collects local implicit instantiations triggered
/// within it; perform() must be called before destruction, which
/// restores the enclosing scope's queue.
class LocalEagerInstantiationScope {
public:
  LocalEagerInstantiationScope(Sema &S) : S(S) {
    // Stash the enclosing queue so this scope starts empty.
    SavedPendingLocalImplicitInstantiations.swap(
        S.PendingLocalImplicitInstantiations);
  }

  /// Perform the local instantiations accumulated since this scope
  /// was entered.
  void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }

  ~LocalEagerInstantiationScope() {
    assert(S.PendingLocalImplicitInstantiations.empty() &&
           "there shouldn't be any pending local implicit instantiations");
    // Restore the enclosing scope's queue.
    SavedPendingLocalImplicitInstantiations.swap(
        S.PendingLocalImplicitInstantiations);
  }

private:
  Sema &S;
  std::deque<PendingImplicitInstantiation>
      SavedPendingLocalImplicitInstantiations;
};
/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
  // Sparse storage; gaps are filled with default-constructed entries.
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
  // True once any entry differs from the default ExtParameterInfo.
  bool HasInteresting = false;

public:
  /// Set the ExtParameterInfo for the parameter at the given index.
  /// Indices must be set in strictly increasing order; skipped
  /// parameters receive a default-constructed info.
  void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
    assert(Infos.size() <= index);
    Infos.resize(index);
    Infos.push_back(info);

    if (!HasInteresting)
      HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
  }

  /// Return a pointer (suitable for setting in an ExtProtoInfo) to the
  /// ExtParameterInfo array we've built up, or null if every entry is
  /// just the default.
  const FunctionProtoType::ExtParameterInfo *
  getPointerOrNull(unsigned numParams) {
    if (!HasInteresting) return nullptr;
    // Pad out to the full parameter count with default infos.
    Infos.resize(numParams);
    return Infos.data();
  }
};
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateArgumentListInfo &Outputs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the name and return type of a defaulted 'operator<=>' to form
/// an implicit 'operator=='.
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
struct LateInstantiatedAttribute {
const Attr *TmplAttr;
LocalInstantiationScope *Scope;
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void InstantiateDefaultCtorDefaultArgs(CXXConstructorDecl *Ctor);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool SubstTypeConstraint(TemplateTypeParmDecl *Inst, const TypeConstraint *TC,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
bool CheckInstantiatedFunctionTemplateConstraints(
SourceLocation PointOfInstantiation, FunctionDecl *Decl,
ArrayRef<TemplateArgument> TemplateArgs,
ConstraintSatisfaction &Satisfaction);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false,
VarTemplateSpecializationDecl *PrevVTSD = nullptr);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc,
const ParsedAttributesView &AttrList);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build a an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
/// Families of Objective-C methods with special ownership/initialization
/// semantics (alloc/new/copy/init).
/// NOTE(review): family meanings inferred from the constant names —
/// confirm against the code that switches over this enum.
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
/// Parser-collected information about one keyword argument of an
/// Objective-C method declaration (consumed by ActOnMethodDeclaration).
struct ObjCArgInfo {
IdentifierInfo *Name;   // the argument's name
SourceLocation NameLoc; // location of the argument's name
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from the Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
void deduceOpenCLAddressSpace(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
/// Returned by getObjCMessageKind() below.
enum ObjCMessageKind {
/// The message is sent to 'super'.
ObjCSuperMessage,
/// The message is an instance message.
ObjCInstanceMessage,
/// The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool CheckConversionToObjCLiteral(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
/// Used when checking overrides (see CheckObjCMethodOverrides below).
enum ResultTypeCompatibilityKind {
RTC_Compatible,   // the result type is compatible
RTC_Incompatible, // the result type is not compatible
RTC_Unknown       // compatibility could not be determined
};
void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method,
ObjCMethodDecl *overridden);
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
/// Alignment modes accepted by '\#pragma options align' (see
/// ActOnPragmaOptionsAlign below).
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
/// Why a non-default '#pragma pack'/alignment state is being diagnosed
/// (see DiagnoseNonDefaultPragmaAlignPack below).
enum class PragmaAlignPackDiagnoseKind {
NonDefaultStateAtInclude, // state was non-default when entering an include
ChangedStateAtExit        // state was changed but not restored on exit
};
void DiagnoseNonDefaultPragmaAlignPack(PragmaAlignPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaAlignPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispMode Value);
/// Section categories for '\#pragma bss_seg/data_seg/const_seg/code_seg'
/// (see ActOnPragmaMSSeg below).
enum PragmaSectionKind {
PSK_DataSeg,  // #pragma data_seg
PSK_BSSSeg,   // #pragma bss_seg
PSK_ConstSeg, // #pragma const_seg
PSK_CodeSeg,  // #pragma code_seg
};
bool UnifySection(StringRef SectionName, int SectionFlags,
NamedDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on #pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// Are precise floating point semantics currently enabled?
///
/// Precise semantics hold exactly when none of the value-unsafe
/// fast-math relaxations (reassociation, ignoring signed zeros,
/// reciprocal substitution, approximate functions) is allowed.
bool isPreciseFPEnabled() {
  bool AnyUnsafe = CurFPFeatures.getAllowFPReassociate() ||
                   CurFPFeatures.getNoSignedZero() ||
                   CurFPFeatures.getAllowReciprocal() ||
                   CurFPFeatures.getAllowApproxFunc();
  return !AnyUnsafe;
}
/// ActOnPragmaFloatControl - Call on well-formed \#pragma float_control
void ActOnPragmaFloatControl(SourceLocation Loc, PragmaMsStackAction Action,
PragmaFloatControlKind Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(SourceLocation Loc, LangOptions::FPModeKind FPC);
/// Called on well formed
/// \#pragma clang fp reassociate
void ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled);
/// ActOnPragmaFenvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled);
/// Called on well formed '\#pragma clang fp' that has option 'exceptions'.
void ActOnPragmaFPExceptions(SourceLocation Loc,
LangOptions::FPExceptionModeKind);
/// Called to set constant rounding mode for floating point operations.
void setRoundingMode(SourceLocation Loc, llvm::RoundingMode);
/// Called to set exception behavior for floating point operations.
void setExceptionMode(SourceLocation Loc, LangOptions::FPExceptionModeKind);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// a the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
/// Simple accessor; the member is presumably updated by
/// ActOnPragmaOptimize above — confirm.
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
void AddIntelFPGABankBitsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr **Exprs, unsigned Size);
template <typename AttrType>
void addIntelTripleArgAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *XDimExpr, Expr *YDimExpr, Expr *ZDimExpr);
void AddWorkGroupSizeHintAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *XDim, Expr *YDim, Expr *ZDim);
WorkGroupSizeHintAttr *
MergeWorkGroupSizeHintAttr(Decl *D, const WorkGroupSizeHintAttr &A);
void AddIntelReqdSubGroupSize(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
IntelReqdSubGroupSizeAttr *
MergeIntelReqdSubGroupSizeAttr(Decl *D, const IntelReqdSubGroupSizeAttr &A);
IntelNamedSubGroupSizeAttr *
MergeIntelNamedSubGroupSizeAttr(Decl *D, const IntelNamedSubGroupSizeAttr &A);
void AddSYCLIntelNumSimdWorkItemsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelNumSimdWorkItemsAttr *
MergeSYCLIntelNumSimdWorkItemsAttr(Decl *D,
const SYCLIntelNumSimdWorkItemsAttr &A);
void AddSYCLIntelESimdVectorizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelESimdVectorizeAttr *
MergeSYCLIntelESimdVectorizeAttr(Decl *D,
const SYCLIntelESimdVectorizeAttr &A);
void AddSYCLIntelSchedulerTargetFmaxMhzAttr(Decl *D,
const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelSchedulerTargetFmaxMhzAttr *MergeSYCLIntelSchedulerTargetFmaxMhzAttr(
Decl *D, const SYCLIntelSchedulerTargetFmaxMhzAttr &A);
void AddSYCLIntelNoGlobalWorkOffsetAttr(Decl *D,
const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelNoGlobalWorkOffsetAttr *MergeSYCLIntelNoGlobalWorkOffsetAttr(
Decl *D, const SYCLIntelNoGlobalWorkOffsetAttr &A);
void AddSYCLIntelLoopFuseAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelLoopFuseAttr *
MergeSYCLIntelLoopFuseAttr(Decl *D, const SYCLIntelLoopFuseAttr &A);
void AddIntelFPGAPrivateCopiesAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
void AddIntelFPGAMaxReplicatesAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
IntelFPGAMaxReplicatesAttr *
MergeIntelFPGAMaxReplicatesAttr(Decl *D, const IntelFPGAMaxReplicatesAttr &A);
void AddIntelFPGAForcePow2DepthAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
IntelFPGAForcePow2DepthAttr *
MergeIntelFPGAForcePow2DepthAttr(Decl *D,
const IntelFPGAForcePow2DepthAttr &A);
void AddSYCLIntelFPGAInitiationIntervalAttr(Decl *D,
const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelFPGAInitiationIntervalAttr *MergeSYCLIntelFPGAInitiationIntervalAttr(
Decl *D, const SYCLIntelFPGAInitiationIntervalAttr &A);
SYCLIntelFPGAMaxConcurrencyAttr *MergeSYCLIntelFPGAMaxConcurrencyAttr(
Decl *D, const SYCLIntelFPGAMaxConcurrencyAttr &A);
void AddSYCLIntelMaxGlobalWorkDimAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelMaxGlobalWorkDimAttr *
MergeSYCLIntelMaxGlobalWorkDimAttr(Decl *D,
const SYCLIntelMaxGlobalWorkDimAttr &A);
void AddIntelFPGABankWidthAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
IntelFPGABankWidthAttr *
MergeIntelFPGABankWidthAttr(Decl *D, const IntelFPGABankWidthAttr &A);
void AddIntelFPGANumBanksAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
IntelFPGANumBanksAttr *
MergeIntelFPGANumBanksAttr(Decl *D, const IntelFPGANumBanksAttr &A);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
bool IsPackExpansion);
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
Expr *OE);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *ParamExpr);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
/// AddAnnotationAttr - Adds an annotation Annot with Args arguments to D.
void AddAnnotationAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Annot, MutableArrayRef<Expr *> Args);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *MaxThreads, Expr *MinBlocks);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
bool InInstantiation = false);
void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
ParameterABI ABI);
enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addAMDGPUWavePersEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addSYCLIntelPipeIOAttr - Adds a pipe I/O attribute to a particular
/// declaration.
void addSYCLIntelPipeIOAttr(Decl *D, const AttributeCommonInfo &CI, Expr *ID);
/// AddSYCLIntelFPGAMaxConcurrencyAttr - Adds a max_concurrency attribute to a
/// particular declaration.
void AddSYCLIntelFPGAMaxConcurrencyAttr(Decl *D,
const AttributeCommonInfo &CI,
Expr *E);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
bool checkAllowedSYCLInitializer(VarDecl *VD,
bool CheckValueDependent = false);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
/// Check that the expression co_await promise.final_suspend() shall not be
/// potentially-throwing.
bool checkFinalSuspendNoThrow(const Stmt *FinalSuspend);
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Bookkeeping for one active '#pragma omp (begin) declare target'
/// region (stacked in DeclareTargetNesting below).
struct DeclareTargetContextInfo {
/// Map type and location for one explicitly listed declaration.
struct MapInfo {
OMPDeclareTargetDeclAttr::MapTypeTy MT;
SourceLocation Loc;
};
/// Explicitly listed variables and functions in a 'to' or 'link' clause.
llvm::DenseMap<NamedDecl *, MapInfo> ExplicitlyMapped;
/// The 'device_type' as parsed from the clause.
OMPDeclareTargetDeclAttr::DevTypeTy DT = OMPDeclareTargetDeclAttr::DT_Any;
/// The directive kind, `begin declare target` or `declare target`.
OpenMPDirectiveKind Kind;
/// The directive location.
SourceLocation Loc;
DeclareTargetContextInfo(OpenMPDirectiveKind Kind, SourceLocation Loc)
: Kind(Kind), Loc(Loc) {}
};
/// Number of nested '#pragma omp declare target' directives.
SmallVector<DeclareTargetContextInfo, 4> DeclareTargetNesting;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true,
bool SuppressExprDiags = false);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Returns the number of scopes associated with the construct on the given
/// OpenMP level.
int getNumberOfConstructScopes(unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Analyzes and checks a loop nest for use by a loop transformation.
///
/// \param Kind The loop transformation directive kind.
/// \param NumLoops How many nested loops the directive is expecting.
/// \param AStmt Associated statement of the transformation directive.
/// \param LoopHelpers [out] The loop analysis result.
/// \param Body [out] The body code nested in \p NumLoops loop.
/// \param OriginalInits [out] Collection of statements and declarations that
/// must have been executed/declared before entering the
/// loop.
///
/// \return Whether there was any error.
bool checkTransformableLoopNest(
OpenMPDirectiveKind Kind, Stmt *AStmt, int NumLoops,
SmallVectorImpl<OMPLoopBasedDirective::HelperExprs> &LoopHelpers,
Stmt *&Body,
SmallVectorImpl<SmallVector<llvm::PointerUnion<Stmt *, Decl *>, 0>>
&OriginalInits);
/// Helper to keep information about the current `omp begin/end declare
/// variant` nesting.
struct OMPDeclareVariantScope {
/// The associated OpenMP context selector.
OMPTraitInfo *TI;
/// The associated OpenMP context selector mangling.
/// NOTE(review): presumably derived from \p TI by the out-of-line
/// constructor — confirm.
std::string NameSuffix;
OMPDeclareVariantScope(OMPTraitInfo &TI);
};
/// Return the OMPTraitInfo for the surrounding scope, if any.
///
/// Yields the selector of the innermost open `omp begin declare
/// variant` scope, or nullptr when no such scope is active.
OMPTraitInfo *getOMPTraitInfoForSurroundingScope() {
  if (OMPDeclareVariantScopes.empty())
    return nullptr;
  return OMPDeclareVariantScopes.back().TI;
}
/// The current `omp begin/end declare variant` scopes.
SmallVector<OMPDeclareVariantScope, 4> OMPDeclareVariantScopes;
/// The current `omp begin/end assumes` scopes.
SmallVector<AssumptionAttr *, 4> OMPAssumeScoped;
/// All `omp assumes` we encountered so far.
SmallVector<AssumptionAttr *, 4> OMPAssumeGlobal;
public:
/// The declarator \p D defines a function in the scope \p S which is nested
/// in an `omp begin/end declare variant` scope. In this method we create a
/// declaration for \p D and rename \p D according to the OpenMP context
/// selector of the surrounding scope. Return all base functions in \p Bases.
void ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(
Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists,
SmallVectorImpl<FunctionDecl *> &Bases);
/// Register \p D as specialization of all base functions in \p Bases in the
/// current `omp begin/end declare variant` scope.
void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(
Decl *D, SmallVectorImpl<FunctionDecl *> &Bases);
/// Act on \p D, a function definition inside of an `omp [begin/end] assumes`.
void ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Decl *D);
/// Can we exit an OpenMP declare variant scope at the moment.
/// \return true while at least one `omp begin declare variant` scope is
/// still open, i.e. OMPDeclareVariantScopes is non-empty.
bool isInOpenMPDeclareVariantScope() const {
return !OMPDeclareVariantScopes.empty();
}
/// Given the potential call expression \p Call, determine if there is a
/// specialization via the OpenMP declare variant mechanism available. If
/// there is, return the specialized call expression, otherwise return the
/// original \p Call.
ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope,
SourceLocation LParenLoc, MultiExprArg ArgExprs,
SourceLocation RParenLoc, Expr *ExecConfig);
/// Handle a `omp begin declare variant`.
void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI);
/// Handle a `omp end declare variant`.
void ActOnOpenMPEndDeclareVariant();
/// Checks if the variant/multiversion functions are compatible.
bool areMultiversionVariantFunctionsCompatible(
const FunctionDecl *OldFD, const FunctionDecl *NewFD,
const PartialDiagnostic &NoProtoDiagID,
const PartialDiagnosticAt &NoteCausedDiagIDAt,
const PartialDiagnosticAt &NoSupportDiagIDAt,
const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
bool ConstexprSupported, bool CLinkageMayDiffer);
/// Function tries to capture lambda's captured variables in the OpenMP region
/// before the original lambda is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);
/// Return true if the provided declaration \a VD should be captured by
/// reference.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
unsigned OpenMPCaptureLevel) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// If the current region is a range loop-based region, mark the start of the
/// loop construct.
void startOpenMPCXXRangeFor();
/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
unsigned CapLevel) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
/// Check if the specified global variable must be captured by outer capture
/// regions.
/// \param Level Relative level of nested OpenMP construct for that
/// the check is performed.
bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
/// Perform the implicit integer conversion required by OpenMP on expression
/// \p Op at location \p OpLoc (used where a clause argument must have
/// integral type).
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
/// Called on well-formed '\#pragma omp metadirective' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPMetaDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
// OpenMP directives and clauses.
/// Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses,
DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp [begin] assume[s]'.
void ActOnOpenMPAssumesDirective(SourceLocation Loc,
OpenMPDirectiveKind DKind,
ArrayRef<std::string> Assumptions,
bool SkippedClauses);
/// Check if there is an active global `omp begin assumes` directive.
bool isInOpenMPAssumeScope() const { return !OMPAssumeScoped.empty(); }
/// Check if there is an active global `omp assumes` directive.
bool hasGlobalOpenMPAssumes() const { return !OMPAssumeGlobal.empty(); }
/// Called on well-formed '#pragma omp end assumes'.
void ActOnOpenMPEndAssumesDirective();
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on Requires directive
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
DeclGroupPtrTy ActOnOpenMPDeclareMapperDirective(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Expr *MapperVarRef, ArrayRef<OMPClause *> Clauses,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
ExprResult ActOnOpenMPDeclareMapperDirectiveVarDecl(Scope *S,
QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
/// Return true if the variable \p VD is allowed to be referenced inside the
/// current 'omp declare mapper' construct.
bool isOpenMPDeclareMapperVarDeclAllowed(const VarDecl *VD) const;
/// Return the declaration of the mapper variable of the current
/// 'omp declare mapper' construct, if any.
const ValueDecl *getOpenMPDeclareMapperVarName() const;
/// Called on the start of target region i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI);
/// Called at the end of target region i.e. '#pragma omp end declare target'.
const DeclareTargetContextInfo ActOnOpenMPEndDeclareTargetDirective();
/// Called once a target context is completed, that can be when a
/// '#pragma omp end declare target' was encountered or when a
/// '#pragma omp declare target' without declaration-definition-seq was
/// encountered.
void ActOnFinishedOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI);
/// Searches for the provided declaration name for OpenMP declare target
/// directive.
NamedDecl *lookupOpenMPDeclareTargetName(Scope *CurScope,
CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id);
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
OMPDeclareTargetDeclAttr::DevTypeTy DT);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Finishes analysis of the deferred functions calls that may be declared as
/// host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
const FunctionDecl *Callee,
SourceLocation Loc);
/// Return true inside OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
return !DeclareTargetNesting.empty();
}
/// Return true inside OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// Called for syntactical loops (ForStmt or CXXForRangeStmt) associated to
/// an OpenMP loop directive.
StmtResult ActOnOpenMPCanonicalLoop(Stmt *AStmt);
/// Process a canonical OpenMP loop nest that can either be a canonical
/// literal loop (ForStmt or CXXForRangeStmt), or the generated loop of an
/// OpenMP loop transformation construct.
StmtResult ActOnOpenMPLoopnest(Stmt *AStmt);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
/// Called on a well-formed OpenMP executable directive of kind \p Kind,
/// with directive name \p DirName, the parsed clauses \p Clauses, and the
/// associated statement \p AStmt; dispatches to the directive-specific
/// ActOnOpenMP*Directive handlers below. \p CancelRegion is the region kind
/// for 'cancel'/'cancellation point' directives.
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '#pragma omp tile' after parsing of its clauses and
/// the associated statement.
StmtResult ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '#pragma omp unroll' after parsing of its clauses
/// and the associated statement.
StmtResult ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp depobj'.
StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp scan'.
StmtResult ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp interop'.
StmtResult ActOnOpenMPInteropDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp dispatch' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPDispatchDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp masked' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMaskedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type,
bool IsDeclareSimd = false);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
/// Checks '\#pragma omp declare variant' variant function and original
/// functions after parsing of the associated method/function.
/// \param DG Function declaration to which declare variant directive is
/// applied to.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \param TI The trait info object representing the match clause.
/// \returns None, if the function/variant function are not compatible with
/// the pragma, pair of original function/variant ref expression otherwise.
Optional<std::pair<FunctionDecl *, Expr *>>
checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef,
OMPTraitInfo &TI, SourceRange SR);
/// Called on well-formed '\#pragma omp declare variant' after parsing of
/// the associated method/function.
/// \param FD Function declaration to which declare variant directive is
/// applied to.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \param TI The context traits associated with the function variant.
void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef,
OMPTraitInfo &TI, SourceRange SR);
/// Called on a well-formed clause of kind \p Kind that carries a single
/// expression argument \p Expr; dispatches to the clause-specific handlers
/// (e.g. ActOnOpenMPFinalClause, ActOnOpenMPNumThreadsClause below).
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-form 'sizes' clause.
OMPClause *ActOnOpenMPSizesClause(ArrayRef<Expr *> SizeExprs,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-form 'full' clauses.
OMPClause *ActOnOpenMPFullClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-form 'partial' clauses.
OMPClause *ActOnOpenMPPartialClause(Expr *FactorExpr, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'detach' clause.
OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on a well-formed clause of kind \p Kind that takes a single
/// simple (enumeration-like) argument \p Argument located at
/// \p ArgumentLoc; dispatches to the clause-specific handlers
/// (e.g. ActOnOpenMPDefaultClause, ActOnOpenMPProcBindClause below).
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'when' clause.
OMPClause *ActOnOpenMPWhenClause(OMPTraitInfo &TI, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'order' clause.
OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on a well-formed clause of kind \p Kind that carries both simple
/// arguments \p Arguments (with locations \p ArgumentsLoc) and a single
/// expression \p Expr (e.g. 'schedule'; dispatches to
/// ActOnOpenMPScheduleClause below).
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on a well-formed clause of kind \p Kind that takes no arguments;
/// dispatches to the clause-specific handlers (e.g. ActOnOpenMPNowaitClause,
/// ActOnOpenMPUntiedClause below).
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acq_rel' clause.
OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acquire' clause.
OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'release' clause.
OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'relaxed' clause.
OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'init' clause.
OMPClause *ActOnOpenMPInitClause(Expr *InteropVar, ArrayRef<Expr *> PrefExprs,
bool IsTarget, bool IsTargetSync,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation VarLoc,
SourceLocation EndLoc);
/// Called on well-formed 'use' clause.
OMPClause *ActOnOpenMPUseClause(Expr *InteropVar, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation VarLoc, SourceLocation EndLoc);
/// Called on well-formed 'destroy' clause.
OMPClause *ActOnOpenMPDestroyClause(Expr *InteropVar, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation VarLoc,
SourceLocation EndLoc);
/// Called on well-formed 'novariants' clause.
OMPClause *ActOnOpenMPNovariantsClause(Expr *Condition,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nocontext' clause.
OMPClause *ActOnOpenMPNocontextClause(Expr *Condition,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'filter' clause.
OMPClause *ActOnOpenMPFilterClause(Expr *ThreadID, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on a well-formed clause that takes a variable list; dispatches on
/// \p Kind to the specific handler. The extra parameters cover the union of
/// what the individual var-list clauses need (dep/map modifiers, mapper ids,
/// motion modifiers). NOTE(review): inferred from the parameter set — confirm
/// against the definition.
OMPClause *ActOnOpenMPVarListClause(
    OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *DepModOrTailExpr,
    const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
    CXXScopeSpec &ReductionOrMapperIdScopeSpec,
    DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier,
    ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
    ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit,
    SourceLocation ExtraModifierLoc,
    ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
    ArrayRef<SourceLocation> MotionModifiersLoc);
/// Called on well-formed 'inclusive' clause.
OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'exclusive' clause.
OMPClause *ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation ColonLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(
ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind,
SourceLocation LPKindLoc, SourceLocation ColonLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ModifierLoc, SourceLocation ColonLoc,
SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depobj' pseudo clause.
OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
SourceLocation DepLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier,
Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ModifierLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *ActOnOpenMPMapClause(
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs, bool NoDiagnose = false,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *
ActOnOpenMPFromClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'use_device_addr' clause.
OMPClause *ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'nontemporal' clause.
OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Data for list of allocators.
struct UsesAllocatorsData {
/// Allocator.
Expr *Allocator = nullptr;
/// Allocator traits.
Expr *AllocatorTraits = nullptr;
/// Locations of '(' and ')' symbols.
SourceLocation LParenLoc, RParenLoc;
};
/// Called on well-formed 'uses_allocators' clause.
OMPClause *ActOnOpenMPUsesAllocatorClause(SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc,
ArrayRef<UsesAllocatorsData> Data);
/// Called on well-formed 'affinity' clause.
OMPClause *ActOnOpenMPAffinityClause(SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc, Expr *Modifier,
ArrayRef<Expr *> Locators);
/// The kind of conversion being performed.
enum CheckedConversionKind {
/// An implicit conversion.
CCK_ImplicitConversion,
/// A C-style cast.
CCK_CStyleCast,
/// A functional-style cast.
CCK_FunctionalCast,
/// A cast other than a C-style cast.
CCK_OtherCast,
/// A conversion for an operand of a builtin overloaded operator.
CCK_ForBuiltinOverloadedOp
};
/// Returns true when the checked-conversion kind corresponds to an explicit
/// cast written in the source (C-style, functional, or other cast), as
/// opposed to an implicit conversion or a builtin-operator conversion.
static bool isCast(CheckedConversionKind CCK) {
  switch (CCK) {
  case CCK_CStyleCast:
  case CCK_FunctionalCast:
  case CCK_OtherCast:
    return true;
  default:
    return false;
  }
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
ExprResult
ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_PRValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of an unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This function is a no-op if the operand has a function type
// or an array type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
  /// Variadic call through an ordinary function.
  VariadicFunction,
  /// Variadic call through a block.
  VariadicBlock,
  /// Variadic Objective-C method call.
  VariadicMethod,
  /// Variadic constructor call.
  VariadicConstructor,
  /// The callee is not variadic; no variadic promotion applies.
  VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
  /// The type may always be passed to a variadic function.
  VAK_Valid,
  /// Valid only under C++11 rules. NOTE(review): inferred from the name —
  /// confirm against isValidVarArgType's definition.
  VAK_ValidInCXX11,
  /// Passing this type has undefined behavior.
  VAK_Undefined,
  /// Undefined, but accepted in MS-compatibility mode.
  VAK_MSVCUndefined,
  /// Passing this type is an error.
  VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check whether the given statement can have musttail applied to it,
/// issuing a diagnostic and returning false if not. In the success case,
/// the statement is rewritten to remove implicit nodes from the return
/// value.
bool checkAndRewriteMustTailAttr(Stmt *St, const Attr &MTA);
private:
/// Check whether the given statement can have musttail applied to it,
/// issuing a diagnostic and returning false if not.
bool checkMustTailAttr(const Stmt *St, const Attr &MTA);
public:
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collector argument expressions for various
/// form of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
/// Context in which we're performing a usual arithmetic conversion.
enum ArithConvKind {
/// An arithmetic operation.
ACK_Arithmetic,
/// A bitwise operation.
ACK_BitwiseOp,
/// A comparison.
ACK_Comparison,
/// A conditional (?:) operator.
ACK_Conditional,
/// A compound assignment expression.
ACK_CompAssign,
};
// UsualArithmeticConversions - performs the UsualUnaryConversions on it's
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, ArithConvKind ACK);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointers types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatibleFunctionPointer - The assignment is between two function
/// pointers types that are not compatible, but we accept them as an
/// extension.
IncompatibleFunctionPointer,
/// IncompatiblePointerSign - The assignment is between two pointers types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
/// changes address spaces in nested pointer types which is not allowed.
/// For instance, converting __private int ** to __generic int ** is
/// illegal even though __private could be converted to __generic.
IncompatibleNestedPointerAddressSpaceMismatch,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointers types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
ExprResult PerformQualificationConversion(
Expr *E, QualType Ty, ExprValueKind VK = VK_PRValue,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
ExprResult &RHS,
SourceLocation QuestionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
/// Convenience overload of FindCompositePointerType taking ExprResult
/// operands: unwraps them, forwards to the Expr*& overload (which may
/// rewrite the operand expressions when \p ConvertArgs is true), then
/// stores the possibly-updated expressions back into the ExprResults.
QualType FindCompositePointerType(SourceLocation Loc,
                                  ExprResult &E1, ExprResult &E2,
                                  bool ConvertArgs = true) {
  Expr *LHS = E1.get();
  Expr *RHS = E2.get();
  QualType Result = FindCompositePointerType(Loc, LHS, RHS, ConvertArgs);
  E1 = LHS;
  E2 = RHS;
  return Result;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
/// Type checking for matrix binary operators.
QualType CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
bool IsCompAssign);
QualType CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign);
bool isValidSveBitcast(QualType srcType, QualType destType);
bool areMatrixTypesOfTheSameDimension(QualType srcTy, QualType destTy);
bool areVectorTypesSameSize(QualType srcType, QualType destType);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
// Fake up a scoped enumeration that still contextually converts to bool.
struct ReferenceConversionsScope {
/// The conversions that would be performed on an lvalue of type T2 when
/// binding a reference of type T1 to it, as determined when evaluating
/// whether T1 is reference-compatible with T2.
enum ReferenceConversions {
Qualification = 0x1,
NestedQualification = 0x2,
Function = 0x4,
DerivedToBase = 0x8,
ObjC = 0x10,
ObjCLifetime = 0x20,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime)
};
};
using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions;
ReferenceCompareResult
CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
ReferenceConversions *Conv = nullptr);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType ¶mType);
// CheckMatrixCast - Check type constraints for matrix casts.
// We allow casting between matrixes of the same dimensions i.e. when they
// have the same number of rows and column. Returns true if the cast is
// invalid.
bool CheckMatrixCast(SourceRange R, QualType DestTy, QualType SrcTy,
CastKind &Kind);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there are no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there are no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage, SourceLocation lbrac,
SourceLocation rbrac, SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
/// The result of checking a statement condition ('if', 'while', 'for',
/// 'switch', 'do'): an optional condition variable paired with the
/// converted condition expression, plus — for 'if constexpr' — the
/// condition's compile-time value, if it could be evaluated.
class ConditionResult {
// Variable declared inside the condition, if any; null otherwise.
Decl *ConditionVar;
// The (full-expression) condition itself.
FullExprArg Condition;
// True when the condition could not be formed; see isInvalid().
bool Invalid;
// Whether KnownValue below holds a meaningful value.
bool HasKnownValue;
// The constant boolean value of the condition; only meaningful when
// HasKnownValue is true.
bool KnownValue;
friend class Sema;
// Built only by Sema. For non-value-dependent constexpr conditions the
// constant value is evaluated eagerly here.
// NOTE: KnownValue's initializer reads HasKnownValue — correctness relies
// on the member declaration order above; do not reorder these fields.
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
// Error-state constructor; reachable via Sema::ConditionError().
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
// Default: a valid but empty condition.
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
/// Returns the condition variable (null if none) and the condition
/// expression.
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
/// Returns the compile-time value of the condition if known
/// (constexpr conditions only); None otherwise.
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
/// Produce a ConditionResult representing a failed condition check.
static ConditionResult ConditionError() {
return ConditionResult(/*Invalid=*/true);
}
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return true iff there were any errors
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);
/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
// When true, diagnostics are suppressed (the check still runs).
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
// Diagnose an expression whose type is not valid for an ICE.
virtual SemaDiagnosticBuilder
diagnoseNotICEType(Sema &S, SourceLocation Loc, QualType T);
// Diagnose an expression that is not an ICE; must be provided by
// subclasses (pure virtual).
virtual SemaDiagnosticBuilder diagnoseNotICE(Sema &S,
SourceLocation Loc) = 0;
// Diagnose the case where the expression is not a strict ICE but can
// still be folded to a constant.
virtual SemaDiagnosticBuilder diagnoseFold(Sema &S, SourceLocation Loc);
virtual ~VerifyICEDiagnoser() {}
};
enum AllowFoldKind {
NoFold,
AllowFold,
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
AllowFoldKind CanFold = NoFold);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
AllowFoldKind CanFold = NoFold);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr,
AllowFoldKind CanFold = NoFold);
/// Convenience overload: verify that \p E is an ICE without retrieving its
/// evaluated value (forwards with a null Result pointer).
ExprResult VerifyIntegerConstantExpression(Expr *E,
AllowFoldKind CanFold = NoFold) {
return VerifyIntegerConstantExpression(E, nullptr, CanFold);
}
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
/// A diagnostic (with location) whose emission is deferred until we know
/// the containing function will actually be codegen'ed, together with the
/// reason it was deferred.
class DeviceDeferredDiagnostic {
public:
DeviceDeferredDiagnostic(SourceLocation SL, const PartialDiagnostic &PD,
DeviceDiagnosticReason R)
: Diagnostic(SL, PD), Reason(R) {}
// The deferred diagnostic and its source location.
PartialDiagnosticAt &getDiag() { return Diagnostic; }
// Why this diagnostic was deferred.
DeviceDiagnosticReason getReason() const { return Reason; }
private:
PartialDiagnosticAt Diagnostic;
DeviceDiagnosticReason Reason;
};
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<DeviceDeferredDiagnostic>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
// Canonical declaration of the function.
CanonicalDeclPtr<FunctionDecl> FD;
// Location associated with the function (e.g. of a call site).
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics
/// unless \p EmitOnBothSides is true.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
SemaDiagnosticBuilder CUDADiagIfDeviceCode(SourceLocation Loc,
unsigned DiagID);
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
SemaDiagnosticBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
/// for the device, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
SemaDiagnosticBuilder
diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD);
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the host, emits the diagnostics immediately.
/// - If CurContext is a non-host function, just ignore it.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
SemaDiagnosticBuilder diagIfOpenMPHostCode(SourceLocation Loc,
unsigned DiagID, FunctionDecl *FD);
SemaDiagnosticBuilder targetDiag(SourceLocation Loc, unsigned DiagID,
FunctionDecl *FD = nullptr);
/// Convenience overload of targetDiag that takes a prepared
/// PartialDiagnostic: forwards its ID and streams its arguments into the
/// resulting builder.
SemaDiagnosticBuilder targetDiag(SourceLocation Loc,
const PartialDiagnostic &PD,
FunctionDecl *FD = nullptr) {
return targetDiag(Loc, PD.getDiagID(), FD) << PD;
}
/// Check if the expression is allowed to be used in expressions for the
/// offloading devices.
void checkDeviceDecl(ValueDecl *D, SourceLocation Loc);
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
/// Where a CUDA variable is emitted (host side, device side, or both).
/// Note: the member comments use '///<' so Doxygen attaches each comment
/// to the enumerator it follows, not to the next one.
enum CUDAVariableTarget {
CVT_Device, ///< Emitted on device side with a shadow variable on host side
CVT_Host, ///< Emitted on host side only
CVT_Both, ///< Emitted on both sides with different addresses
CVT_Unified, ///< Emitted as a unified address, e.g. managed variables
};
/// Determines whether the given variable is emitted on host or device side.
CUDAVariableTarget IdentifyCUDATarget(const VarDecl *D);
/// Gets the CUDA target for the current context (CFT_Host when the
/// current context is not a function).
CUDAFunctionTarget CurrentCUDATarget() {
const FunctionDecl *CurFD = dyn_cast<FunctionDecl>(CurContext);
return IdentifyCUDATarget(CurFD);
}
static bool isCUDAImplicitHostDeviceFunction(const FunctionDecl *D);
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
/// May add implicit CUDAConstantAttr attribute to VD, depending on VD
/// and current compilation settings.
void MaybeAddCUDAConstantAttr(VarDecl *VD);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
void CUDACheckLambdaCapture(CXXMethodDecl *D, const sema::Capture &Capture);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// By default, a CUDA lambda is a host device function unless it carries an
/// explicit host or device attribute.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
// \brief Checks that initializers of \p Var satisfy CUDA restrictions. In
// case of error emits appropriate diagnostic and invalidates \p Var.
//
// \details CUDA allows only empty constructors as initializers for global
// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure kernel call, with the
/// parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
enum class AttributeCompletion {
Attribute,
Scope,
None,
};
void CodeCompleteAttribute(
AttributeCommonInfo::Syntax Syntax,
AttributeCompletion Completion = AttributeCompletion::Attribute,
const IdentifierInfo *Scope = nullptr);
/// Determines the preferred type of the current function argument, by
/// examining the signatures of all possible overloads.
/// Returns null if unknown or ambiguous, or if code completion is off.
///
/// If the code completion point has been reached, also reports the function
/// signatures that were considered.
///
/// FIXME: rename to GuessCallArgumentType to reduce confusion.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
CXXScopeSpec SS,
ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs,
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
/// Trigger code completion for a record of \p BaseType. \p InitExprs are
/// expressions in the initializer list seen so far and \p D is the current
/// Designation being parsed.
void CodeCompleteDesignator(const QualType BaseType,
llvm::ArrayRef<Expr *> InitExprs,
const Designation &D);
void CodeCompleteAfterIf(Scope *S, bool IsBracedThen);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext,
bool IsUsingDeclaration, QualType BaseType,
QualType PreferredType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteAfterFunctionEquals(Declarator &D);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
// Zero-based index of the format-string argument.
unsigned FormatIdx;
// Index of the first data argument consumed by the format string.
unsigned FirstDataArg;
// Whether the callee takes a va_list instead of variadic arguments.
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto, SourceLocation Loc);
void CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl,
StringRef ParamName, QualType ArgTy, QualType ParamTy);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
void CheckSYCLKernelCall(FunctionDecl *CallerFunc, SourceRange CallLoc,
ArrayRef<const Expr *> Args);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
bool CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckARMCoprocessorImmediate(const TargetInfo &TI, const Expr *CoprocArg,
bool WantCDE);
bool CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
ArrayRef<int> ArgNums);
bool CheckX86BuiltinTileDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums);
bool CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
ArrayRef<int> ArgNums);
bool CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum);
bool CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckIntelFPGARegBuiltinFunctionCall(unsigned BuiltinID, CallExpr *Call);
bool CheckIntelFPGAMemBuiltinFunctionCall(CallExpr *Call);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinComplex(CallExpr *TheCall);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
bool SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinArithmeticFence(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID,
const char *TypeDesc);
bool CheckPPCMMAType(QualType Type, SourceLocation TypeLoc);
// Matrix builtin handling.
ExprResult SemaBuiltinMatrixTranspose(CallExpr *TheCall,
ExprResult CallResult);
ExprResult SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
ExprResult CallResult);
ExprResult SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
ExprResult CallResult);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckFreeArguments(const CallExpr *E);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(const Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check if there is a field shadowing.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// Check whether receiver is mutable ObjC container which
/// attempts to add itself into the container
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void CheckTCBEnforcement(const CallExpr *TheCall, const FunctionDecl *Callee);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Peform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Nullable_result = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
bool isCFError(RecordDecl *D);
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
/// Returns the current lexical context for Objective-C purposes, mapping a
/// category to the interface it extends.
const DeclContext *getCurObjCLexicalContext() const {
  const DeclContext *Ctx = getCurLexicalContext();
  // A category implicitly has the attribute of the interface.
  if (const auto *Category = dyn_cast<ObjCCategoryDecl>(Ctx))
    return Category->getClassInterface();
  return Ctx;
}
/// Determine the number of levels of enclosing template parameters. This is
/// only usable while parsing. Note that this does not include dependent
/// contexts in which no template parameters have yet been declared, such as
/// in a terse function template or generic lambda before the first 'auto' is
/// encountered.
unsigned getTemplateDepth(Scope *S) const;
/// Returns true when a call site supplies more arguments than the callee's
/// parameter list accepts.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
                             bool PartialOverloading = false) {
  // During code completion we may be just past a trailing comma; the argument
  // about to be typed also counts against the limit.
  const size_t EffectiveArgs =
      (PartialOverloading && NumArgs > 0) ? NumArgs + 1 : NumArgs;
  return EffectiveArgs > NumParams;
}
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;
private:
int ParsingClassDepth = 0;
/// RAII helper that stashes Sema's delayed exception-spec check lists on
/// construction and restores them on destruction, so a nested parse starts
/// with (and must finish with) empty lists.
// NOTE(review): presumably used around re-entrant parses such as late-parsed
// class members -- confirm at call sites.
class SavePendingParsedClassStateRAII {
public:
// Move the currently pending checks into the saved slots, leaving the live
// lists empty for the nested parse.
SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }
~SavePendingParsedClassStateRAII() {
// The nested parse must have drained everything it queued.
assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
swapSavedState();
}
private:
Sema &S;
decltype(DelayedOverridingExceptionSpecChecks)
SavedOverridingExceptionSpecChecks;
decltype(DelayedEquivalentExceptionSpecChecks)
SavedEquivalentExceptionSpecChecks;
// Exchanges saved and live state; called from both the ctor and the dtor,
// so two invocations restore the original state.
void swapSavedState() {
SavedOverridingExceptionSpecChecks.swap(
S.DelayedOverridingExceptionSpecChecks);
SavedEquivalentExceptionSpecChecks.swap(
S.DelayedEquivalentExceptionSpecChecks);
}
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
  Expr *E;
  RecordDecl *RD;
  ValueDecl *MD;
  CharUnits Alignment;

  MisalignedMember() : E(), RD(), MD(), Alignment() {}
  MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
                   CharUnits Alignment)
      : E(E), RD(RD), MD(MD), Alignment(Alignment) {}
  explicit MisalignedMember(Expr *E)
      : MisalignedMember(E, nullptr, nullptr, CharUnits()) {}

  /// Entries are identified by the designating expression alone; now
  /// const-qualified so it can be called on const operands (the original
  /// overload was non-const, an idiom defect for a pure comparison).
  bool operator==(const MisalignedMember &m) const { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks if the expression is in the sef of potentially
/// misaligned members and it is converted to some pointer type T with lower
/// or equal alignment requirements. If so it removes it. This is used when
/// we do not want to diagnose such misaligned access (e.g. in conversions to
/// void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
ForThisTarget = 0,
VariadicFunction,
ConstructorDestructor,
BuiltinFunction
};
private:
// We store SYCL Kernels here and handle separately -- which is a hack.
// FIXME: It would be best to refactor this.
llvm::SetVector<Decl *> SyclDeviceDecls;
// SYCL integration header instance for current compilation unit this Sema
// is associated with.
std::unique_ptr<SYCLIntegrationHeader> SyclIntHeader;
std::unique_ptr<SYCLIntegrationFooter> SyclIntFooter;
// We need to store the list of the sycl_kernel functions and their associated
// generated OpenCL Kernels so we can go back and re-name these after the
// fact.
llvm::SmallVector<std::pair<const FunctionDecl *, FunctionDecl *>>
SyclKernelsToOpenCLKernels;
// Used to suppress diagnostics during kernel construction, since these were
// already emitted earlier. Diagnosing during Kernel emissions also skips the
// useful notes that shows where the kernel was called.
bool DiagnosingSYCLKernel = false;
public:
/// Records the OpenCL kernel generated for a sycl_kernel function so the
/// pair can be renamed later (stored in SyclKernelsToOpenCLKernels).
void addSyclOpenCLKernel(const FunctionDecl *SyclKernel,
                         FunctionDecl *OpenCLKernel) {
  SyclKernelsToOpenCLKernels.emplace_back(SyclKernel, OpenCLKernel);
}
/// Registers a declaration as a SYCL device declaration.
void addSyclDeviceDecl(Decl *d) { SyclDeviceDecls.insert(d); }
/// Accessor for the set of SYCL device declarations.
llvm::SetVector<Decl *> &syclDeviceDecls() { return SyclDeviceDecls; }
/// Lazily creates and returns SYCL integration header instance.
SYCLIntegrationHeader &getSyclIntegrationHeader() {
  if (SyclIntHeader == nullptr)
    SyclIntHeader = std::make_unique<SYCLIntegrationHeader>(*this);
  return *SyclIntHeader; // dereference unique_ptr directly; '.get()' was redundant
}
/// Lazily creates and returns SYCL integration footer instance.
SYCLIntegrationFooter &getSyclIntegrationFooter() {
  if (SyclIntFooter == nullptr)
    SyclIntFooter = std::make_unique<SYCLIntegrationFooter>(*this);
  return *SyclIntFooter; // dereference unique_ptr directly; '.get()' was redundant
}
/// Forwards a variable to the integration footer, but only when compiling
/// SYCL device code and a footer file was requested.
void addSyclVarDecl(VarDecl *VD) {
  if (LangOpts.SYCLIsDevice && !LangOpts.SYCLIntFooter.empty())
    getSyclIntegrationFooter().addVarDecl(VD);
}
enum SYCLRestrictKind {
KernelGlobalVariable,
KernelRTTI,
KernelNonConstStaticDataVariable,
KernelCallVirtualFunction,
KernelUseExceptions,
KernelCallRecursiveFunction,
KernelCallFunctionPointer,
KernelAllocateStorage,
KernelUseAssembly,
KernelCallDllimportFunction,
KernelCallVariadicFunction,
KernelCallUndefinedFunction,
KernelConstStaticVariable
};
bool isKnownGoodSYCLDecl(const Decl *D);
void checkSYCLDeviceVarDecl(VarDecl *Var);
void copySYCLKernelAttrs(const CXXRecordDecl *KernelObj);
void ConstructOpenCLKernel(FunctionDecl *KernelCallerFunc, MangleContext &MC);
void SetSYCLKernelNames();
void MarkDevices();
/// Get the number of fields or captures within the parsed type.
ExprResult ActOnSYCLBuiltinNumFieldsExpr(ParsedType PT);
ExprResult BuildSYCLBuiltinNumFieldsExpr(SourceLocation Loc,
QualType SourceTy);
/// Get a value based on the type of the given field number so that callers
/// can wrap it in a decltype() to get the actual type of the field.
ExprResult ActOnSYCLBuiltinFieldTypeExpr(ParsedType PT, Expr *Idx);
ExprResult BuildSYCLBuiltinFieldTypeExpr(SourceLocation Loc,
QualType SourceTy, Expr *Idx);
/// Get the number of base classes within the parsed type.
ExprResult ActOnSYCLBuiltinNumBasesExpr(ParsedType PT);
ExprResult BuildSYCLBuiltinNumBasesExpr(SourceLocation Loc,
QualType SourceTy);
/// Get a value based on the type of the given base number so that callers
/// can wrap it in a decltype() to get the actual type of the base class.
ExprResult ActOnSYCLBuiltinBaseTypeExpr(ParsedType PT, Expr *Idx);
ExprResult BuildSYCLBuiltinBaseTypeExpr(SourceLocation Loc, QualType SourceTy,
Expr *Idx);
/// Emit a diagnostic about the given attribute having a deprecated name, and
/// also emit a fixit hint to generate the new attribute name.
void DiagnoseDeprecatedAttribute(const ParsedAttr &A, StringRef NewScope,
StringRef NewName);
/// Diagnoses an attribute in the 'intelfpga' namespace and suggests using
/// the attribute in the 'intel' namespace instead.
void CheckDeprecatedSYCLAttributeSpelling(const ParsedAttr &A,
StringRef NewName = "");
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurLexicalContext is a kernel function or it is known that the
/// function will be emitted for the device, emits the diagnostics
/// immediately.
/// - If CurLexicalContext is a function and we are compiling
/// for the device, but we don't know that this function will be codegen'ed
/// for devive yet, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// Diagnose __float128 type usage only from SYCL device code if the current
/// target doesn't support it
/// if (!S.Context.getTargetInfo().hasFloat128Type() &&
/// S.getLangOpts().SYCLIsDevice)
/// SYCLDiagIfDeviceCode(Loc, diag::err_type_unsupported) << "__float128";
SemaDiagnosticBuilder SYCLDiagIfDeviceCode(
SourceLocation Loc, unsigned DiagID,
DeviceDiagnosticReason Reason = DeviceDiagnosticReason::Sycl |
DeviceDiagnosticReason::Esimd);
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed, creates a deferred diagnostic to be emitted if
/// and when the caller is codegen'ed, and returns true.
///
/// - Otherwise, returns true without emitting any diagnostics.
///
/// Adds Callee to DeviceCallGraph if we don't know if its caller will be
/// codegen'ed yet.
bool checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee);
/// Finishes analysis of the deferred functions calls that may be not
/// properly declared for device compilation.
void finalizeSYCLDelayedAnalysis(const FunctionDecl *Caller,
const FunctionDecl *Callee,
SourceLocation Loc,
DeviceDiagnosticReason Reason);
/// Tells whether given variable is a SYCL explicit SIMD extension's "private
/// global" variable - global variable in the private address space.
bool isSYCLEsimdPrivateGlobal(VarDecl *VDecl) {
  // Guard clauses preserve the original short-circuit evaluation order.
  if (!getLangOpts().SYCLIsDevice)
    return false;
  if (!VDecl->hasAttr<SYCLSimdAttr>())
    return false;
  if (!VDecl->hasGlobalStorage())
    return false;
  return VDecl->getType().getAddressSpace() == LangAS::sycl_private;
}
};
/// Validates one dimension argument of a work-size style attribute.
/// \returns nullptr when the argument is not an integer constant expression
/// or is zero (an error is emitted); otherwise the constant-folded
/// expression. A negative value only warns and the expression is still
/// returned. Instantiation-dependent expressions are accepted unchecked,
/// to be re-validated after template substitution.
inline Expr *checkMaxWorkSizeAttrExpr(Sema &S, const AttributeCommonInfo &CI,
Expr *E) {
assert(E && "Attribute must have an argument.");
if (!E->isInstantiationDependent()) {
llvm::APSInt ArgVal;
ExprResult ICE = S.VerifyIntegerConstantExpression(E, &ArgVal);
if (ICE.isInvalid())
return nullptr;
// Keep the constant-folded form for the semantic attribute.
E = ICE.get();
if (ArgVal.isNegative()) {
S.Diag(E->getExprLoc(),
diag::warn_attribute_requires_non_negative_integer_argument)
<< E->getType() << S.Context.UnsignedLongLongTy
<< E->getSourceRange();
// NOTE(review): a negative value warns but the expression is still
// returned (and may end up in the attribute) -- confirm intended.
return E;
}
unsigned Val = ArgVal.getZExtValue();
if (Val == 0) {
S.Diag(E->getExprLoc(), diag::err_attribute_argument_is_zero)
<< CI << E->getSourceRange();
return nullptr;
}
}
return E;
}
/// Attaches a three-dimensional (X, Y, Z) Intel work-group attribute to D.
/// Each dimension is validated via checkMaxWorkSizeAttrExpr; if any
/// dimension is invalid the attribute is silently dropped.
template <typename WorkGroupAttrType>
void Sema::addIntelTripleArgAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *XDimExpr, Expr *YDimExpr,
Expr *ZDimExpr) {
assert((XDimExpr && YDimExpr && ZDimExpr) &&
"argument has unexpected null value");
// Accept template arguments for now as they depend on something else.
// We'll get to check them when they eventually get instantiated.
if (!XDimExpr->isValueDependent() && !YDimExpr->isValueDependent() &&
!ZDimExpr->isValueDependent()) {
// Save ConstantExpr in semantic attribute
XDimExpr = checkMaxWorkSizeAttrExpr(*this, CI, XDimExpr);
YDimExpr = checkMaxWorkSizeAttrExpr(*this, CI, YDimExpr);
ZDimExpr = checkMaxWorkSizeAttrExpr(*this, CI, ZDimExpr);
// Any nullptr means validation failed and already diagnosed; bail out
// without attaching the attribute.
if (!XDimExpr || !YDimExpr || !ZDimExpr)
return;
}
D->addAttr(::new (Context)
WorkGroupAttrType(Context, CI, XDimExpr, YDimExpr, ZDimExpr));
}
/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
// True when a context was actually pushed (and must be popped on exit).
bool Entered = true;
public:
/// Pushes NewContext unless ShouldEnter is false.
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
ExprContext);
}
/// Pushes NewContext, reusing the enclosing lambda context declaration.
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(
NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
}
enum InitListTag { InitList };
/// Braced-init-list variant: only enters an UnevaluatedList context when
/// already inside an unevaluated operand in C++11 or later.
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::UnevaluatedList);
Entered = true;
}
}
// Pops only what was pushed; Entered tracks that exactly.
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
/// Saved tokens of the delayed definition, replayed when parsing resumes.
CachedTokens Toks;
/// The template function declaration to be late parsed.
Decl *D;
};
template <>
void Sema::PragmaStack<Sema::AlignPackInfo>::Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
AlignPackInfo Value);
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
// Sentinel keys delegate to the FunctionDecl's DenseMapInfo and pair it
// with a default (invalid) SourceLocation.
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getHashValue());
}
// Equality compares both components, consistent with getHashValue.
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif
|
ompcompress.c | #ifdef _OPENMP
/* compress 1d contiguous array in parallel */
/* Blocks of 4 values are grouped into chunks; each chunk is encoded into its
   own bit stream (bs[chunk]), so loop iterations are independent and need no
   synchronization. compress_finish_par() concatenates the streams in order. */
static void
_t2(compress_omp, Scalar, 1)(zfp_stream* stream, const zfp_field* field)
{
/* array metadata */
const Scalar* data = (const Scalar*)field->data;
uint nx = field->nx;
/* number of omp threads, blocks, and chunks */
uint threads = thread_count_omp(stream);
uint blocks = (nx + 3) / 4;
uint chunks = chunk_count_omp(stream, blocks, threads);
/* signed loop index; presumably for OpenMP 2.x compilers (e.g. MSVC) */
int chunk;
/* allocate per-thread streams */
bitstream** bs = compress_init_par(stream, field, chunks, blocks);
if (!bs)
return;
/* compress chunks of blocks in parallel */
#pragma omp parallel for num_threads(threads)
for (chunk = 0; chunk < (int)chunks; chunk++) {
/* determine range of block indices assigned to this thread */
uint bmin = chunk_offset(blocks, chunks, chunk + 0);
uint bmax = chunk_offset(blocks, chunks, chunk + 1);
uint block;
/* set up thread-local bit stream (shallow copy of the shared stream) */
zfp_stream s = *stream;
zfp_stream_set_bit_stream(&s, bs[chunk]);
/* compress sequence of blocks */
for (block = bmin; block < bmax; block++) {
/* determine block origin x within array */
const Scalar* p = data;
uint x = 4 * block;
p += x;
/* compress partial block (fewer than 4 values remain) or full block */
if (nx - x < 4)
_t2(zfp_encode_partial_block_strided, Scalar, 1)(&s, p, MIN(nx - x, 4u), 1);
else
_t2(zfp_encode_block, Scalar, 1)(&s, p);
}
}
/* concatenate per-thread streams */
compress_finish_par(stream, bs, chunks);
}
/* compress 1d strided array in parallel */
/* Same chunking scheme as the contiguous variant, but elements are sx apart. */
static void
_t2(compress_strided_omp, Scalar, 1)(zfp_stream* stream, const zfp_field* field)
{
/* array metadata */
const Scalar* data = (const Scalar*)field->data;
uint nx = field->nx;
/* stride defaults to 1 (contiguous) when the field does not specify one */
int sx = field->sx ? field->sx : 1;
/* number of omp threads, blocks, and chunks */
uint threads = thread_count_omp(stream);
uint blocks = (nx + 3) / 4;
uint chunks = chunk_count_omp(stream, blocks, threads);
/* signed loop index; presumably for OpenMP 2.x compilers (e.g. MSVC) */
int chunk;
/* allocate per-thread streams */
bitstream** bs = compress_init_par(stream, field, chunks, blocks);
if (!bs)
return;
/* compress chunks of blocks in parallel */
#pragma omp parallel for num_threads(threads)
for (chunk = 0; chunk < (int)chunks; chunk++) {
/* determine range of block indices assigned to this thread */
uint bmin = chunk_offset(blocks, chunks, chunk + 0);
uint bmax = chunk_offset(blocks, chunks, chunk + 1);
uint block;
/* set up thread-local bit stream (shallow copy of the shared stream) */
zfp_stream s = *stream;
zfp_stream_set_bit_stream(&s, bs[chunk]);
/* compress sequence of blocks */
for (block = bmin; block < bmax; block++) {
/* determine block origin x within array */
const Scalar* p = data;
uint x = 4 * block;
/* widen to ptrdiff_t before scaling to avoid uint overflow on large x */
p += sx * (ptrdiff_t)x;
/* compress partial block (fewer than 4 values remain) or full block */
if (nx - x < 4)
_t2(zfp_encode_partial_block_strided, Scalar, 1)(&s, p, MIN(nx - x, 4u), sx);
else
_t2(zfp_encode_block_strided, Scalar, 1)(&s, p, sx);
}
}
/* concatenate per-thread streams */
compress_finish_par(stream, bs, chunks);
}
/* compress 2d strided array in parallel */
/* 4x4 blocks are linearized in x-major order (block = y-block * bx + x-block)
   and grouped into chunks, one independent bit stream per chunk. */
static void
_t2(compress_strided_omp, Scalar, 2)(zfp_stream* stream, const zfp_field* field)
{
/* array metadata */
const Scalar* data = (const Scalar*)field->data;
uint nx = field->nx;
uint ny = field->ny;
/* strides default to a contiguous row-major layout */
int sx = field->sx ? field->sx : 1;
int sy = field->sy ? field->sy : (int)nx;
/* number of omp threads, blocks, and chunks */
uint threads = thread_count_omp(stream);
uint bx = (nx + 3) / 4;
uint by = (ny + 3) / 4;
uint blocks = bx * by;
uint chunks = chunk_count_omp(stream, blocks, threads);
/* signed loop index; presumably for OpenMP 2.x compilers (e.g. MSVC) */
int chunk;
/* allocate per-thread streams */
bitstream** bs = compress_init_par(stream, field, chunks, blocks);
if (!bs)
return;
/* compress chunks of blocks in parallel */
#pragma omp parallel for num_threads(threads)
for (chunk = 0; chunk < (int)chunks; chunk++) {
/* determine range of block indices assigned to this thread */
uint bmin = chunk_offset(blocks, chunks, chunk + 0);
uint bmax = chunk_offset(blocks, chunks, chunk + 1);
uint block;
/* set up thread-local bit stream (shallow copy of the shared stream) */
zfp_stream s = *stream;
zfp_stream_set_bit_stream(&s, bs[chunk]);
/* compress sequence of blocks */
for (block = bmin; block < bmax; block++) {
/* decode linear block index into block origin (x, y) within array */
const Scalar* p = data;
uint b = block;
uint x, y;
x = 4 * (b % bx); b /= bx;
y = 4 * b;
p += sx * (ptrdiff_t)x + sy * (ptrdiff_t)y;
/* compress partial block (array edge) or full 4x4 block */
if (nx - x < 4 || ny - y < 4)
_t2(zfp_encode_partial_block_strided, Scalar, 2)(&s, p, MIN(nx - x, 4u), MIN(ny - y, 4u), sx, sy);
else
_t2(zfp_encode_block_strided, Scalar, 2)(&s, p, sx, sy);
}
}
/* concatenate per-thread streams */
compress_finish_par(stream, bs, chunks);
}
/* compress 3d strided array in parallel */
/* 4x4x4 blocks are linearized x-fastest and grouped into chunks, one
   independent bit stream per chunk. */
static void
_t2(compress_strided_omp, Scalar, 3)(zfp_stream* stream, const zfp_field* field)
{
/* array metadata */
const Scalar* data = (const Scalar*)field->data;
uint nx = field->nx;
uint ny = field->ny;
uint nz = field->nz;
/* strides default to a contiguous row-major layout */
int sx = field->sx ? field->sx : 1;
int sy = field->sy ? field->sy : (int)nx;
int sz = field->sz ? field->sz : (int)(nx * ny);
/* number of omp threads, blocks, and chunks */
uint threads = thread_count_omp(stream);
uint bx = (nx + 3) / 4;
uint by = (ny + 3) / 4;
uint bz = (nz + 3) / 4;
uint blocks = bx * by * bz;
uint chunks = chunk_count_omp(stream, blocks, threads);
/* signed loop index; presumably for OpenMP 2.x compilers (e.g. MSVC) */
int chunk;
/* allocate per-thread streams */
bitstream** bs = compress_init_par(stream, field, chunks, blocks);
if (!bs)
return;
/* compress chunks of blocks in parallel */
#pragma omp parallel for num_threads(threads)
for (chunk = 0; chunk < (int)chunks; chunk++) {
/* determine range of block indices assigned to this thread */
uint bmin = chunk_offset(blocks, chunks, chunk + 0);
uint bmax = chunk_offset(blocks, chunks, chunk + 1);
uint block;
/* set up thread-local bit stream (shallow copy of the shared stream) */
zfp_stream s = *stream;
zfp_stream_set_bit_stream(&s, bs[chunk]);
/* compress sequence of blocks */
for (block = bmin; block < bmax; block++) {
/* decode linear block index into block origin (x, y, z) within array */
const Scalar* p = data;
uint b = block;
uint x, y, z;
x = 4 * (b % bx); b /= bx;
y = 4 * (b % by); b /= by;
z = 4 * b;
p += sx * (ptrdiff_t)x + sy * (ptrdiff_t)y + sz * (ptrdiff_t)z;
/* compress partial block (array edge) or full 4x4x4 block */
if (nx - x < 4 || ny - y < 4 || nz - z < 4)
_t2(zfp_encode_partial_block_strided, Scalar, 3)(&s, p, MIN(nx - x, 4u), MIN(ny - y, 4u), MIN(nz - z, 4u), sx, sy, sz);
else
_t2(zfp_encode_block_strided, Scalar, 3)(&s, p, sx, sy, sz);
}
}
/* concatenate per-thread streams */
compress_finish_par(stream, bs, chunks);
}
/* compress 4d strided array in parallel */
/* 4x4x4x4 blocks are linearized x-fastest and grouped into chunks, one
   independent bit stream per chunk; compress_finish_par() concatenates them. */
static void
_t2(compress_strided_omp, Scalar, 4)(zfp_stream* stream, const zfp_field* field)
{
/* array metadata */
/* explicit cast added for consistency with the 1d-3d variants (field->data
   is untyped); also required if this file is ever compiled as C++ */
const Scalar* data = (const Scalar*)field->data;
uint nx = field->nx;
uint ny = field->ny;
uint nz = field->nz;
uint nw = field->nw;
/* strides default to a contiguous row-major layout */
int sx = field->sx ? field->sx : 1;
int sy = field->sy ? field->sy : (int)nx;
int sz = field->sz ? field->sz : (int)(nx * ny);
int sw = field->sw ? field->sw : (int)(nx * ny * nz);
/* number of omp threads, blocks, and chunks */
uint threads = thread_count_omp(stream);
uint bx = (nx + 3) / 4;
uint by = (ny + 3) / 4;
uint bz = (nz + 3) / 4;
uint bw = (nw + 3) / 4;
uint blocks = bx * by * bz * bw;
uint chunks = chunk_count_omp(stream, blocks, threads);
/* signed loop index; presumably for OpenMP 2.x compilers (e.g. MSVC) */
int chunk;
/* allocate per-thread streams */
bitstream** bs = compress_init_par(stream, field, chunks, blocks);
if (!bs)
return;
/* compress chunks of blocks in parallel */
#pragma omp parallel for num_threads(threads)
for (chunk = 0; chunk < (int)chunks; chunk++) {
/* determine range of block indices assigned to this thread */
uint bmin = chunk_offset(blocks, chunks, chunk + 0);
uint bmax = chunk_offset(blocks, chunks, chunk + 1);
uint block;
/* set up thread-local bit stream (shallow copy of the shared stream) */
zfp_stream s = *stream;
zfp_stream_set_bit_stream(&s, bs[chunk]);
/* compress sequence of blocks */
for (block = bmin; block < bmax; block++) {
/* decode linear block index into block origin (x, y, z, w) within array */
const Scalar* p = data;
uint b = block;
uint x, y, z, w;
x = 4 * (b % bx); b /= bx;
y = 4 * (b % by); b /= by;
z = 4 * (b % bz); b /= bz;
w = 4 * b;
p += sx * (ptrdiff_t)x + sy * (ptrdiff_t)y + sz * (ptrdiff_t)z + sw * (ptrdiff_t)w;
/* compress partial block (array edge) or full 4x4x4x4 block */
if (nx - x < 4 || ny - y < 4 || nz - z < 4 || nw - w < 4)
_t2(zfp_encode_partial_block_strided, Scalar, 4)(&s, p, MIN(nx - x, 4u), MIN(ny - y, 4u), MIN(nz - z, 4u), MIN(nw - w, 4u), sx, sy, sz, sw);
else
_t2(zfp_encode_block_strided, Scalar, 4)(&s, p, sx, sy, sz, sw);
}
}
/* concatenate per-thread streams */
compress_finish_par(stream, bs, chunks);
}
#endif
|
AI_model3.c | #include"AI.h"
#include <omp.h>
#define MAXSTEP 4
//#define CHECK_SCORE
//This is for model3
//the simulation function for the branches in the searching tree
//Negamax-style recursive evaluation: returns the best achievable score from
//'player's scoring perspective (ai_sum_scores) for the side to move; each
//child move is explored in its own OpenMP task.
int ai_model3_simulate(GameState *gameState, Player *player, int depth)
{
// Leaf of the search tree: evaluate the position directly.
if(depth<=0)return ai_sum_scores(gameState,player);
int MaxScore=-60000;
int playerTurn=gameState->playerTurn;
int total_num_moves=0;
// Flattened move list: MovesStart[i]/MovesEnd[i] describe candidate move i.
vector MovesStart,MovesEnd;
vector_init(&MovesStart);
vector_init(&MovesEnd);
int cnt=0;
// Gather legal moves from each of the 64 board squares.
for(int i=0;i<64;i++)
{
vector CurLegalMoves=env_get_legal_moves(gameState,player,i);
cnt=CurLegalMoves.count;
if(cnt>0){
vector_cat(&MovesEnd,&CurLegalMoves);
// One start entry per destination so the two vectors stay parallel.
for(int j=0;j<cnt;j++) vector_add(&MovesStart,i);
}
vector_free(&CurLegalMoves);
total_num_moves+=cnt;
}
assert(MovesStart.count==MovesEnd.count);
// NOTE(review): malloc result unchecked; with no legal moves this is
// malloc(0) and -60000*playerTurn is returned below -- confirm intended.
int *Scores=malloc(sizeof(int)*total_num_moves);
// #pragma omp single
// One task per candidate move; each task simulates on its own state copy
// and writes only its own Scores[i] slot, so no locking is needed.
for(int i=0;i<total_num_moves;i++)
{
#pragma omp task shared(Scores)
{
GameState simulation=env_copy_State(gameState);
env_play(&simulation,player,vector_get(&MovesStart,i),vector_get(&MovesEnd,i));
// Multiply by playerTurn so "larger is better for the side to move"
// (negamax sign convention).
int score=playerTurn*ai_model3_simulate(&simulation,player,depth-1);
Scores[i]=score;
env_free_state(&simulation);
}
}
#pragma omp taskwait
for(int i=0;i<total_num_moves;i++)MaxScore=MAX(MaxScore,Scores[i]);
vector_free(&MovesStart);
vector_free(&MovesEnd);
free(Scores);
// Undo the sign normalization before returning to the caller.
return MaxScore*playerTurn;
}
//the play function for the root in the searching tree, return the quit from check_end
//Flattens all legal moves, scores each one with a depth-maxStep negamax
//search (one OpenMP task per root move), then plays a randomly chosen move
//among the best-scoring ones. Returns env_check_end's non-zero result when
//the game is already over (no move is made in that case).
int ai_model3_play(GameState *gameState, Player *player, int maxStep)
{
int check_end=env_check_end(gameState,player);
if(check_end!=0)
{
env_free_container(gameState);
return check_end;
}
int MaxScore=-60000;
int container_size=gameState->moves_vector_cnt;
int total_num_moves=0;
// accu_container_size_arr[i] = index of square i's first move in the
// flattened MovesStart/MovesEnd arrays (exclusive prefix sum).
int *accu_container_size_arr=malloc(sizeof(int)*gameState->moves_vector_cnt);
for(int i=0;i<container_size;i++){
total_num_moves+=gameState->container[i].legal_moves.count;
if(i==0)accu_container_size_arr[0]=0;
else accu_container_size_arr[i]=accu_container_size_arr[i-1]+gameState->container[i-1].legal_moves.count;
}
int *MovesStart=malloc(sizeof(int)*total_num_moves);
int *MovesEnd=malloc(sizeof(int)*total_num_moves);
int *Scores=malloc(sizeof(int)*total_num_moves);
omp_set_num_threads(16);
omp_set_nested(1);
// Flatten the per-square legal-move vectors into parallel arrays.
for(int i=0;i<container_size;i++)
{
vector CurLegalMoves=gameState->container[i].legal_moves;
int cnt=CurLegalMoves.count;
int pos=gameState->container[i].pos;
for(int j=0;j<cnt;j++){
MovesStart[accu_container_size_arr[i]+j]=pos;
MovesEnd[accu_container_size_arr[i]+j]=vector_get(&CurLegalMoves,j);
}
}
int playerTurn=gameState->playerTurn;
#pragma omp parallel
{
#pragma omp single
for(int i=0;i<total_num_moves;i++)
{
#pragma omp task shared(gameState,player,MovesStart,MovesEnd,Scores,playerTurn)
{
GameState simulation=env_copy_State(gameState);
env_play(&simulation,player,MovesStart[i],MovesEnd[i]);
// BUGFIX: 'score' is now task-local. The original wrote through a
// single function-scope 'score' shared by every task -- a data race
// that could store another task's result into Scores[i].
int score=playerTurn*ai_model3_simulate(&simulation,player,maxStep);
Scores[i]=score;
env_free_state(&simulation);
}
}
#pragma omp taskwait
}
int BestMovesCnt=0;
vector BestMovesID;
vector_init(&BestMovesID);
// On a detected move repetition, accept any move whose score matches one of
// the six highest distinct scores (instead of only the maximum) so the
// engine can break out of repetition loops.
if(stack_check_repeated_move(gameState->moves_stack)){
int MaxScoresArr[6];
for(int i=0;i<6;i++)MaxScoresArr[i]=-60000;
int MinScoreID,MinScoreArrValue;
for(int i=0;i<total_num_moves;i++){
// Replace the smallest of the six kept scores when beaten.
MinScoreArrValue=MaxScoresArr[0];
MinScoreID=0;
for(int j=1;j<6;j++){
if(MaxScoresArr[j]<MinScoreArrValue){
MinScoreArrValue=MaxScoresArr[j];
MinScoreID=j;
}
}
MaxScoresArr[MinScoreID]=MAX(MaxScoresArr[MinScoreID],Scores[i]);
}
for(int i=0;i<total_num_moves;i++){
for(int j=0;j<6;j++){
if(Scores[i]==MaxScoresArr[j]){
vector_add(&BestMovesID,i);
BestMovesCnt++;
}
}
}
}
else{
for(int i=0;i<total_num_moves;i++)MaxScore=MAX(MaxScore,Scores[i]);
for(int i=0;i<total_num_moves;i++){
if(Scores[i]==MaxScore){
vector_add(&BestMovesID,i);
BestMovesCnt++;
}
}
}
// Choose uniformly among the tied best moves.
int id=vector_get(&BestMovesID,rand()%BestMovesCnt);
#ifdef CHECK_SCORE
printf("It is %d playing\n",gameState->playerTurn);
ai_print_board(gameState);
printf("Current Score is %d\n",ai_sum_scores(gameState,player));
#endif
env_play(gameState,player,MovesStart[id],MovesEnd[id]);
#ifdef CHECK_SCORE
// BUGFIX: MovesStart/MovesEnd are plain int arrays here, not vectors; the
// original vector_get() calls did not compile when CHECK_SCORE was defined.
printf("The player has decided to move from %d to %d\n",MovesStart[id],MovesEnd[id]);
ai_print_board(gameState);
printf("After making the move, the score is %d\n",ai_sum_scores(gameState,player));
#endif
vector_free(&BestMovesID);
// BUGFIX: accu_container_size_arr was leaked by the original.
free(accu_container_size_arr);
free(MovesStart);
free(MovesEnd);
free(Scores);
env_free_container(gameState);
return 0;
}
|
omp_parallel_for_if.c | <ompts:test>
<ompts:testdescription>Test which checks the omp parallel for if directive. Needs at least two threads.</ompts:testdescription>
<ompts:ompversion>2.0</ompts:ompversion>
<ompts:directive>omp parallel for if</ompts:directive>
<ompts:dependences></ompts:dependences>
<ompts:testcode>
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
int <ompts:testcode:functionname>omp_parallel_for_if</ompts:testcode:functionname>(FILE * logFile){
/* Verifies the if-clause of "#pragma omp parallel for": control stays 0,
   so the clause is false and the loop must execute serially (exactly one
   thread) while still computing the correct sum of 0..LOOPCOUNT. */
int known_sum;
<ompts:orphan:vars>
int num_threads;
int sum, sum2;
int i;
int control;
</ompts:orphan:vars>
control = 0;
num_threads=0;
sum = 0;
sum2 = 0;
/* With the if-clause false, this region must run with a team of one thread */
#pragma omp parallel for private(i) <ompts:check>if (control==1)</ompts:check>
<ompts:orphan>
for (i=0; i <= LOOPCOUNT; i++)
{
num_threads = omp_get_num_threads();
sum = sum + i;
} /*end of for*/
</ompts:orphan>
/* Gauss formula: expected sum of the integers 0..LOOPCOUNT */
known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
fprintf (logFile, "Number of threads determined by omp_get_num_threads: %d\n", num_threads);
return (known_sum == sum && num_threads == 1);
} /* end of check_parallel_for_private */
</ompts:testcode>
</ompts:test>
|
decoder.c | /*! @file
* @brief
*
* @version 1.0.0
*
* (C) Copyright 2017 GoPro Inc (http://gopro.com/).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "config.h"
#include "timing.h"
#include <stddef.h>
#include <math.h>
#include <memory.h>
#include <time.h>
#ifndef DEBUG
#define DEBUG (1 && _DEBUG)
#endif
#ifndef TIMING
#define TIMING (1 && _TIMING)
#endif
#ifndef XMMOPT
#define XMMOPT (1 && _XMMOPT)
#endif
#ifdef _WIN32
#include <windows.h>
#define PATH_MAX MAX_PATH
#endif
#include <stdio.h>
#include <assert.h>
#include <emmintrin.h> // Intel aligned alloc and free
#include "decoder.h"
#include "codec.h"
#include "vlc.h"
#include "codebooks.h" // References to the codebooks
#include "color.h" // Color formats supported by image processing routines
#include "image.h"
#include "filter.h"
#include "spatial.h"
#include "temporal.h"
#include "convert.h"
#include "wavelet.h"
#include "bitstream.h"
#include "frame.h"
#include "cpuid.h"
#include "bayer.h"
#include "metadata.h"
#include "demosaicframes.h"
#include "swap.h"
#include "RGB2YUV.h"
#include "lutpath.h"
extern void FastVignetteInplaceWP13(DECODER *decoder, int displayWidth, int width, int height, int y, float r1, float r2, float gain,
int16_t *sptr, int resolution, int pixelsize);
extern void FastSharpeningBlurHinplaceWP13(int width, int16_t *sptr, float sharpness, int resolution, int pixelsize);
extern void FastSharpeningBlurVWP13(short *Aptr,
short *Bptr,
short *Cptr,
short *Dptr,
short *Eptr,
int pitch,
int edgenear,
short *output,
int pixels,
float sharpness,
int resolution,
int channel_blend_type);
extern void FastSharpeningBlurVW13A(short *Aptr,
short *Bptr,
short *Cptr,
short *Dptr,
short *Eptr,
int pitch,
int edgenear,
short *output,
int pixels,
float sharpness,
int resolution,
int channel_blend_type);
#define ERROR_TOLERANT 1
#if defined(_WIN32) && DEBUG
#include <tchar.h> // For printing debug string in the console window
#endif
#define _DECODE_TRANSFORM 1 // Enable concurrent decoding and inverse transform
#define _TRANSFORM_FIELDPLUS 1 // Use the field plus transform
#if _SIF // In SIF resolution, enable the _DECODE_TRANSFORM switch
#if _DECODE_TRANSFORM == 0
#define _DECODE_TRANSFORM 1
#endif
#endif
#ifndef _FSMBUFFER
#define _FSMBUFFER 0
#endif
// Turn off saturation in this file
#ifdef SATURATE
#undef SATURATE
#endif
#define SATURATE(x) (assert(PIXEL_MIN <= (x) && (x) <= PIXEL_MAX), (x))
#define SATURATE8S(x) (assert(PIXEL8S_MIN <= (x) && (x) <= PIXEL8S_MAX), (x))
//#define SATURATE8S(x) SATURATE_8S(x)
//#define SATURATE(x) (x)
// Pixel size used for computing the compression ratio
#define BITS_PER_PIXEL 8
#define DEMOSAIC_DELAYLINES 4
// Forward references
void AllocDecoderGroup(DECODER *decoder);
bool AllocDecoderBuffer(DECODER *decoder, int width, int height, int format);
void EraseDecoderFrames(DECODER *decoder);
TRANSFORM *AllocGroupTransform(GROUP *group, int channel);
void EraseOutputBuffer(uint8_t *buffer, int width, int height, int32_t pitch, int format);
#if _DEBUG
bool DecodeBandFSM16sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, FILE *logfile);
#else
bool DecodeBandFSM16sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch);
#endif
bool DecodeBandFSM16sNoGapHighByte(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, int quant);
bool DecodeBandFSM16sNoGap2Pass(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, int quant);
void CopyLowpassRGB444ToBuffer(DECODER *decoder, IMAGE *image_array[], int num_channels,
uint8_t *output_buffer, int32_t output_pitch,
FRAME_INFO *info, int chroma_offset,
int precision);
extern void Row16uQuarter2OutputFormat(DECODER *decoder, FRAME_INFO *info, int thread_index,
uint8_t *output, int pitch, int frame, void *scratch, size_t scratch_size, int threading,
uint8_t *channeldata[TRANSFORM_MAX_CHANNELS], // used in quarter res decodes
int channelpitch[TRANSFORM_MAX_CHANNELS]); // used in quarter res decodes);
//extern void ComputeCube(DECODER *decoder);
extern bool NeedCube(DECODER *decoder);
//extern int g_topdown;
//extern int g_bottomup;
// Performance measurements
#if _TIMING
extern TIMER tk_decompress; // Timers
extern TIMER tk_decoding;
extern TIMER tk_convert;
extern TIMER tk_inverse;
extern COUNTER decode_byte_count; // Counters
extern COUNTER sample_byte_count;
extern COUNTER alloc_group_count;
extern COUNTER alloc_transform_count;
extern COUNTER alloc_buffer_count;
extern COUNTER spatial_decoding_count;
extern COUNTER temporal_decoding_count;
extern COUNTER progressive_decode_count;
#endif
static const int pixel_size_table[] =
{
0, // COLOR_FORMAT_UNKNOWN
2, // COLOR_FORMAT_UYVY
2, // COLOR_FORMAT_YUYV
2, // COLOR_FORMAT_YVYU
0, // COLOR_FORMAT_YV12
0, // COLOR_FORMAT_I420
2, // COLOR_FORMAT_RGB16
3, // COLOR_FORMAT_RGB24
4, // COLOR_FORMAT_RGB32
0,
3, // COLOR_FORMAT_V210
0, // COLOR_FORMAT_RGB10
4, // COLOR_FORMAT_YU64
4, // COLOR_FORMAT_YR16
4, // COLOR_FORMAT_YUVA
};
static const int pixel_size_table_length = sizeof(pixel_size_table) / sizeof(pixel_size_table[0]);
static int PixelSize(int format)
{
int pixel_size = 0;
// Mask off the other fields in the format descriptor
// Use the lookup table to determine the pixel size (if possible)
if (0 <= format && format < pixel_size_table_length)
{
pixel_size = pixel_size_table[format];
//return pixel_size;
}
//TODO: Change the rest of this routine into one big switch statement
// Is this an Avid format?
else if (COLOR_FORMAT_AVID <= format && format <= COLOR_FORMAT_AVID_END)
{
switch (format)
{
case COLOR_FORMAT_CbYCrY_8bit:
case COLOR_FORMAT_CbYCrY_10bit_2_8: // Only valid for the lower plane
pixel_size = 1;
break;
case COLOR_FORMAT_CbYCrY_16bit:
case COLOR_FORMAT_CbYCrY_16bit_2_14:
case COLOR_FORMAT_CbYCrY_16bit_10_6:
pixel_size = 2;
break;
default:
assert(0);
pixel_size = 2; // Assume 16 bits per pixel if the format is unknown
break;
}
}
// Is this a Bayer format?
else if (COLOR_FORMAT_BAYER <= format && format <= COLOR_FORMAT_BAYER_END)
{
pixel_size = (format - 100);
if (pixel_size > 2)
pixel_size = 2;
}
else if (format == COLOR_FORMAT_RG48)
pixel_size = 6;
else if (format == COLOR_FORMAT_RG64)
pixel_size = 8;
else if (format == COLOR_FORMAT_B64A)
{
pixel_size = 8;
}
return pixel_size;
}
// Return the size in bytes of one pixel in the decoded output format.
// Returns 0 (after asserting in debug builds) for packed formats whose
// rows are not a whole number of bytes per pixel, or for unknown formats;
// this routine must not be called with those formats.
int DecodedPixelSize(DECODED_FORMAT format)
{
    switch (format)
    {
        case DECODED_FORMAT_YUYV:
        case DECODED_FORMAT_CT_UCHAR:
            return 2;

        case DECODED_FORMAT_RGB32:
        case DECODED_FORMAT_CT_SHORT:
        case DECODED_FORMAT_CT_SHORT_2_14:
        case DECODED_FORMAT_CT_USHORT_10_6:
        case DECODED_FORMAT_ROW16U:
            return 4;

        case DECODED_FORMAT_RG48:
            return 6;

        case DECODED_FORMAT_CT_10Bit_2_8:
        case DECODED_FORMAT_V210:
            // This routine should not be called to compute the pixel sizes
            // for these packed formats
            assert(0);
            return 0;

        default:
            assert(0);
            return 0;
    }
}
// Greatest common divisor (Euclid's algorithm) on magnitudes; gcd(0,b) == b.
// Used to reduce the picture aspect ratio to lowest terms.
static int AspectRatioGCD(int a, int b)
{
    if (a < 0) a = -a;
    if (b < 0) b = -b;
    while (b != 0)
    {
        int r = a % b;
        a = b;
        b = r;
    }
    return a;
}

// Compute the display aspect ratio (*w : *h) for the decoded frame.
//
// The full-resolution frame size is first recovered from the decoded size
// and the decode resolution.  If per-pixel aspect metadata is present it is
// used (and the reduced picture aspect is written back into the codec
// state); otherwise, when the stored picture aspect is absent or is the
// untrustworthy 16:9 default, the ratio is guessed from the frame size.
void GetDisplayAspectRatio(DECODER *decoder, int *w, int *h)
{
    int origw, origh, guess = 0;
    origw = decoder->frame.width;
    origh = decoder->frame.height;

    // Scale the decoded dimensions back up to the original frame size
    switch (decoder->frame.resolution)
    {
        case DECODED_RESOLUTION_FULL:
            break;
        case DECODED_RESOLUTION_HALF:
            origw *= 2;
            origh *= 2;
            break;
        case DECODED_RESOLUTION_QUARTER:
            origw *= 4;
            origh *= 4;
            break;
        case DECODED_RESOLUTION_LOWPASS_ONLY:
            origw *= 8;
            origh *= 8;
            break;
        case DECODED_RESOLUTION_FULL_DEBAYER:
            break;
        case DECODED_RESOLUTION_HALF_NODEBAYER:
            origw *= 2;
            origh *= 2;
            break;
        case DECODED_RESOLUTION_QUARTER_NODEBAYER_SCALED:
            origw *= 4;
            origh *= 4;
            break;
        case DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER:
            //origw *= 2; //DAN20110129 -- seems the width has been corrected elsewhere or was never halved.
            break;
        case DECODED_RESOLUTION_HALF_HORIZONTAL:
            origw *= 2;
            break;
        case DECODED_RESOLUTION_HALF_VERTICAL:
            origh *= 2;
            break;
    }

    // Missing or non-positive aspect metadata: fall back to guessing
    if (decoder->codec.picture_aspect_x <= 0 || decoder->codec.picture_aspect_y <= 0)
        guess = 1;

    // if guess default values, we can't trust them
    if (decoder->codec.picture_aspect_x == 16 && decoder->codec.picture_aspect_y == 9)
        guess = 1;

    if (decoder->pixel_aspect_x && decoder->pixel_aspect_y)
    {
        int den, num, g;
        decoder->codec.picture_aspect_x = num = (origw * decoder->pixel_aspect_x) / decoder->pixel_aspect_y;
        decoder->codec.picture_aspect_y = den = origh;

        // Reduce num/den to lowest terms.  IMPROVED: Euclid's algorithm
        // replaces the original O(num+den) trial-division loop; the reduced
        // fraction is identical for the positive values produced above.
        g = AspectRatioGCD(num, den);
        if (g > 1)
        {
            num /= g;
            den /= g;
        }
        decoder->codec.picture_aspect_x = num;
        decoder->codec.picture_aspect_y = den;
        guess = 0;
    }

    if (guess)
    {
        if (origw > 720) //HD.
        {
            if (origh == 1080)
            {
                if (origw == 2048)
                    *w = origw, *h = origh;
                else
                    *w = 16, *h = 9; // assume 16x9
            }
            else if (origh == 720)
            {
                *w = 16, *h = 9; // assume 16x9
            }
            else
            {
                *w = origw, *h = origh; // assume square pixel.
            }
        }
        else
        {
            if (origh == 720)
            {
                *w = 16, *h = 9; // assume 16x9
            }
            else
            {
                *w = origw, *h = origh; // assume square pixel.
            }
        }
    }
    else
    {
        *w = decoder->codec.picture_aspect_x;
        *h = decoder->codec.picture_aspect_y;
    }
}
// Return true if the given decode resolution is one the decoder supports.
bool IsValidFrameResolution(int resolution)
{
    return (resolution == DECODED_RESOLUTION_FULL ||
            resolution == DECODED_RESOLUTION_HALF ||
            resolution == DECODED_RESOLUTION_QUARTER ||
            resolution == DECODED_RESOLUTION_LOWPASS_ONLY ||
            resolution == DECODED_RESOLUTION_HALF_HORIZONTAL ||
            resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER);
}
// Return true if this decoder can decode to quarter resolution
bool IsQuarterResolutionEnabled(DECODER *decoder)
{
    // Quarter-resolution decoding is unconditionally supported; the decoder
    // argument is currently unused.
    return true;
}
// Size in bytes of the DECODER structure, so callers can allocate decoder
// state without depending on the structure layout.
size_t DecoderSize()
{
    return sizeof(DECODER);
}
// Initialize the decoder: clears all state (preserving pre-set thread
// control parameters), installs the decoding codebooks, and initializes
// the codec state and scratch buffer descriptor.
//
// decoder - decoder instance to initialize (allocated by the caller)
// logfile - destination for status messages during decoding (may be NULL)
// cs      - array of CODEC_NUM_CODESETS codesets, or NULL for the default
void InitDecoder(DECODER *decoder, FILE *logfile, CODESET *cs)
{
#if (0 && DEBUG)
    if (logfile)
    {
        fprintf(logfile, "InitDecoder, decoder: 0x%p\n", decoder);
    }
#endif
    {
        //TODO: Clear the decoder before setting the CPU limit and affinity
        int i;
        //int thread_limit=0, thread_affinity=0, set_thread_params=0, capabilities=0;

        // Save the thread-control parameters so they survive the memset below
        Thread_cntrl saved_params = decoder->thread_cntrl;

        // Clear everything
        memset(decoder, 0, sizeof(DECODER));

        // Restore the saved thread-control parameters
        if (saved_params.set_thread_params == 1) // used by the DShow Interface
        {
            decoder->thread_cntrl = saved_params;
        }
#if _TIMING
        InitTiming();
#endif
        // Set the file for status information during decoding
        decoder->logfile = logfile;
        // Initialize the decoding error to no error
        decoder->error = CODEC_ERROR_OKAY;
        // Most recent marker found during decoding
        decoder->marker = 0;
        // Count of frames decoded
        decoder->frame_count = 0;
        // Set the codebooks that will be used for decoding
        if (cs != NULL)
        {
            // Use the codeset provided in the call
            for (i = 0; i < CODEC_NUM_CODESETS; i++)
            {
                // Codebook for decoding highpass coefficients
                decoder->magsbook[i] = cs[i].magsbook;
                // Codebook for decoding runs of coefficients
                decoder->runsbook[i] = cs[i].runsbook;
                // Lookup table for fast codebook search
                decoder->fastbook[i] = cs[i].fastbook;
            }
        }
        else
        {
            // Use the default codeset (only the first codeset slot is filled)
            decoder->magsbook[0] = cs9.magsbook;
            decoder->runsbook[0] = cs9.runsbook;
            decoder->fastbook[0] = cs9.fastbook;
        }
        // Initialize the codec state
        InitCodecState(&decoder->codec);
        InitScratchBuffer(&decoder->scratch, NULL, 0);
    }
#if _ALLOCATOR
    decoder->allocator = NULL;
#endif
    decoder->initialized = 1; //DAN20060912
}
// Free data allocated within the decoder
//
// Releases every buffer, transform, codebook, lookup table, and thread
// pool owned by the decoder and resets the corresponding pointers/sizes.
// Safe to call on a decoder that was never initialized (returns at once
// when decoder->initialized == 0).  Does not free the decoder structure
// itself and does not close the logfile.
void ClearDecoder(DECODER *decoder)
{
#if (DEBUG)
    FILE *logfile = decoder->logfile;
#endif
#if _ALLOCATOR
    ALLOCATOR *allocator = decoder->allocator;
#endif
    // Free the transforms allocated in the decoder
    int i;
    if (decoder->initialized == 0)
        return;
    if (decoder->sqrttable)
    {
#if _ALLOCATOR
        Free(decoder->allocator, decoder->sqrttable);
#else
        MEMORY_FREE(decoder->sqrttable);
#endif
        decoder->sqrttable = NULL;
    }
    for (i = 0; i < TRANSFORM_MAX_CHANNELS; i++)
    {
#if _ALLOCATOR
        FreeTransform(allocator, decoder->transform[i]);
#else
        FreeTransform(decoder->transform[i]);
#endif
        decoder->transform[i] = NULL;
    }
    if (decoder->aligned_sample_buffer)
    {
#if _ALLOCATOR
        FreeAligned(decoder->allocator, decoder->aligned_sample_buffer);
#else
        MEMORY_ALIGNED_FREE(decoder->aligned_sample_buffer);
#endif
        decoder->aligned_sample_buffer = NULL;
        decoder->aligned_sample_buffer_size = 0;
    }
    if (decoder->tools)
    {
#if _ALLOCATOR
        Free(decoder->allocator, decoder->tools);
#else
        MEMORY_FREE(decoder->tools);
#endif
        decoder->tools = NULL;
    }
    // Free the buffer allocated for decoding
    if (decoder->buffer != NULL)
    {
#if _ALLOCATOR
        FreeAligned(allocator, decoder->buffer);
#else
        MEMORY_ALIGNED_FREE(decoder->buffer);
#endif
        decoder->buffer = NULL;
        decoder->buffer_size = 0;
        // Clear the fields in the scratch buffer descriptor
        memset(&decoder->scratch, 0, sizeof(SCRATCH));
        // Eventually the buffer and buffer size fields will be obsolete
    }
    // Free the per-CPU scratch buffers used by the worker threads
    for (i = 0; i < _MAX_CPUS; i++)
    {
        if (decoder->threads_buffer[i])
        {
#if _ALLOCATOR
            FreeAligned(decoder->allocator, decoder->threads_buffer[i]);
#else
            MEMORY_ALIGNED_FREE(decoder->threads_buffer[i]);
#endif
            decoder->threads_buffer[i] = NULL;
        }
    }
    decoder->threads_buffer_size = 0;
    // Do not attempt to free the codebooks since the
    // codebook pointers are references to static tables
    // Can free some of the data structures allocated by the decoder
    FreeCodebooks(decoder);
#if _INTERLACED_WORKER_THREADS
    if (decoder->interlaced_worker.lock_init) // threads started
    {
        int i;
        // Signal this thread to stop
        SetEvent(decoder->interlaced_worker.stop_event);
        // Free all handles used by the worker threads
        for (i = 0; i < THREADS_IN_LAST_WAVELET; i++)
        {
            WaitForSingleObject(decoder->interlaced_worker.handle[i], UINT32_MAX); //JY20080307
            CloseHandle(decoder->interlaced_worker.handle[i]);
            CloseHandle(decoder->interlaced_worker.start_event[i]);
            CloseHandle(decoder->interlaced_worker.done_event[i]);
        }
        CloseHandle(decoder->interlaced_worker.row_semaphore);
        CloseHandle(decoder->interlaced_worker.stop_event);
        for (i = 0; i < THREADS_IN_LAST_WAVELET; i++)
        {
            decoder->interlaced_worker.handle[i] = 0;
            decoder->interlaced_worker.start_event[i] = 0;
            decoder->interlaced_worker.done_event[i] = 0;
        }
        decoder->interlaced_worker.row_semaphore = 0;
        decoder->interlaced_worker.stop_event = 0;
    }
    // Free the critical section used by the worker threads
    DeleteCriticalSection(&decoder->interlaced_worker.lock);
    decoder->interlaced_worker.lock_init = 0;
#endif
#if _THREADED
    // Tear down the thread pools (a zero thread count means never started)
    if (decoder->entropy_worker_new.pool.thread_count)
    {
        ThreadPoolDelete(&decoder->entropy_worker_new.pool);
        DeleteLock(&decoder->entropy_worker_new.lock);
    }
    if (decoder->worker_thread.pool.thread_count)
    {
        ThreadPoolDelete(&decoder->worker_thread.pool);
        DeleteLock(&decoder->worker_thread.lock);
    }
    if (decoder->draw_thread.pool.thread_count)
    {
        ThreadPoolDelete(&decoder->draw_thread.pool);
        DeleteLock(&decoder->draw_thread.lock);
    }
    /*
    if(decoder->qt_convert_worker.pool.thread_count)
    {
    ThreadPoolDelete(&decoder->qt_convert_worker.pool);
    DeleteLock(&decoder->qt_convert_worker.lock);
    }
    if(decoder->qt_scale_worker.pool.thread_count)
    {
    ThreadPoolDelete(&decoder->qt_scale_worker.pool);
    DeleteLock(&decoder->qt_scale_worker.lock);
    }
    */
    // Recursively clear and free the secondary decoder used for 3D work
    if (decoder->parallelDecoder)
    {
        if (decoder->parallelDecoder->decoder_thread.pool.thread_count)
        {
            ThreadPoolDelete(&decoder->parallelDecoder->decoder_thread.pool);
            DeleteLock(&decoder->parallelDecoder->decoder_thread.lock);
            decoder->parallelDecoder->decoder_thread.pool.thread_count = 0;
        }
        ClearDecoder(decoder->parallelDecoder);
#if _ALLOCATOR
        Free(decoder->allocator, decoder->parallelDecoder);
#else
        MEMORY_FREE(decoder->parallelDecoder);
#endif
        decoder->parallelDecoder = NULL;
    }
#endif // _THREADED
#if _ALLOCATOR
    if (decoder->RGBFilterBuffer16)
    {
        FreeAligned(decoder->allocator, decoder->RGBFilterBuffer16);
        decoder->RGBFilterBuffer16 = 0;
        decoder->RGBFilterBufferSize = 0;
    }
    if (decoder->RawBayer16)
    {
        FreeAligned(decoder->allocator, decoder->RawBayer16);
        decoder->RawBayer16 = 0;
        decoder->RawBayerSize = 0;
    }
    if (decoder->StereoBuffer)
    {
        FreeAligned(decoder->allocator, decoder->StereoBuffer);
        decoder->StereoBuffer = 0;
        decoder->StereoBufferSize = 0;
    }
    if (decoder->RawCube)
    {
        FreeAligned(decoder->allocator, decoder->RawCube);
        decoder->RawCube = 0;
    }
    if (decoder->Curve2Linear)
    {
        FreeAligned(decoder->allocator, decoder->Curve2Linear);
        decoder->Curve2Linear = 0;
    }
    if (decoder->Linear2CurveRed)
    {
        FreeAligned(decoder->allocator, decoder->Linear2CurveRed);
        decoder->Linear2CurveRed = NULL;
    }
    if (decoder->Linear2CurveGrn)
    {
        FreeAligned(decoder->allocator, decoder->Linear2CurveGrn);
        decoder->Linear2CurveGrn = NULL;
    }
    if (decoder->Linear2CurveBlu)
    {
        FreeAligned(decoder->allocator, decoder->Linear2CurveBlu);
        decoder->Linear2CurveBlu = NULL;
    }
    if (decoder->BYR4LinearRestore)
    {
        FreeAligned(decoder->allocator, decoder->BYR4LinearRestore);
        decoder->BYR4LinearRestore = NULL;
    }
    if (decoder->GammaContrastRed)
    {
        FreeAligned(decoder->allocator, decoder->GammaContrastRed);
        decoder->GammaContrastRed = NULL;
    }
    if (decoder->GammaContrastGrn)
    {
        FreeAligned(decoder->allocator, decoder->GammaContrastGrn);
        decoder->GammaContrastGrn = NULL;
    }
    if (decoder->GammaContrastBlu)
    {
        FreeAligned(decoder->allocator, decoder->GammaContrastBlu);
        decoder->GammaContrastBlu = NULL;
    }
    //3d LUT
    {
        if (decoder->LUTcache)
            Free(decoder->allocator, decoder->LUTcache);
        decoder->LUTcache = NULL;
        decoder->LUTcacheCRC = 0;
    }
    for (i = 0; i < 64; i++)
    {
        if (decoder->mdc[i])
            Free(decoder->allocator, decoder->mdc[i]);
        decoder->mdc[i] = NULL;
        decoder->mdc_size[i] = 0;
    }
#else // _ALLOCATOR
    // NOTE(review): unlike the _ALLOCATOR branch above, this branch does not
    // reset RGBFilterBufferSize or RawBayerSize, and it additionally frees
    // overrideData (absent above) -- confirm whether these asymmetries are
    // intentional.
    if (decoder->RGBFilterBuffer16)
    {
        MEMORY_ALIGNED_FREE(decoder->RGBFilterBuffer16);
        decoder->RGBFilterBuffer16 = NULL;
    }
    if (decoder->RawBayer16)
    {
        MEMORY_ALIGNED_FREE(decoder->RawBayer16);
        decoder->RawBayer16 = NULL;
    }
    if (decoder->StereoBuffer)
    {
        MEMORY_ALIGNED_FREE(decoder->StereoBuffer);
        decoder->StereoBuffer = NULL;
        decoder->StereoBufferSize = 0;
    }
    if (decoder->RawCube)
    {
        MEMORY_ALIGNED_FREE(decoder->RawCube);
        decoder->RawCube = NULL;
    }
    if (decoder->Curve2Linear)
    {
        MEMORY_ALIGNED_FREE(decoder->Curve2Linear);
        decoder->Curve2Linear = NULL;
    }
    if (decoder->BYR4LinearRestore)
    {
        MEMORY_ALIGNED_FREE(decoder->BYR4LinearRestore);
        decoder->BYR4LinearRestore = NULL;
    }
    if (decoder->Linear2CurveRed)
    {
        MEMORY_ALIGNED_FREE(decoder->Linear2CurveRed);
        decoder->Linear2CurveRed = NULL;
    }
    if (decoder->Linear2CurveGrn)
    {
        MEMORY_ALIGNED_FREE(decoder->Linear2CurveGrn);
        decoder->Linear2CurveGrn = NULL;
    }
    if (decoder->Linear2CurveBlu)
    {
        MEMORY_ALIGNED_FREE(decoder->Linear2CurveBlu);
        decoder->Linear2CurveBlu = NULL;
    }
    if (decoder->GammaContrastRed)
    {
        MEMORY_ALIGNED_FREE(decoder->GammaContrastRed);
        decoder->GammaContrastRed = NULL;
    }
    if (decoder->GammaContrastGrn)
    {
        MEMORY_ALIGNED_FREE(decoder->GammaContrastGrn);
        decoder->GammaContrastGrn = NULL;
    }
    if (decoder->GammaContrastBlu)
    {
        MEMORY_ALIGNED_FREE(decoder->GammaContrastBlu);
        decoder->GammaContrastBlu = NULL;
    }
    //3d LUT
    {
        if (decoder->LUTcache)
            MEMORY_FREE(decoder->LUTcache);
        decoder->LUTcache = NULL;
        decoder->LUTcacheCRC = 0;
    }
    if (decoder->overrideData)
    {
        MEMORY_FREE(decoder->overrideData);
        decoder->overrideData = NULL;
        decoder->overrideSize = 0;
    }
    for (i = 0; i < 64; i++)
    {
        if (decoder->mdc[i])
            MEMORY_FREE(decoder->mdc[i]);
        decoder->mdc[i] = NULL;
        decoder->mdc_size[i] = 0;
    }
#endif // _ALLOCATOR
    decoder->initialized = 0;// cleared
}
// Shut down the decoder by releasing everything it allocated.
// Thin wrapper around ClearDecoder; does not free the DECODER struct itself.
void ExitDecoder(DECODER *decoder)
{
    // Let the caller keep the logfile open or choose to close it
    //if (logfile) fclose(logfile);
    // Free data allocated within the decoder
    ClearDecoder(decoder);
}
// Allocate the data structures for decoding a group
//
// Ensures every channel slot has a zero-initialized TRANSFORM allocated.
// On allocation failure, sets decoder->error to
// CODEC_ERROR_TRANSFORM_MEMORY and returns early.
void AllocDecoderGroup(DECODER *decoder)
{
#if _ALLOCATOR
    ALLOCATOR *allocator = decoder->allocator;
#endif
    //CODEC_STATE *codec = &decoder->codec;
    //int num_channels = codec->num_channels;//DAN07022004
    int channel;
    assert(decoder->codec.num_channels <= TRANSFORM_MAX_CHANNELS); //DAN07022004
    // Allocate all channel slots, not just the active ones //DAN07022004
    for (channel = 0; channel < TRANSFORM_MAX_CHANNELS; channel++)//DAN07022004
    {
        TRANSFORM *transform = decoder->transform[channel];
        // Need to allocate a transform data structure?
        if (transform == NULL)
        {
#if _ALLOCATOR
            transform = (TRANSFORM *)Alloc(allocator, sizeof(TRANSFORM));
#else
            transform = (TRANSFORM *)MEMORY_ALLOC(sizeof(TRANSFORM));
#endif
            assert(transform != NULL);
            if (transform == NULL)
            {
                decoder->error = CODEC_ERROR_TRANSFORM_MEMORY;
                return;
            }
            memset(transform, 0, sizeof(TRANSFORM));
            decoder->transform[channel] = transform;
#if _TIMING
            alloc_transform_count++;
#endif
        }
    }
}
// Allocate the buffer used for intermediate results during decoding
//
// Sizes a scratch buffer from the frame width, output format, and CPU
// count; reuses an existing buffer when it is already large enough,
// otherwise reallocates.  Also (re)allocates one scratch buffer per CPU
// for the debayer/color-formatting threads.  Returns false on allocation
// failure, true on success.
bool AllocDecoderBuffer(DECODER *decoder, int width, int height, int format)
{
    int cpus;
    size_t size;
    size_t row_size;
    char *buffer;
#if 0
    // Allocate a buffer large enough for six rows of cache lines
    size = width * sizeof(PIXEL);
    size = ALIGN(size, _CACHE_LINE_SIZE);
    size = 2 * TRANSFORM_MAX_CHANNELS * size;
#else
    // Allocate a buffer large enough for nine rows of cache lines
    size = width * sizeof(PIXEL) * 4;
    size = ALIGN(size, _CACHE_LINE_SIZE);
    size = 3 * TRANSFORM_MAX_CHANNELS * size;
#endif
    // Add format-specific extra rows to the base buffer size
    switch (format)
    {
        case DECODED_FORMAT_V210:
        case DECODED_FORMAT_YU64:
            // Increase the buffer size for decoding to the V210/YU64 formats
            row_size = 4 * width * sizeof(PIXEL);
            row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
            size += 4 * 2 * row_size;
            break;
        case DECODED_FORMAT_YR16:
        case DECODED_FORMAT_CbYCrY_10bit_2_8:
        case DECODED_FORMAT_CbYCrY_16bit_2_14:
        case DECODED_FORMAT_CbYCrY_16bit_10_6:
            // Increase the buffer size for decoding to the YUV16 formats
            row_size = 4 * width * sizeof(PIXEL);
            row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
            size += 8 * 2 * row_size;
            break;
        case DECODED_FORMAT_RG48:
        case DECODED_FORMAT_WP13:
            // Increase the buffer size for decoding to the RG48/WP13 formats
            row_size = 6 * width * sizeof(PIXEL);
            row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
            size += 12 * 2 * row_size;
            break;
        case DECODED_FORMAT_RG64:
            // Increase the buffer size for decoding to the RG64 format
            row_size = 8 * width * sizeof(PIXEL);
            row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
            size += 16 * 2 * row_size;
            break;
        case DECODED_FORMAT_BYR3:
            // Increase the buffer size for decoding to the BYR3 format
            row_size = 2 * width * sizeof(PIXEL);
            row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
            size += 4 * 2 * row_size;
            break;
        case DECODED_FORMAT_BYR4:
            // Increase the buffer size for decoding to the BYR4 format
            row_size = 2 * width * sizeof(PIXEL);
            row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
            size += 4 * 2 * row_size;
            break;
        case DECODED_FORMAT_B64A:
        case DECODED_FORMAT_W13A:
            // Increase the buffer size for decoding to the B64A format
            row_size = 8 * width * sizeof(PIXEL);
            row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
            size += 16 * 2 * row_size;
            break;
        default:
            // Increase the buffer size for YUV to RGB conversion
            row_size = 3 * width * sizeof(PIXEL);
            row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
            size += 2 * 2 * row_size;
            break;
    }
    // CPU count is packed into the upper 16 bits of the capabilities word
    cpus = decoder->thread_cntrl.capabilities >> 16;
    if (cpus > 4)
        size *= 4;
    if (cpus > 16) //DAN20120803 -- 4444 clips
        size *= 2;
    // Has a buffer already been allocated?
    if (decoder->buffer != NULL)
    {
        // Is the buffer large enough?
        if (decoder->buffer_size < size)
        {
            // Free the previous buffer
#if _ALLOCATOR
            FreeAligned(decoder->allocator, decoder->buffer);
#else
            MEMORY_ALIGNED_FREE(decoder->buffer);
#endif
            decoder->buffer = NULL;
            decoder->buffer_size = 0;
        }
        else
        {
            // Existing buffer is large enough; keep it
            return true;
        }
    }
    buffer = decoder->buffer;
    if (buffer == NULL)
    {
        // Allocate the decoding buffer
#if _ALLOCATOR
        buffer = (char *)AllocAligned(decoder->allocator, size, _CACHE_LINE_SIZE);
#else
        buffer = (char *)MEMORY_ALIGNED_ALLOC(size, _CACHE_LINE_SIZE);
#endif
        if (buffer == NULL)
        {
            return false;
        }
    }
    // Save the buffer and its size in the decoder
    decoder->buffer = buffer;
    decoder->buffer_size = size;
    // Initialize the scratch space descriptor
    InitScratchBuffer(&decoder->scratch, buffer, size);
    // allocate buffer for each debayer/color formating thread
    {
        int i;
        size = (width + 16) * 3 * 2 * 4 * 2 * 4; // sixteen lines
        if (height * 4 > width * 3) //square or tall images where running out of scratch space for zooms.
            size *= 1 + ((height + (width / 2)) / width);
        // If the per-thread buffers are too small, free them all first
        if (decoder->threads_buffer_size < size)
        {
            for (i = 0; i < _MAX_CPUS; i++)
            {
                if (decoder->threads_buffer[i])
                {
#if _ALLOCATOR
                    FreeAligned(decoder->allocator, decoder->threads_buffer[i]);
#else
                    MEMORY_ALIGNED_FREE(decoder->threads_buffer[i]);
#endif
                    decoder->threads_buffer[i] = NULL;
                }
            }
            decoder->threads_buffer_size = 0;
        }
        // Allocate one scratch buffer per CPU
        for (i = 0; i < cpus; i++)
        {
            if (decoder->threads_buffer[i] == NULL)
            {
#if _ALLOCATOR
                decoder->threads_buffer[i] = (char *)AllocAligned(decoder->allocator, size, _CACHE_LINE_SIZE);
#else
                decoder->threads_buffer[i] = (char *)MEMORY_ALIGNED_ALLOC(size, _CACHE_LINE_SIZE);
#endif
                if (decoder->threads_buffer[i] == NULL)
                {
                    return false;
                }
            }
        }
        decoder->threads_buffer_size = size;
    }
    // Eventually the scratch space descriptor will replace the buffer and buffer_size fields
    return true;
}
// Resize the decoder scratch buffer for new frame dimensions/format.
// Thin wrapper over AllocDecoderBuffer, which already reuses or grows the
// buffer as needed.  Returns false on allocation failure.
bool ResizeDecoderBuffer(DECODER *decoder, int width, int height, int format)
{
    // Check that the dimensions are valid
    assert(width > 0);
    assert(height > 0);
    // Just call the allocation routine
    return AllocDecoderBuffer(decoder, width, height, format);
}
// Reset the per-band bookkeeping of every allocated wavelet in all channel
// transforms, so that no band is marked valid or started.
void ClearTransformFlags(DECODER *decoder)
{
    int channel;
    for (channel = 0; channel < TRANSFORM_MAX_CHANNELS; channel++)
    {
        TRANSFORM *transform = decoder->transform[channel];
        int k;

        // The transform array is filled from the front; the first empty
        // slot terminates the scan.
        if (transform == NULL) break;

        for (k = 0; k < TRANSFORM_MAX_WAVELETS; k++)
        {
            IMAGE *wavelet = transform->wavelet[k];
            if (wavelet == NULL) continue;
            wavelet->band_valid_flags = 0;
            wavelet->band_started_flags = 0;
        }
    }
}
// Load the subband-to-wavelet and subband-to-band lookup tables used while
// decoding the wavelet transforms.  Entries beyond num_subbands are zeroed.
void InitWaveletDecoding(DECODER *decoder, int subband_wavelet_index[], int subband_band_index[], int num_subbands)
{
    const size_t copy_size = num_subbands * sizeof(int);

    // Clear both tables first so unused trailing entries are zero, then
    // copy in the caller-supplied mappings.
    memset(decoder->subband_wavelet_index, 0, sizeof(decoder->subband_wavelet_index));
    memset(decoder->subband_band_index, 0, sizeof(decoder->subband_band_index));
    memcpy(decoder->subband_wavelet_index, subband_wavelet_index, copy_size);
    memcpy(decoder->subband_band_index, subband_band_index, copy_size);
}
#if 0
static bool IsValidFormat(int format)
{
bool valid_format = true;
//TODO: Change this routine into a switch statement
if (format == COLOR_FORMAT_BYR5)
return true; // can decode to BYR5
if (format == COLOR_FORMAT_BYR4)
return true; // can decode to BYR4
if (format == COLOR_FORMAT_BYR3)
return true; // can decode to BYR3
if (format == COLOR_FORMAT_BYR2)
return true; // can decode to BYR2
if (format == COLOR_FORMAT_RG48)
return true; // can decode to RGB48
if (format == COLOR_FORMAT_RG64)
return true; // can decode to RGBA64
if (format == COLOR_FORMAT_B64A)
{
return true; // Can decode to B64A
}
if (!(COLOR_FORMAT_UNKNOWN < format && format <= MAX_DECODED_COLOR_FORMAT))
{
valid_format = false;
}
return valid_format;
}
#endif
#if _INTERLACED_WORKER_THREADS
// Create the Win32 events, semaphore, critical section, and threads used
// by the interlaced worker pool.  Idempotent: does nothing if the workers
// were already started (lock_init != 0).  Torn down in ClearDecoder.
void StartInterlaceWorkerThreads(DECODER *decoder)
{
    int i;
    if (decoder->interlaced_worker.lock_init == 0)
    {
        // Create events for starting the worker threads
        for (i = 0; i < THREADS_IN_LAST_WAVELET; i++)
        {
            decoder->interlaced_worker.start_event[i] = CreateEvent(NULL, false, false, NULL);
        }
        // Create a semaphore to signal the worker threads to process rows
        decoder->interlaced_worker.row_semaphore = CreateSemaphore(NULL, 0, LONG_MAX, NULL);
        // Create an event for each worker thread to signal that it has finished
        for (i = 0; i < THREADS_IN_LAST_WAVELET; i++)
        {
            decoder->interlaced_worker.done_event[i] = CreateEvent(NULL, false, false, NULL);
        }
        // Create an event for forcing the worker threads to terminate
        decoder->interlaced_worker.stop_event = CreateEvent(NULL, true, false, NULL);
        // Zero the count of worker threads that are active
        decoder->interlaced_worker.thread_count = 0;
        // Initialize the lock for controlling access to the worker thread data
        InitializeCriticalSection(&decoder->interlaced_worker.lock);
        decoder->interlaced_worker.lock_init = 1;
        // Launch the worker threads themselves
        for (i = 0; i < THREADS_IN_LAST_WAVELET; i++)
        {
            decoder->interlaced_worker.id[i] = 0;
            decoder->interlaced_worker.handle[i] = CreateThread(NULL, 0, InterlacedWorkerThreadProc, decoder, 0, &decoder->interlaced_worker.id[i]);
            assert(decoder->interlaced_worker.handle[i] != NULL);
        }
    }
}
#endif
#if 0
int TestException(int x)
{
static volatile int y1 = 100;
volatile int x1 = x;
return y1 / x1;
}
#endif
// Process device driver request to initialize the decoder
//
// Prepares a DECODER for decoding samples of the given dimensions and output
// format: installs the codebooks, detects CPU capabilities, optionally starts
// the worker thread pools, and allocates the group/buffer data structures.
// Returns false if the codebooks or the decoding buffer could not be
// initialized; returns true on success.
#if _ALLOCATOR
bool DecodeInit(ALLOCATOR *allocator, DECODER *decoder, int width, int height, int format, int resolution, FILE *logfile)
#else
bool DecodeInit(DECODER *decoder, int width, int height, int format, int resolution, FILE *logfile)
#endif
{
    CODESET codesets[CODEC_NUM_CODESETS];
    int i;
    int cpus;
    //int x = 0;

    // Copy the compiled-in codesets into a local array (one, two, or three
    // codesets depending on the build configuration)
#if CODEC_NUM_CODESETS == 3
    memcpy(&codesets[0], &CURRENT_CODESET, sizeof(CODESET));
    memcpy(&codesets[1], &SECOND_CODESET, sizeof(CODESET));
    memcpy(&codesets[2], &THIRD_CODESET, sizeof(CODESET));
#elif CODEC_NUM_CODESETS == 2
    memcpy(&codesets[0], &CURRENT_CODESET, sizeof(CODESET));
    memcpy(&codesets[1], &SECOND_CODESET, sizeof(CODESET));
#else
    memcpy(&codesets[0], &CURRENT_CODESET, sizeof(CODESET));
#endif

    // Clear all decoder fields except the logfile and set the codebooks for decoding
    InitDecoder(decoder, logfile, &codesets[0]);

#if _ALLOCATOR
    decoder->allocator = allocator;
#endif

    if (decoder->thread_cntrl.capabilities == 0)
    {
        // Determine the processor capabilities
        SetDecoderCapabilities(decoder);
    }

    // The CPU count is stored in the upper 16 bits of the capabilities word
    cpus = decoder->thread_cntrl.capabilities >> 16;
    assert(cpus > 0 && cpus <= _MAX_CPUS);

    // Decode to half resolution?
    if (resolution == DECODED_RESOLUTION_HALF)
    {
        // Reduce the frame size by half in each dimension
        width = width / 2;
        height = height / 2;
    }
    else if (resolution == DECODED_RESOLUTION_QUARTER)
    {
        // Reduce the frame size by one fourth in each dimension
        width = width / 4;
        height = height / 4;
    }

    // Initialize the codebooks
#if _ALLOCATOR
    if (!InitCodebooks(decoder->allocator, codesets))
    {
        //decoder->error = CODEC_ERROR_INIT_CODEBOOKS;
        // The subroutine has already set the error code
        return false;
    }
#else
    if (!InitCodebooks(codesets))
    {
        //decoder->error = CODEC_ERROR_INIT_CODEBOOKS;
        // The subroutine has already set the error code
        return false;
    }
#endif

    // Initialize the FSM
    InitDecoderFSM(decoder, &codesets[0]);

    // Check the frame dimensions and format
    //assert(width > 0);
    //assert(height > 0);
    // assert(IsValidFormat(format));

#if _THREADED_DECODER
    // Create a semaphore to signal the transform thread to begin processing
    // Initialize the transform queue
    decoder->transform_queue.started = 0;
    decoder->transform_queue.num_entries = 0;
    decoder->transform_queue.next_entry = 0;
    decoder->transform_queue.free_entry = 0;
    memset(decoder->transform_queue.queue, 0, sizeof(decoder->transform_queue.queue));
#endif

#if _INTERLACED_WORKER_THREADS && _DELAY_THREAD_START==0
    StartInterlaceWorkerThreads(decoder);
#endif

#if _THREADED
#if !_DELAY_THREAD_START //start threads now if not _DELAY_THREAD_START
    if (cpus > 1)
    {
        // The entropy worker pool is capped at four threads
        int threads = cpus;
        if (threads > 4)
            threads = 4;
        CreateLock(&decoder->entropy_worker_new.lock);
        // Initialize the pool of transform worker threads
        ThreadPoolCreate(&decoder->entropy_worker_new.pool,
                         threads,
                         EntropyWorkerThreadProc,
                         decoder);
    }
    // Initialize the lock that controls access to the generic worker thread data
    CreateLock(&decoder->worker_thread.lock);
    // Initialize the pool of transform worker threads
    ThreadPoolCreate(&decoder->worker_thread.pool,
                     cpus,
                     WorkerThreadProc,
                     decoder);
#endif
#endif

    // Set the frame dimensions and format
    SetDecoderFormat(decoder, width, height, format, resolution);

    // Allocate the data structure for decoding the samples
    AllocDecoderGroup(decoder);

    // Note that this code assumes that the samples to decode are groups
    // as opposed to isolated frames which are not supported in this code

    // Allocate a buffer for storing intermediate results during decoding
    if (!AllocDecoderBuffer(decoder, width, height, format))
    {
        return false;
    }

    // Should check that the finite state machine tables were initialized
    // NOTE(review): asserts flags < 0 — presumably a negative flags value
    // marks an initialized table; confirm against the FSM table definition.
    assert(decoder->fsm[0].table.flags < 0);

    // Initialize the finite state machine for this decoder
    for (i = 0; i < CODEC_NUM_CODESETS; i++)
    {
        InitFSM(&decoder->fsm[i], codesets[i].fsm_table);
#if _COMPANDING
        // Scale the values in the finite state machine entries for companding
        ScaleFSM(&decoder->fsm[i].table);
#endif
    }

    // Indicate that the decoder has been initialized
    decoder->state = DECODER_STATE_INITIALIZED;

#if _TIMING
    // Initialize the global timers and counters
    InitTiming();
#endif

    //DAN20160203 Fix for a memory leak in InitCookbooks
    // Release the per-codeset tables that InitCodebooks allocated; they are
    // no longer needed once the decoder FSM has been initialized.
    for (i = 0; i < CODEC_NUM_CODESETS; i++)
    {
#if _ALLOCATOR
        Free(allocator, codesets[i].codebook_runbook);
        codesets[i].codebook_runbook = NULL;
        Free(allocator, codesets[i].fastbook);
        codesets[i].fastbook = NULL;
        Free(allocator, codesets[i].valuebook);
        codesets[i].valuebook = NULL;
#else
        MEMORY_FREE(codesets[i].codebook_runbook);
        codesets[i].codebook_runbook = NULL;
        MEMORY_FREE(codesets[i].fastbook);
        codesets[i].fastbook = NULL;
        MEMORY_FREE(codesets[i].valuebook);
        codesets[i].valuebook = NULL;
#endif
    }

    // The decoder has been initialized successfully
    return true;
}
// Initialize the entropy decoding worker thread pool for this decoder.
//
// Determines the usable CPU count (applying the caller-supplied cpu_limit
// from cfhddata if set) and, in delayed-thread-start builds, creates the
// entropy worker thread pool on first use.
void DecodeEntropyInit(DECODER *decoder)
{
    int cpus = 1;

    if (decoder->thread_cntrl.capabilities == 0)
    {
        // Determine the processor capabilities
        SetDecoderCapabilities(decoder);
    }

    // The CPU count is stored in the upper 16 bits of the capabilities word
    cpus = decoder->thread_cntrl.capabilities >> 16;

    // Apply the user-imposed CPU limit (if any) and write the reduced
    // count back into the capabilities word
    if (cpus > (int)decoder->cfhddata.cpu_limit && decoder->cfhddata.cpu_limit)
    {
        cpus = decoder->cfhddata.cpu_limit;
        decoder->thread_cntrl.limit = cpus;
        decoder->thread_cntrl.set_thread_params = 1;
        decoder->thread_cntrl.capabilities &= 0xffff;
        decoder->thread_cntrl.capabilities |= cpus << 16;
    }
    assert(cpus > 0 && cpus <= _MAX_CPUS);

#if _THREADED
#if _DELAY_THREAD_START //start threads now if not _DELAY_THREAD_START
    // Create the entropy worker pool only once (thread_count == 0) and only
    // when more than one CPU is available; the pool is capped at four threads
    if (cpus > 1 && decoder->entropy_worker_new.pool.thread_count == 0)
    {
        int threads = cpus;
        if (threads > 4)
            threads = 4;
        CreateLock(&decoder->entropy_worker_new.lock);
        // Initialize the pool of transform worker threads
        ThreadPoolCreate(&decoder->entropy_worker_new.pool,
                         threads,
                         EntropyWorkerThreadProc,
                         decoder);
    }
#endif
#endif
}
// Return the transform for the specified channel in the group, allocating
// and zeroing a new TRANSFORM on first use.  Returns NULL if the channel
// number is out of range or the allocation fails.
TRANSFORM *AllocGroupTransform(GROUP *group, int channel)
{
#if _ALLOCATOR
    //TODO:ALLOC Change this routine to take an allocator as the first argument
    ALLOCATOR *allocator = NULL;
#endif
    TRANSFORM *transform = NULL;

    // Channel zero is a special case because it may mean that the group
    // header has not been decoded yet, so only range-check other channels.
    if (channel != 0)
    {
        bool channel_in_range = (0 <= channel && channel < group->header.num_channels);
        assert(channel_in_range);
        if (!channel_in_range)
        {
            return NULL;
        }
    }

    transform = group->transform[channel];
    if (transform != NULL)
    {
        // Reuse the transform that was allocated earlier
        return transform;
    }

    // Allocate and zero a new transform data structure for this channel
#if _ALLOCATOR
    transform = (TRANSFORM *)Alloc(allocator, sizeof(TRANSFORM));
#else
    transform = (TRANSFORM *)MEMORY_ALLOC(sizeof(TRANSFORM));
#endif
    assert(transform != NULL);
    if (transform == NULL)
    {
        return NULL;
    }
    memset(transform, 0, sizeof(TRANSFORM));
    group->transform[channel] = transform;

#if _TIMING
    alloc_transform_count++;
#endif

    return transform;
}
//extern FILE *logfile;
// Fill the output frame buffer with the black pattern for the given format.
//
// For DECODED_FORMAT_YUYV the black pixel is a four-byte pattern of
// (luma, chroma, luma, chroma); for all other formats the buffer is zeroed.
//
// BUG FIX: the original implementation called memset(buffer, output.word, size).
// memset converts its value argument to unsigned char and replicates only that
// single byte, so the four-byte YUYV pattern was collapsed to one byte value.
// The pattern is now replicated byte-by-byte across the buffer.
void EraseOutputBuffer(uint8_t *buffer, int width, int height, int32_t pitch, int format)
{
    size_t size = height * pitch;
    size_t i;
    union
    {
        uint8_t byte[4];
        uint32_t word;
    } output;

    switch (format)
    {
    case DECODED_FORMAT_YUYV:
        output.byte[0] = COLOR_LUMA_BLACK;
        output.byte[1] = COLOR_CHROMA_ZERO;
        output.byte[2] = COLOR_LUMA_BLACK;
        output.byte[3] = COLOR_CHROMA_ZERO;
        break;

    default:
        //if (logfile) fprintf(logfile,"**Unknown format: %d\n", format);
        //assert(0);
        output.word = 0;
        break;
    }

    // Replicate the full four-byte pattern (equivalent to memset when all
    // four bytes are identical, e.g. the zero-fill default case)
    for (i = 0; i < size; i++)
    {
        buffer[i] = output.byte[i & 3];
    }
}
// Decode the coefficients in a subband
bool DecodeSampleSubband(DECODER *decoder, BITSTREAM *input, int subband);
// Decode the coefficients in a lowpass band
bool DecodeSampleLowPassBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet);
// Decode the coefficients in a highpass band
bool DecodeSampleHighPassBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band, int threading);
// Decode an empty band
bool DecodeSampleEmptyBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band);
bool DecodeBand16s(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet,
int band_index, int width, int height);
bool DecodeBand16sLossless(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet,
int band_index, int width, int height);
// Decode a sample channel header
bool DecodeSampleChannelHeader(DECODER *decoder, BITSTREAM *input);
// Apply the inverse horizontal-temporal transform to reconstruct the output frame
void ReconstructSampleFrameToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch);
// Reconstruct the frame to quarter resolution at full frame rate
void ReconstructQuarterFrame(DECODER *decoder, int num_channels,
int frame_index, uint8_t *output, int output_pitch,
FRAME_INFO *info, const SCRATCH *scratch, int precision);
// Convert the quarter resolution lowpass channels to the specified output format
void ConvertQuarterFrameToBuffer(DECODER *decoder, TRANSFORM **transform_array, int num_channels,
uint8_t *output, int output_pitch,
FRAME_INFO *info, int precision);
// Routines for converting the new encoded formats to the requested output format
CODEC_ERROR ReconstructSampleFrameRGB444ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameRGBA4444ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameYUVA4444ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch);
// The first Bayer routine calls the other Bayer routines for the decoded resolution
CODEC_ERROR ReconstructSampleFrameBayerToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameDeBayerFullToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameBayerFullToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameBayerHalfToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameBayerQuarterToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch);
CODEC_ERROR UncompressedSampleFrameBayerToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR UncompressedSampleFrameYUVToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR UncompressedSampleFrameRGBToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
// New code for handling the original YUV 4:2:2 encoded format
CODEC_ERROR ReconstructSampleFrameYUV422ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch);
// Return true if the rest of the channel does not have to be decoded
//
// The decision logic is identical for both transform types except for the
// bitmasks that define which subbands are required at half and quarter
// resolution, so the shared logic lives in one helper.

// Helper: decide whether the remaining subbands can be skipped, given the
// required subband masks for half and quarter resolution decoding.
static bool ChannelSubbandsComplete(DECODER *decoder, int resolution,
                                    uint32_t half_mask, uint32_t quarter_mask)
{
    CODEC_STATE *codec = &decoder->codec;

    switch (resolution)
    {
    case DECODED_RESOLUTION_HALF:
        if (decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
            return ((codec->decoded_subband_flags & half_mask) == half_mask);
        break;

    case DECODED_RESOLUTION_QUARTER:
        if (decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
            return ((codec->decoded_subband_flags & quarter_mask) == quarter_mask);
        break;

    case DECODED_RESOLUTION_LOWPASS_ONLY:
        // Only the lowpass band (subband zero) is needed
        return (codec->decoded_subband_flags & 1);

    default:
        if (decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
        {
            if (decoder->frame.format == DECODED_FORMAT_YUYV || decoder->frame.format == DECODED_FORMAT_UYVY)
            {
                // If we are requesting a YUV decode we don't need the 4th channel
                if (codec->channel == 3)
                {
                    return true;
                }
            }
        }
        break;
    }

    // Cannot skip the rest of the channel
    return false;
}

static bool CanSkipChannel(DECODER *decoder, int resolution)
{
    CODEC_STATE *codec = &decoder->codec;
    int channel = codec->channel;
    TRANSFORM *transform = decoder->transform[channel];
    int transform_type = transform->type;

    if (transform_type == TRANSFORM_TYPE_FIELDPLUS)
    {
        return ChannelSubbandsComplete(decoder, resolution,
                                       DECODED_SUBBAND_MASK_HALF,
                                       DECODED_SUBBAND_MASK_QUARTER);
    }

    // The spatial transform uses its own required-subband masks
    assert(transform_type == TRANSFORM_TYPE_SPATIAL);
    return ChannelSubbandsComplete(decoder, resolution, 0x7F, 0x0F);
}
#if 0
// Return true if this subband is not needed at the decoded resolution.
// (Currently only quarter resolution, non-Bayer decodes can skip subbands.)
static bool CanSkipSubband(DECODER *decoder, int subband)
{
    // Bitmask indicates which subbands must be decoded for quarter resolution
    static uint32_t quarter_resolution_mask = 0x008F;

    // Convert the subband number into a bitmask (could use a lookup table)
    uint32_t subband_mask = SUBBAND_MASK(subband);

    // Select the resolution of the fully decoded frames
    int resolution = decoder->frame.resolution;

    //if (4 <= subband && subband <= 6)
    if (resolution == DECODED_RESOLUTION_QUARTER &&
        decoder->codec.encoded_format != ENCODED_FORMAT_BAYER &&
        (subband_mask & quarter_resolution_mask) == 0)
    {
        return true;
    }

    // Assume that the subband must be decoded
    return false;
}
#endif
// Return true if the wavelet exists and all bands are valid
static bool AllBandsValid(IMAGE *wavelet)
{
    if (wavelet == NULL)
    {
        return false;
    }
    return BANDS_ALL_VALID(wavelet);
}
#if DEBUG
// Return true if every channel's wavelet at this frame index exists and
// has all of its bands marked valid (debug-build sanity check).
static bool AllTransformBandsValid(TRANSFORM *transform_array[], int num_channels, int frame_index)
{
    int channel;

    // Validate the channel count
    if (num_channels < 1 || num_channels > TRANSFORM_MAX_CHANNELS)
    {
        assert(0);
        return false;
    }

    // Validate the frame index
    if (frame_index < 0 || frame_index >= TRANSFORM_MAX_FRAMES)
    {
        assert(0);
        return false;
    }

    for (channel = 0; channel < num_channels; channel++)
    {
        if (!AllBandsValid(transform_array[channel]->wavelet[frame_index]))
        {
            return false;
        }
    }

    // All wavelet bands in all channels are valid
    return true;
}
// Return true if every channel's wavelet at this frame index exists and
// has a valid lowpass band (band zero).
static bool AllLowpassBandsValid(TRANSFORM *transform_array[], int num_channels, int frame_index)
{
    int channel;

    if (num_channels <= 0 || num_channels > TRANSFORM_MAX_CHANNELS)
    {
        return false;
    }
    if (frame_index < 0 || frame_index >= TRANSFORM_MAX_FRAMES)
    {
        return false;
    }

    for (channel = 0; channel < num_channels; channel++)
    {
        IMAGE *wavelet = transform_array[channel]->wavelet[frame_index];
        bool lowpass_valid = (wavelet != NULL) &&
                             (wavelet->band_valid_flags & BAND_VALID_MASK(0));
        if (!lowpass_valid)
        {
            return false;
        }
    }

    // All lowpass bands in all channels are valid
    return true;
}
#endif
// Compute the frame dimensions from the dimensions of the first (smallest)
// wavelet.  Returns false for an unknown transform type; otherwise writes
// the frame width and height through the output pointers and returns true.
static bool
ComputeFrameDimensionsFromFirstWavelet(int transform_type,
                                       int first_wavelet_width,
                                       int first_wavelet_height,
                                       int *frame_width_out,
                                       int *frame_height_out)
{
    // Both supported transform types scale the first wavelet by the same factor
    const int expansion = 8;

    switch (transform_type)
    {
    case TRANSFORM_TYPE_SPATIAL:
    case TRANSFORM_TYPE_FIELDPLUS:
        // Return the frame dimensions
        *frame_width_out = first_wavelet_width * expansion;
        *frame_height_out = first_wavelet_height * expansion;
        return true;

    default:
        // Unknown transform type
        assert(0);
        return false;
    }
}
// Decode the sample header to determine the type of sample and other parameters
//
// Parses tag-value pairs from the start of the sample and fills in the
// SAMPLE_HEADER: frame dimensions, input/encoded formats, frame number,
// interlace/progressive flags, thumbnail offsets, and stereo (3D) layout.
// The caller may pass search flags in header->find_lowpass_bands; they are
// read before the header is cleared.  Returns true if the header was parsed
// completely and correctly.
//
// BUG FIXES versus the previous revision:
//   1. `header` was dereferenced (videoChannels, find_lowpass_bands) before
//      the NULL check; the check is now performed first.
//   2. `channel_count` was only assigned when a CODEC_TAG_INDEX was present
//      but was read unconditionally later (encoded-format fixup and
//      GetEncodedFormat) — an uninitialized read.  It is now initialized to 0.
bool ParseSampleHeader(BITSTREAM *input, SAMPLE_HEADER *header)
{
    TAGVALUE segment;
    int sample_type;
    int sample_size = 0;

    // Group index
    uint32_t channel_size[TRANSFORM_MAX_CHANNELS];

    // Number of channels in the group index (zero until an index is found)
    int channel_count = 0;

    // Values used for computing the frame width and height (if necessary)
    int transform_type = -1;
    int first_wavelet_width = 0;
    int first_wavelet_height = 0;

    int display_height = 0;
    int current_channel = 0;

    int currentVideoChannel;
    int find_lowpass_bands;
    int find_uncompressed;
    int find_header_info_only;

    // Validate the output argument before reading any caller-supplied state
    if (header == NULL)
    {
        return false;
    }

    // Save the caller-supplied search flags before the header is cleared below
    currentVideoChannel = header->videoChannels;
    find_lowpass_bands = header->find_lowpass_bands & 1;
    find_uncompressed = header->find_lowpass_bands & 2 ? 1 : 0;
    find_header_info_only = header->find_lowpass_bands & 4 ? 1 : 0;

    if (currentVideoChannel == 0)
        currentVideoChannel = 1;

    // Clear the entire sample header to prevent early return from this routine
    memset(header, 0, sizeof(SAMPLE_HEADER));

    // Clear the error code
    header->error = CODEC_ERROR_OKAY;

    // Initialize the frame dimensions to unknown
    header->width = 0;
    header->height = 0;
    header->videoChannels = 1;

    // Initialize the original pixel format to unknown
    header->input_format = COLOR_FORMAT_UNKNOWN;

    // Initialize the encoded format to unknown
    header->encoded_format = ENCODED_FORMAT_UNKNOWN;

    // Clear the frame number in case it is not present in the sample
    header->frame_number = 0;

    // The video is not progressive if the sample flags are not present
    header->hdr_progressive = false;

#if _BITSTREAM_UNALIGNED
    // Record the alignment of the bitstream within the sample
    SetBitstreamAlignment(input, 0);
#endif

    sample_size = input->nWordsUsed;

    // Get the type of sample (should be the first tag value pair)
    segment = GetTagValue(input);
    assert(segment.tuple.tag == CODEC_TAG_SAMPLE);
    if (!IsValidSegment(input, segment, CODEC_TAG_SAMPLE))
    {
        header->error = CodecErrorBitstream(input);
        return false;
    }
    sample_type = segment.tuple.value;

    switch (sample_type)
    {
    case SAMPLE_TYPE_GROUP: // Group of frames
        header->key_frame = true;
        header->difference_frame = false;
        header->droppable_frame = false;
        break;

    case SAMPLE_TYPE_FRAME: // The second or later frame in a group
        header->key_frame = false;
        header->difference_frame = true;
        header->droppable_frame = true;
        break;

    case SAMPLE_TYPE_IFRAME: // One frame in the group
        header->key_frame = true;
        header->difference_frame = false;
        header->droppable_frame = true;
        break;

    case SAMPLE_TYPE_SEQUENCE_HEADER:
        // Treat the video sequence header like a keyframe that can be dropped
        header->key_frame = true;
        header->difference_frame = false;
        header->droppable_frame = true;
        break;

    default:
        // Unknown type of sample
        header->error = CODEC_ERROR_SAMPLE_TYPE;
        return false;
    }

    // Continue parsing the sample header until all of the information has been found
    while ( (find_lowpass_bands == 1 && current_channel < 3) || //parse all
            (find_uncompressed == 1 && current_channel < 1) ||
            display_height == 0 ||
            header->width == 0 ||
            header->height == 0 ||
            header->input_format == COLOR_FORMAT_UNKNOWN ||
            header->frame_number == 0 ||
            (header->interlaced_flags == 0 && header->hdr_progressive == 0))
    {
        int chunksize = 0;

        // Get the next tag value pair from the bitstream
        segment = GetSegment(input);

        // Did the bitstream end before the last tag was found?
        if (input->error == BITSTREAM_ERROR_UNDERFLOW)
        {
            // Use whatever information was found so far
            break;
        }

        // Did an error occur while reading the bitstream?
        if (input->error != BITSTREAM_ERROR_OKAY)
        {
            header->error = CodecErrorBitstream(input);
            return false;
        }

        // Is this an optional tag?
        if (segment.tuple.tag < 0)
        {
            segment.tuple.tag = NEG(segment.tuple.tag);
        }

        // Extract the chunk payload size (in words) for sized tags
        if (segment.tuple.tag & 0x2000)
        {
            // 24-bit chunk size: the low byte of the tag holds the high bits
            chunksize = segment.tuple.value;
            chunksize &= 0xffff;
            chunksize += ((segment.tuple.tag & 0xff) << 16);
        }
        else if (segment.tuple.tag & 0x4000)
        {
            chunksize = segment.tuple.value;
            chunksize &= 0xffff;
        }
        // else if(tag == CODEC_TAG_INDEX) // handled below
        // {
        //     chunksize = value;
        //     chunksize &= 0xffff;
        // }
        else
        {
            chunksize = 0;
        }

        if ((int)(segment.tuple.tag) <= ((int)CODEC_TAG_LAST_NON_SIZED) || segment.tuple.tag & 0x6000)
        {
            int skip = 1;

            if ((segment.tuple.tag & 0xff00) == 0x2200) //sample size
            {
                if (sample_size < chunksize * 4)
                    find_header_info_only = 1;
                skip = find_header_info_only;

                // For two-channel (stereo) samples, locate and parse the
                // header of the second eye to collect its thumbnail offsets
                if (currentVideoChannel <= 1 && header->videoChannels == 2 && !find_header_info_only)
                {
                    BITSTREAM input2;
                    SAMPLE_HEADER header2;
                    BITWORD *eye2 = (BITWORD *)(input->lpCurrentWord + chunksize * 4);
                    int eye_offset = sample_size - input->nWordsUsed + chunksize * 4; //approx
                    int eye_sample_size = input->nWordsUsed - eye_offset;

                    // Search for first sample of the next frame
                    while ((eye2[1] != (uint8_t)CODEC_TAG_SAMPLE || eye2[0] != 0 || eye2[2] != 0) && eye_sample_size > 0)
                    {
                        eye2 += 4;
                        chunksize ++;
                        eye_offset += 4;
                        eye_sample_size -= 4;
                    }

                    // Save the offset to the right stereo sample
                    header->left_sample_size = eye_offset;

                    {
                        InitBitstreamBuffer(&input2, eye2, eye_sample_size, BITSTREAM_ACCESS_READ);
                        memset(&header2, 0, sizeof(SAMPLE_HEADER));
                        header2.find_lowpass_bands = 1;
                        currentVideoChannel++;
                        header2.videoChannels = currentVideoChannel;

                        if (ParseSampleHeader(&input2, &header2))
                        {
                            int i;
                            for (i = 0; i < 4; i++)
                            {
                                if (header2.thumbnail_channel_offsets[i])
                                    header->thumbnail_channel_offsets_2nd_Eye[i] = eye_offset + header2.thumbnail_channel_offsets[i];
                            }
                        }
                    }
                }
            }

            if ((segment.tuple.tag & 0xff00) == 0x2300) //uncompressed sample size
            {
                header->hdr_uncompressed = 1;
                skip = 1;
                if (find_lowpass_bands != 1)
                    break;
            }

            if ((segment.tuple.tag & 0xff00) == 0x2100) //level
            {
                if (find_lowpass_bands == 1)
                {
                    skip = 0;
                }
                else
                {
                    skip = 1; // no header data after the fix level
                    break;
                }
            }

            if (chunksize)
            {
                if (skip)
                {
                    // Skip over the chunk payload
                    input->lpCurrentWord += chunksize * 4;
                    input->nWordsUsed -= chunksize * 4;
                }
            }
            else
            {
                switch (segment.tuple.tag)
                {
                case CODEC_TAG_VERSION: // Version number of the encoder used in each GOP.
                    header->encoder_version = (((segment.tuple.value >> 12) & 0xf) << 16) |
                                              (((segment.tuple.value >> 8) & 0xf) << 8) |
                                              ((segment.tuple.value) & 0xff);
                    break;

                case CODEC_TAG_INDEX:
                    // Get the number of channels in the index to skip
                    channel_count = segment.tuple.value;
                    DecodeGroupIndex(input, (uint32_t *)&channel_size[0], channel_count);
                    break;

                case CODEC_TAG_FRAME_WIDTH:
                    // Record the frame width in the sample header
                    header->width = segment.tuple.value;
                    break;

                case CODEC_TAG_FRAME_HEIGHT:
                    // Record the frame height in the sample header
                    header->height = segment.tuple.value;
                    break;

                case CODEC_TAG_FRAME_DISPLAY_HEIGHT:
                    display_height = segment.tuple.value;
                    break;

                case CODEC_TAG_LOWPASS_WIDTH:
                    // Save the width of the smallest wavelet for computing the frame dimensions
                    first_wavelet_width = segment.tuple.value;
                    break;

                case CODEC_TAG_LOWPASS_HEIGHT:
                    // Save the height of the smallest wavelet for computing the frame dimensions
                    first_wavelet_height = segment.tuple.value;
                    break;

                case CODEC_TAG_TRANSFORM_TYPE:
                    // Save the type of transform for computing the frame dimensions (if necessary)
                    transform_type = segment.tuple.value;
                    break;

                case CODEC_TAG_INPUT_FORMAT:
                    // Record the original format of the encoded frames
                    header->input_format = (COLOR_FORMAT)segment.tuple.value;
                    break;

                case CODEC_TAG_ENCODED_FORMAT:
                case CODEC_TAG_OLD_ENCODED_FORMAT:
                    // Record the encoded format (internal representation)
                    header->encoded_format = (ENCODED_FORMAT)segment.tuple.value;
                    // Three channels cannot carry an alpha plane
                    if (header->encoded_format == ENCODED_FORMAT_RGBA_4444 && channel_count == 3)
                        header->encoded_format = ENCODED_FORMAT_RGB_444;
                    break;

                case CODEC_TAG_FRAME_NUMBER:
                    // Record the frame number for debugging
                    header->frame_number = segment.tuple.value;
                    break;

                case CODEC_TAG_INTERLACED_FLAGS:
                    // Record the flags that indicate the field type
                    header->interlaced_flags = segment.tuple.value;
                    break;

                case CODEC_TAG_SAMPLE_FLAGS:
                    // The sample flags specify progressive versus interlaced decoding
                    header->hdr_progressive = !!(segment.tuple.value & SAMPLE_FLAGS_PROGRESSIVE);
                    if (header->hdr_progressive)
                    {
                        // Clear the interlaced flags
                        header->interlaced_flags = 0;
                    }
                    break;

                case CODEC_TAG_LOWPASS_SUBBAND:
                    if (segment.tuple.value == 0) // low pass band
                    {
                        // Scan ahead for the lowpass band marker to record
                        // the thumbnail offset for this channel
                        int count = 8;
                        uint32_t *lptr = (uint32_t *)input->lpCurrentWord;
                        do
                        {
                            uint32_t longword = SwapInt32(lptr[count]);
                            unsigned short t, v;
                            t = (longword >> 16) & 0xffff;
                            v = (longword) & 0xffff;
                            if (t == CODEC_TAG_MARKER && IsLowPassBandMarker(v) && current_channel < 4)
                            {
                                header->thumbnail_channel_offsets[current_channel] = (sample_size - input->nWordsUsed) + count * 4 + 4;
                                break;
                            }
                            count++;
                        } while (count < 32);
                        current_channel++;
                    }
                    break;

                case CODEC_TAG_ENCODED_CHANNELS:
                    if (header->videoChannels == 1)
                    {
                        header->videoChannels = segment.tuple.value;
                        if (header->videoChannels < 1)
                            header->videoChannels = 1;
                    }
                    break;

                case CODEC_TAG_QUALITY_L: // Low 16 bits of the encode quality
                    header->encode_quality &= 0xffff0000;
                    header->encode_quality |= segment.tuple.value;
                    break;

                case CODEC_TAG_QUALITY_H: // High 16 bits of the encode quality
                    header->encode_quality &= 0xffff;
                    header->encode_quality |= segment.tuple.value << 16;
                    break;
                }

                // Have the encoded frame dimensions been computed?
                if (header->width == 0 || header->height == 0)
                {
                    // Found the first wavelet in the bitstream?
                    if (transform_type >= 0 && first_wavelet_width > 0 && first_wavelet_height > 0)
                    {
                        // The group header did not contain tags for the frame dimensions
                        // prior to the release of support for RGB 4:4:4, so must attempt to
                        // compute the frame dimensions from the dimensions of the lowpass band.
                        int frame_width = 0;
                        int frame_height = 0;

                        // Use the dimensions of the first wavelet to compute the frame width and height
                        if (!ComputeFrameDimensionsFromFirstWavelet(transform_type,
                                                                    first_wavelet_width,
                                                                    first_wavelet_height,
                                                                    &frame_width,
                                                                    &frame_height))
                        {
                            // Could not compute the frame dimensions
                            header->error = CODEC_ERROR_FRAME_DIMENSIONS;
                            return false;
                        }

                        // Save the frame dimensions in the sample header
                        header->width = frame_width;
                        header->height = frame_height;

                        // No more header information after finding the lowpass band
                        break;
                    }
                }

                if (find_lowpass_bands != 1 && find_uncompressed != 1)
                {
                    // No more header information after the first encoded band
                    if (segment.tuple.tag == CODEC_TAG_BAND_NUMBER)
                    {
                        // Stop looking for header information
                        break;
                    }

                    // No more header information after the frame index
                    if (segment.tuple.tag == CODEC_TAG_FRAME_INDEX)
                    {
                        // Stop looking for header information
                        break;
                    }

                    // No more header information after the lowpass band header
                    if (segment.tuple.tag == CODEC_TAG_PIXEL_DEPTH)
                    {
                        // Stop looking for header information
                        break;
                    }
                }
            }
        }
    }

    if (header->width == 0 || header->height == 0)
    {
        assert(0);
    }

    // Fill in the encoded format if it was not present in the header
    if (header->encoded_format == ENCODED_FORMAT_UNKNOWN)
    {
        header->encoded_format = GetEncodedFormat(header->input_format, header->encode_quality, channel_count);
    }

    if (display_height > 0)
    {
        header->height = display_height;
    }

    if (header->encoded_format == ENCODED_FORMAT_BAYER)
    {
        // Bayer samples are encoded at half size in each dimension
        header->width *= 2;
        header->height *= 2;
        if (display_height == 0)
        {
            if (header->height == 1088)
                header->height = 1080;
        }
    }

    // Return true if the header was parsed completely and correctly
    return (header->width > 0 &&
            header->height > 0 &&
            ((sample_type == SAMPLE_TYPE_FRAME) ||
             (header->input_format != COLOR_FORMAT_UNKNOWN &&
              header->encoded_format != ENCODED_FORMAT_UNKNOWN)));

    // It is not an error if the frame number was not found in the sample header
}
// Print the tag-value pairs from the sample header to the logfile,
// stopping once the lowpass band dimensions have been seen.
// Returns false if a bitstream error occurred.
bool DumpSampleHeader(BITSTREAM *input, FILE *logfile)
{
    int lowpass_width = 0;
    int lowpass_height = 0;

    // Walk the header until both lowpass dimensions are known
    while (lowpass_width == 0 && lowpass_height == 0)
    {
        TAGVALUE segment = GetSegment(input);

        // Stop on any bitstream error
        if (input->error != BITSTREAM_ERROR_OKAY)
        {
            return false;
        }

        // Convert an optional tag into its required form
        if (segment.tuple.tag < 0)
        {
            segment.tuple.tag = NEG(segment.tuple.tag);
        }

        // Check that the tag is valid
        assert(CODEC_TAG_ZERO < segment.tuple.tag && segment.tuple.tag <= CODEC_TAG_LAST_NON_SIZED);

        switch (segment.tuple.tag)
        {
        case CODEC_TAG_SAMPLE:
            fprintf(logfile, "Sample type: %d\n", segment.tuple.value);
            break;

        case CODEC_TAG_FRAME_WIDTH:
            fprintf(logfile, "Frame width: %d\n", segment.tuple.value);
            break;

        case CODEC_TAG_FRAME_HEIGHT:
            fprintf(logfile, "Frame height: %d\n", segment.tuple.value);
            break;

        case CODEC_TAG_LOWPASS_WIDTH:
            lowpass_width = segment.tuple.value;
            fprintf(logfile, "Lowpass width: %d\n", lowpass_width);
            break;

        case CODEC_TAG_LOWPASS_HEIGHT:
            lowpass_height = segment.tuple.value;
            fprintf(logfile, "Lowpass height: %d\n", lowpass_height);
            break;

        case CODEC_TAG_TRANSFORM_TYPE:
            fprintf(logfile, "Transform type: %d\n", segment.tuple.value);
            break;

        case CODEC_TAG_INPUT_FORMAT:
            fprintf(logfile, "Input format: %d\n", segment.tuple.value);
            break;

        case CODEC_TAG_ENCODED_FORMAT:
        case CODEC_TAG_OLD_ENCODED_FORMAT:
            fprintf(logfile, "Encoded format: %d\n", segment.tuple.value);
            break;

        case CODEC_TAG_FRAME_NUMBER:
            fprintf(logfile, "Frame number: %d\n", segment.tuple.value);
            break;
        }
    }

    return true;
}
// Advance the bitstream past the current video channel to the requested
// channel in a multi-channel (3D/stereo) sample.  Returns the number of
// encoded channels found in the sample header (or the previously recorded
// count for tiny P-frame samples).
int SkipVideoChannel(DECODER *decoder, BITSTREAM *input, int skip_to_channel) // 3D work
{
    TAGWORD tag, value = 1;
    unsigned char *pos = NULL;
    int readsize = input->nWordsUsed;

    if (readsize > 4096) // only need to scan the first few tuplets
    {
        readsize = 4096;
    }
    else
    {
        //Tiny therefore P-frame, nothing to be read so:
        value = decoder->real_channels; // return the last value.
        return value;
    }

    // Locate the encoded-channels tuplet within the scanned region;
    // on success 'value' holds the channel count
    pos = GetTupletAddr(input->lpCurrentBuffer, readsize, CODEC_TAG_ENCODED_CHANNELS, &value);
    if (pos && value > 1 && skip_to_channel > 1)
    {
        int chunksize = 0;
        intptr_t offset;
        int count = 0;

        // Read big-endian tag/value pairs until the sample-size tag is
        // found, giving up after ten tuplets
        do
        {
            tag = *pos++ << 8;
            tag |= *pos++;
            value = *pos++ << 8;
            value |= *pos++;
            if (tag < 0)
            {
                tag = NEG(tag);
            }
        } while ((tag & 0xff00) != CODEC_TAG_SAMPLE_SIZE && count++ < 10);

        if ((tag & 0xff00) == CODEC_TAG_SAMPLE_SIZE)
        {
            // 24-bit chunk size: low byte of the tag holds the high bits
            chunksize = value;
            chunksize &= 0xffff;
            chunksize += ((tag & 0xff) << 16);

            // Skip to the end of the current channel's chunk
            offset = ((intptr_t)pos - (intptr_t)input->lpCurrentWord) + chunksize * 4;
            input->lpCurrentWord += offset;
            input->nWordsUsed -= (int)offset;

            {
                // NOTE: this inner byte pointer 'tag' shadows the outer TAGWORD 'tag'
                uint8_t *tag = (uint8_t *)input->lpCurrentWord;

                // Search for first sample of the next frame
                while ((tag[1] != (uint8_t)CODEC_TAG_SAMPLE || tag[0] != 0 || tag[2] != 0) && input->nWordsUsed > 0)
                {
                    input->lpCurrentWord += 4;
                    input->nWordsUsed -= 4;
                    tag += 4;
                }
            }
        }
    }

    //if(value == 0) value = 1; // old non-stereo file
    return value;
}
// Number of fractional (subpixel) positions between adjacent pixels
#define SUBPIXEL 64

// Four-tap interpolation gains for each of the SUBPIXEL+1 fractional
// positions.  Tap weights are scaled by 128; 0x7fff is presumably used in
// place of 256 * 128 (= 0x8000), which would overflow a signed short —
// compare the "was 32768, issue for SSE2" note on the lanczos table below.
// The second half of the table mirrors the first (taps reversed).
static short gains[SUBPIXEL + 1][4] =
{
    {0 * 128, 0 * 128, 0x7fff, 0 * 128},
    {0 * 128, 2 * 128, 0x7fff, -2 * 128},
    {0 * 128, 5 * 128, 255 * 128, -4 * 128},
    {0 * 128, 8 * 128, 254 * 128, -6 * 128},
    {0 * 128, 11 * 128, 253 * 128, -8 * 128},
    {0 * 128, 14 * 128, 252 * 128, -10 * 128},
    {0 * 128, 18 * 128, 250 * 128, -12 * 128},
    {0 * 128, 21 * 128, 248 * 128, -13 * 128},
    {-1 * 128, 25 * 128, 247 * 128, -15 * 128},
    {-1 * 128, 29 * 128, 244 * 128, -16 * 128},
    {-1 * 128, 33 * 128, 241 * 128, -17 * 128},
    {-2 * 128, 37 * 128, 239 * 128, -18 * 128},
    {-2 * 128, 41 * 128, 236 * 128, -19 * 128},
    {-3 * 128, 46 * 128, 233 * 128, -20 * 128},
    {-3 * 128, 50 * 128, 229 * 128, -20 * 128},
    {-4 * 128, 55 * 128, 226 * 128, -21 * 128},
    {-4 * 128, 60 * 128, 221 * 128, -21 * 128},
    {-5 * 128, 65 * 128, 217 * 128, -21 * 128},
    {-5 * 128, 70 * 128, 213 * 128, -22 * 128},
    {-6 * 128, 75 * 128, 209 * 128, -22 * 128},
    {-7 * 128, 80 * 128, 205 * 128, -22 * 128},
    {-7 * 128, 85 * 128, 199 * 128, -21 * 128},
    {-8 * 128, 91 * 128, 194 * 128, -21 * 128},
    {-9 * 128, 96 * 128, 190 * 128, -21 * 128},
    {-10 * 128, 102 * 128, 185 * 128, -21 * 128},
    {-10 * 128, 107 * 128, 179 * 128, -20 * 128},
    {-11 * 128, 113 * 128, 174 * 128, -20 * 128},
    {-12 * 128, 118 * 128, 169 * 128, -19 * 128},
    {-13 * 128, 124 * 128, 164 * 128, -19 * 128},
    {-14 * 128, 129 * 128, 159 * 128, -18 * 128},
    {-14 * 128, 135 * 128, 152 * 128, -17 * 128},
    {-15 * 128, 141 * 128, 147 * 128, -17 * 128},
    {-16 * 128, 144 * 128, 144 * 128, -16 * 128},
    {-17 * 128, 147 * 128, 141 * 128, -15 * 128},
    {-17 * 128, 152 * 128, 135 * 128, -14 * 128},
    {-18 * 128, 159 * 128, 129 * 128, -14 * 128},
    {-19 * 128, 164 * 128, 124 * 128, -13 * 128},
    {-19 * 128, 169 * 128, 118 * 128, -12 * 128},
    {-20 * 128, 174 * 128, 113 * 128, -11 * 128},
    {-20 * 128, 179 * 128, 107 * 128, -10 * 128},
    {-21 * 128, 185 * 128, 102 * 128, -10 * 128},
    {-21 * 128, 190 * 128, 96 * 128, -9 * 128},
    {-21 * 128, 194 * 128, 91 * 128, -8 * 128},
    {-21 * 128, 199 * 128, 85 * 128, -7 * 128},
    {-22 * 128, 205 * 128, 80 * 128, -7 * 128},
    {-22 * 128, 209 * 128, 75 * 128, -6 * 128},
    {-22 * 128, 213 * 128, 70 * 128, -5 * 128},
    {-21 * 128, 217 * 128, 65 * 128, -5 * 128},
    {-21 * 128, 221 * 128, 60 * 128, -4 * 128},
    {-21 * 128, 226 * 128, 55 * 128, -4 * 128},
    {-20 * 128, 229 * 128, 50 * 128, -3 * 128},
    {-20 * 128, 233 * 128, 46 * 128, -3 * 128},
    {-19 * 128, 236 * 128, 41 * 128, -2 * 128},
    {-18 * 128, 239 * 128, 37 * 128, -2 * 128},
    {-17 * 128, 241 * 128, 33 * 128, -1 * 128},
    {-16 * 128, 244 * 128, 29 * 128, -1 * 128},
    {-15 * 128, 247 * 128, 25 * 128, -1 * 128},
    {-13 * 128, 248 * 128, 21 * 128, 0 * 128},
    {-12 * 128, 250 * 128, 18 * 128, 0 * 128},
    {-10 * 128, 252 * 128, 14 * 128, 0 * 128},
    {-8 * 128, 253 * 128, 11 * 128, 0 * 128},
    {-6 * 128, 254 * 128, 8 * 128, 0 * 128},
    {-4 * 128, 255 * 128, 5 * 128, 0 * 128},
    {-2 * 128, 0x7fff, 2 * 128, 0 * 128},
    {0 * 128, 0 * 128, 0x7fff, 0 * 128}
};
// 4-tap vertical/horizontal resampling filter table in signed 16-bit
// fixed point.  The 256 entries are consumed as 64 subpixel phases x 4
// taps: callers compute rmdr in 0..63 and index [rmdr], [rmdr+64],
// [rmdr+128], [rmdr+192], so all accesses stay inside the array.
// The shape is a windowed-sinc (Lanczos-like) kernel: negative side
// lobes (minimum -2832), zero crossings at entries 0/64/192, and the
// peak at entry 128 clamped to 32767 so _mm_mulhi_epi16 never sees
// -32768 (see the inline note below).
// NOTE(review): entry 96 is 18781 but its mirror, entry 160, is 18159;
// the table is otherwise symmetric about entry 128, so one of the two
// looks like a typo — confirm against whatever generated the table.
static int lanczos[256] =
{
    0,
    -2,
    -8,
    -18,
    -33,
    -53,
    -77,
    -106,
    -141,
    -179,
    -223,
    -272,
    -325,
    -384,
    -447,
    -514,
    -586,
    -662,
    -742,
    -826,
    -913,
    -1004,
    -1097,
    -1193,
    -1290,
    -1389,
    -1490,
    -1591,
    -1692,
    -1792,
    -1892,
    -1990,
    -2086,
    -2179,
    -2269,
    -2355,
    -2436,
    -2511,
    -2580,
    -2643,
    -2697,
    -2744,
    -2781,
    -2809,
    -2826,
    -2832,
    -2826,
    -2808,
    -2776,
    -2730,
    -2670,
    -2594,
    -2503,
    -2395,
    -2271,
    -2129,
    -1969,
    -1790,
    -1593,
    -1377,
    -1141,
    -886,
    -611,
    -315,
    0,
    336,
    692,
    1069,
    1466,
    1884,
    2321,
    2778,
    3255,
    3750,
    4265,
    4797,
    5347,
    5914,
    6498,
    7097,
    7711,
    8340,
    8982,
    9636,
    10301,
    10977,
    11663,
    12357,
    13058,
    13765,
    14477,
    15192,
    15910,
    16630,
    17349,
    18066,
    18781,
    18871,
    19580,
    20285,
    20986,
    21678,
    22361,
    23035,
    23697,
    24348,
    24983,
    25604,
    26206,
    26790,
    27354,
    27898,
    28419,
    28915,
    29387,
    29832,
    30249,
    30638,
    30997,
    31326,
    31623,
    31886,
    32117,
    32314,
    32476,
    32603,
    32695,
    32749,
    32767, //was 32768, issue for SSE2
    32749,
    32695,
    32603,
    32476,
    32314,
    32117,
    31886,
    31623,
    31326,
    30997,
    30638,
    30249,
    29832,
    29387,
    28915,
    28419,
    27898,
    27354,
    26790,
    26206,
    25604,
    24983,
    24348,
    23697,
    23035,
    22361,
    21678,
    20986,
    20285,
    19580,
    18871,
    18159,
    18066,
    17349,
    16630,
    15910,
    15192,
    14477,
    13765,
    13058,
    12357,
    11663,
    10977,
    10301,
    9636,
    8982,
    8340,
    7711,
    7097,
    6498,
    5914,
    5347,
    4797,
    4265,
    3750,
    3255,
    2778,
    2321,
    1884,
    1466,
    1069,
    692,
    336,
    0,
    -315,
    -611,
    -886,
    -1141,
    -1377,
    -1593,
    -1790,
    -1969,
    -2129,
    -2271,
    -2395,
    -2503,
    -2594,
    -2670,
    -2730,
    -2776,
    -2808,
    -2826,
    -2832,
    -2826,
    -2809,
    -2781,
    -2744,
    -2697,
    -2643,
    -2580,
    -2511,
    -2436,
    -2355,
    -2269,
    -2179,
    -2086,
    -1990,
    -1892,
    -1792,
    -1692,
    -1591,
    -1490,
    -1389,
    -1290,
    -1193,
    -1097,
    -1004,
    -913,
    -826,
    -742,
    -662,
    -586,
    -514,
    -447,
    -384,
    -325,
    -272,
    -223,
    -179,
    -141,
    -106,
    -77,
    -53,
    -33,
    -18,
    -8,
    -2,
};
// Vertically shift and zoom the image in place with a 4-tap Lanczos filter.
//
// Every source row the resample window can reach is first copied into
// 'buffer' so that writing the result in place never clobbers unread input.
// Each destination row is then rebuilt from four buffered source rows
// weighted by the lanczos[] table; gains are applied with _mm_mulhi_epi16,
// i.e. (pixel * gain) >> 16 per tap, summed with saturating adds.
//
//  decoder    - supplies StereoBufferFormat (pixel layout of RGB48)
//  RGB48      - frame processed in place
//  buffer     - scratch area large enough for the buffered source rows
//  widthbytes - valid bytes per row
//  height     - number of rows
//  pitch      - bytes between successive rows (negative for bottom-up)
//  offset     - vertical shift as a fraction of the frame height
//  zoom       - scale factor about the frame centre (1.0 = none)
void RGB48VerticalShiftZoom(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer,
                            int widthbytes, int height, int pitch, float offset,
                            float zoom)
{
    float yposf, ystepf;
    int x;
    unsigned short *scanline[4];
    int step;
    __m128i lA, lB, lC, lD, gA, gB, gC, gD, o128, t1;
    __m128i *lineA, *lineB, *lineC, *lineD, *outline128;

    offset = -offset;
    // Source row for output row 0, centred so the zoom expands about the
    // middle of the frame; advance 1/zoom source rows per output row.
    yposf = (float)height * (0.5f - 1.0f / (2.0f * zoom) - offset);
    ystepf = 1.0f / zoom;
    if (pitch < 0)
        yposf -= ystepf;

    // Bytes consumed per iteration of the inner SSE loops below
    // (two 16-byte stores for 16-bit formats, one packed store for 8-bit).
    switch (decoder->StereoBufferFormat)
    {
        case DECODED_FORMAT_RGB32:
        case DECODED_FORMAT_RGB24:
        case DECODED_FORMAT_YUYV:
            step = 16;
            break;
        case DECODED_FORMAT_W13A:
        case DECODED_FORMAT_RG64:
        case DECODED_FORMAT_WP13:
        case DECODED_FORMAT_RG48:
        default:
            step = 32;
            break;
    }

    {
        static char zeroline[1024] = {0};
        int y, yoffset = ((int)(yposf - 2.0)), yend = ((int)(yposf + 2.0 + ystepf * height));
        unsigned char *src = (unsigned char *)RGB48;
        unsigned char *dst = (unsigned char *)RGB48;
        unsigned char *ptr = (unsigned char *)buffer;

        // Buffer all source rows the filter window can touch.
        if (yoffset < 0) yoffset = 0;
        if (yend > height) yend = height;
        src += pitch * yoffset;
        for (y = yoffset; y < yend; y++)
        {
            memcpy(ptr, src, widthbytes);
            ptr += widthbytes;
            src += pitch;
        }

        ptr = (unsigned char *)buffer;
        for (y = 0; y < height; y++)
        {
            int i, t, yp = ((int)yposf);
            int rmdr = 63 - ((int)(yposf * 64.0) & 63);   // subpixel phase, 0..63
            int gains[4];

            yp -= 1; // use -2 cause a image down shift //DAN20100225
            t = 0;
            // Select the four source rows and their tap gains.  Rows outside
            // the image substitute a zero line; 't' accumulates the total gain
            // so an all-zero window can be detected below.
            for (i = 0; i < 4; i++)
            {
                if (yp < 0 || yp >= height) // skip 0 line as the top line was zagged
                {
                    t += gains[i] = lanczos[rmdr];
                    scanline[i] = (unsigned short *)zeroline;
                }
                else
                {
                    t += gains[i] = lanczos[rmdr];
                    scanline[i] = (unsigned short *)&ptr[widthbytes * (yp - yoffset)];
                }
                yp++;
                rmdr += 64;   // next tap of the 64-phase table
            }

            if (t)
            {
                __m128i half;
                gA = _mm_set1_epi16(gains[0]);
                gB = _mm_set1_epi16(gains[1]);
                gC = _mm_set1_epi16(gains[2]);
                gD = _mm_set1_epi16(gains[3]);
                outline128 = (__m128i *)dst;
                lineA = (__m128i *)scanline[0];
                lineB = (__m128i *)scanline[1];
                lineC = (__m128i *)scanline[2];
                lineD = (__m128i *)scanline[3];
                switch (decoder->StereoBufferFormat)
                {
                    case DECODED_FORMAT_W13A:
                    case DECODED_FORMAT_WP13:
                        // Signed 13-bit data: filter, clamp to 0..0x3fff, then
                        // <<1 back to the working range.
                        for (x = 0; x < widthbytes; x += step)
                        {
                            lA = _mm_loadu_si128(lineA++);
                            lB = _mm_loadu_si128(lineB++);
                            lC = _mm_loadu_si128(lineC++);
                            lD = _mm_loadu_si128(lineD++);
                            o128 = _mm_mulhi_epi16(lA, gA);
                            t1 = _mm_mulhi_epi16(lB, gB);
                            o128 = _mm_adds_epi16(o128, t1);
                            t1 = _mm_mulhi_epi16(lC, gC);
                            o128 = _mm_adds_epi16(o128, t1);
                            t1 = _mm_mulhi_epi16(lD, gD);
                            o128 = _mm_adds_epi16(o128, t1);
                            // upper limit to 32767
                            o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
                            o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
                            o128 = _mm_slli_epi16(o128, 1);
                            _mm_storeu_si128(outline128++, o128);
                            // Second 16 bytes of the 32-byte step.
                            lA = _mm_loadu_si128(lineA++);
                            lB = _mm_loadu_si128(lineB++);
                            lC = _mm_loadu_si128(lineC++);
                            lD = _mm_loadu_si128(lineD++);
                            o128 = _mm_mulhi_epi16(lA, gA);
                            t1 = _mm_mulhi_epi16(lB, gB);
                            o128 = _mm_adds_epi16(o128, t1);
                            t1 = _mm_mulhi_epi16(lC, gC);
                            o128 = _mm_adds_epi16(o128, t1);
                            t1 = _mm_mulhi_epi16(lD, gD);
                            o128 = _mm_adds_epi16(o128, t1);
                            // upper limit to 32767
                            o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
                            o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
                            o128 = _mm_slli_epi16(o128, 1);
                            _mm_storeu_si128(outline128++, o128);
                        }
                        break;

                    case DECODED_FORMAT_RG64:
                    case DECODED_FORMAT_RG48:
                        // Unsigned 16-bit data: >>3 to 13-bit headroom before
                        // the signed multiply, clamp to 0..0x0fff, <<4 back.
                        for (x = 0; x < widthbytes; x += step)
                        {
                            lA = _mm_loadu_si128(lineA++);
                            lA = _mm_srli_epi16(lA, 3); //13-bit unsigned
                            lB = _mm_loadu_si128(lineB++);
                            lB = _mm_srli_epi16(lB, 3); //13-bit unsigned
                            lC = _mm_loadu_si128(lineC++);
                            lC = _mm_srli_epi16(lC, 3); //13-bit unsigned
                            lD = _mm_loadu_si128(lineD++);
                            lD = _mm_srli_epi16(lD, 3); //13-bit unsigned
                            o128 = _mm_mulhi_epi16(lA, gA);
                            t1 = _mm_mulhi_epi16(lB, gB);
                            o128 = _mm_adds_epi16(o128, t1);
                            t1 = _mm_mulhi_epi16(lC, gC);
                            o128 = _mm_adds_epi16(o128, t1);
                            t1 = _mm_mulhi_epi16(lD, gD);
                            o128 = _mm_adds_epi16(o128, t1);
                            o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
                            o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
                            o128 = _mm_slli_epi16(o128, 4);
                            _mm_storeu_si128(outline128++, o128);
                            // Second 16 bytes of the 32-byte step.
                            lA = _mm_loadu_si128(lineA++);
                            lA = _mm_srli_epi16(lA, 3); //13-bit unsigned
                            lB = _mm_loadu_si128(lineB++);
                            lB = _mm_srli_epi16(lB, 3); //13-bit unsigned
                            lC = _mm_loadu_si128(lineC++);
                            lC = _mm_srli_epi16(lC, 3); //13-bit unsigned
                            lD = _mm_loadu_si128(lineD++);
                            lD = _mm_srli_epi16(lD, 3); //13-bit unsigned
                            o128 = _mm_mulhi_epi16(lA, gA);
                            t1 = _mm_mulhi_epi16(lB, gB);
                            o128 = _mm_adds_epi16(o128, t1);
                            t1 = _mm_mulhi_epi16(lC, gC);
                            o128 = _mm_adds_epi16(o128, t1);
                            t1 = _mm_mulhi_epi16(lD, gD);
                            o128 = _mm_adds_epi16(o128, t1);
                            o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
                            o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
                            o128 = _mm_slli_epi16(o128, 4);
                            _mm_storeu_si128(outline128++, o128);
                        }
                        break;

                    case DECODED_FORMAT_RGB32:
                    case DECODED_FORMAT_RGB24:
                    case DECODED_FORMAT_YUYV:
                        // 8-bit data: expand bytes to the high byte of each
                        // word (high half first), filter at 13-bit precision,
                        // then pack both halves back to 16 bytes.
                        for (x = 0; x < widthbytes; x += step)
                        {
                            lA = _mm_loadu_si128(lineA);
                            lA = _mm_unpackhi_epi8 (_mm_setzero_si128(), lA);
                            lB = _mm_loadu_si128(lineB);
                            lB = _mm_unpackhi_epi8 (_mm_setzero_si128(), lB);
                            lC = _mm_loadu_si128(lineC);
                            lC = _mm_unpackhi_epi8 (_mm_setzero_si128(), lC);
                            lD = _mm_loadu_si128(lineD);
                            lD = _mm_unpackhi_epi8 (_mm_setzero_si128(), lD);
                            lA = _mm_srli_epi16(lA, 3); //13-bit unsigned
                            lB = _mm_srli_epi16(lB, 3); //13-bit unsigned
                            lC = _mm_srli_epi16(lC, 3); //13-bit unsigned
                            lD = _mm_srli_epi16(lD, 3); //13-bit unsigned
                            o128 = _mm_mulhi_epi16(lA, gA);
                            t1 = _mm_mulhi_epi16(lB, gB);
                            o128 = _mm_adds_epi16(o128, t1);
                            t1 = _mm_mulhi_epi16(lC, gC);
                            o128 = _mm_adds_epi16(o128, t1);
                            t1 = _mm_mulhi_epi16(lD, gD);
                            o128 = _mm_adds_epi16(o128, t1);
                            o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
                            o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
                            o128 = _mm_slli_epi16(o128, 4);
                            half = o128;
                            // Low half of the same 16 source bytes.
                            lA = _mm_loadu_si128(lineA++);
                            lA = _mm_unpacklo_epi8 (_mm_setzero_si128(), lA);
                            lB = _mm_loadu_si128(lineB++);
                            lB = _mm_unpacklo_epi8 (_mm_setzero_si128(), lB);
                            lC = _mm_loadu_si128(lineC++);
                            lC = _mm_unpacklo_epi8 (_mm_setzero_si128(), lC);
                            lD = _mm_loadu_si128(lineD++);
                            lD = _mm_unpacklo_epi8 (_mm_setzero_si128(), lD);
                            lA = _mm_srli_epi16(lA, 3); //13-bit unsigned
                            lB = _mm_srli_epi16(lB, 3); //13-bit unsigned
                            lC = _mm_srli_epi16(lC, 3); //13-bit unsigned
                            lD = _mm_srli_epi16(lD, 3); //13-bit unsigned
                            o128 = _mm_mulhi_epi16(lA, gA);
                            t1 = _mm_mulhi_epi16(lB, gB);
                            o128 = _mm_adds_epi16(o128, t1);
                            t1 = _mm_mulhi_epi16(lC, gC);
                            o128 = _mm_adds_epi16(o128, t1);
                            t1 = _mm_mulhi_epi16(lD, gD);
                            o128 = _mm_adds_epi16(o128, t1);
                            o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
                            o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
                            o128 = _mm_slli_epi16(o128, 4);
                            half = _mm_srli_epi16(half, 8);
                            o128 = _mm_srli_epi16(o128, 8);
                            o128 = _mm_packus_epi16(o128, half);
                            _mm_storeu_si128(outline128++, o128);
                        }
                        break;
                }
            }
            else
            {
                // All four taps are zero: write a black row.
                if (decoder->StereoBufferFormat == DECODED_FORMAT_YUYV)
                {
                    // BUGFIX: memset() uses only the low byte of its value
                    // argument, so the old memset(dst, 0x10801080, ...) filled
                    // the row with 0x80 (mid grey), not YUYV black.  Write the
                    // Y=0x10 / chroma=0x80 pattern explicitly.
                    int n;
                    for (n = 0; n + 1 < widthbytes; n += 2)
                    {
                        dst[n] = 0x10;     // Y (black luma)
                        dst[n + 1] = 0x80; // U/V (neutral chroma)
                    }
                }
                else
                {
                    memset(dst, 0, widthbytes);
                }
            }
            yposf += ystepf;
            dst += pitch;
        }
    }
}
// Single-column variant of RGB48VerticalShiftZoom: for each output row only
// one pixel group at 'dst' is resampled (the caller is expected to position
// RGB48/pitch on the column of interest — note(review): confirm against the
// call sites, the 'xx' parameter is currently unused).
//
// Rows reachable by the 4-tap window are buffered first, then each output
// pixel is a lanczos[]-weighted blend of the four buffered rows, applied via
// _mm_mulhi_epi16 ((pixel * gain) >> 16) with saturating accumulation.
void RGB48VerticalShiftZoomFine(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer,
                                int widthbytes, int height, int pitch, float offset,
                                float zoom, int xx)
{
    float yposf, ystepf;
    unsigned short *scanline[4];
    int step;
    __m128i lA, lB, lC, lD, gA, gB, gC, gD, o128, t1;
    uint8_t *lineAPos, *lineBPos, *lineCPos, *lineDPos;
    uint8_t *outlinePos8;
    uint16_t *outlinePos16;

    (void)xx;   // unused; kept for interface compatibility

    offset = -offset;
    // Source row for output row 0, centred so the zoom expands about the
    // middle of the frame; advance 1/zoom source rows per output row.
    yposf = (float)height * (0.5f - 1.0f / (2.0f * zoom) - offset);
    ystepf = 1.0f / zoom;
    if (pitch < 0)
        yposf -= ystepf;

    // 'step' records bytes per pixel group; the cases below advance their
    // pointers directly, but the switch also rejects unknown formats.
    switch (decoder->StereoBufferFormat)
    {
        case DECODED_FORMAT_RGB32:
            step = 4;
            break;
        case DECODED_FORMAT_RGB24:
            step = 3;
            break;
        case DECODED_FORMAT_YUYV:
            step = 4;
            break;
        case DECODED_FORMAT_W13A:
        case DECODED_FORMAT_RG64:
            step = 8;
            break;
        case DECODED_FORMAT_WP13:
        case DECODED_FORMAT_RG48:
            step = 6;
            break;
        default:
            assert(0);
            break;
    }

    {
        static char zeroline[1024] = {0};
        int y, yoffset = ((int)(yposf - 2.0)), yend = ((int)(yposf + 2.0 + ystepf * height));
        unsigned char *src = (unsigned char *)RGB48;
        unsigned char *dst = (unsigned char *)RGB48;
        unsigned char *ptr = (unsigned char *)buffer;

        // Buffer all source rows the filter window can touch.
        if (yoffset < 0) yoffset = 0;
        if (yend > height) yend = height;
        src += pitch * yoffset;
        for (y = yoffset; y < yend; y++)
        {
            memcpy(ptr, src, widthbytes);
            ptr += widthbytes;
            src += pitch;
        }

        ptr = (unsigned char *)buffer;
        for (y = 0; y < height; y++)
        {
            int i, t, yp = ((int)yposf);
            int rmdr = 63 - ((int)(yposf * 64.0) & 63);   // subpixel phase, 0..63
            int gains[4];

            yp -= 1; // use -2 cause a image down shift //DAN20100225
            t = 0;
            // Select the four source rows and tap gains; rows outside the
            // image substitute a zero line, 't' accumulates the total gain.
            for (i = 0; i < 4; i++)
            {
                if (yp < 0 || yp >= height) // skip 0 line as the top line was zagged
                {
                    t += gains[i] = lanczos[rmdr];
                    scanline[i] = (unsigned short *)zeroline;
                }
                else
                {
                    t += gains[i] = lanczos[rmdr];
                    scanline[i] = (unsigned short *)&ptr[widthbytes * (yp - yoffset)];
                }
                yp++;
                rmdr += 64;   // next tap of the 64-phase table
            }

            if (t)
            {
                gA = _mm_set1_epi16(gains[0]);
                gB = _mm_set1_epi16(gains[1]);
                gC = _mm_set1_epi16(gains[2]);
                gD = _mm_set1_epi16(gains[3]);
                outlinePos8 = (uint8_t *)dst;
                outlinePos16 = (uint16_t *)dst;
                lineAPos = (uint8_t *)scanline[0];
                lineBPos = (uint8_t *)scanline[1];
                lineCPos = (uint8_t *)scanline[2];
                lineDPos = (uint8_t *)scanline[3];
                switch (decoder->StereoBufferFormat)
                {
                    case DECODED_FORMAT_W13A:
                        // Signed 13-bit RGBA: filter, clamp to 0..0x3fff, <<1.
                        lA = _mm_loadu_si128((__m128i *)lineAPos);
                        lineAPos += 8;
                        lB = _mm_loadu_si128((__m128i *)lineBPos);
                        lineBPos += 8;
                        lC = _mm_loadu_si128((__m128i *)lineCPos);
                        lineCPos += 8;
                        lD = _mm_loadu_si128((__m128i *)lineDPos);
                        lineDPos += 8;
                        o128 = _mm_mulhi_epi16(lA, gA);
                        t1 = _mm_mulhi_epi16(lB, gB);
                        o128 = _mm_adds_epi16(o128, t1);
                        t1 = _mm_mulhi_epi16(lC, gC);
                        o128 = _mm_adds_epi16(o128, t1);
                        t1 = _mm_mulhi_epi16(lD, gD);
                        o128 = _mm_adds_epi16(o128, t1);
                        // upper limit to 32767
                        o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
                        o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
                        o128 = _mm_slli_epi16(o128, 1);
                        // Store only the 4 valid components (unaligned tail safe).
                        outlinePos16[0] = _mm_extract_epi16(o128, 0);
                        outlinePos16[1] = _mm_extract_epi16(o128, 1);
                        outlinePos16[2] = _mm_extract_epi16(o128, 2);
                        outlinePos16[3] = _mm_extract_epi16(o128, 3);
                        outlinePos16 += 4;
                        break;

                    case DECODED_FORMAT_WP13:
                        // Signed 13-bit RGB: as W13A but 3 components.
                        lA = _mm_loadu_si128((__m128i *)lineAPos);
                        lineAPos += 6;
                        lB = _mm_loadu_si128((__m128i *)lineBPos);
                        lineBPos += 6;
                        lC = _mm_loadu_si128((__m128i *)lineCPos);
                        lineCPos += 6;
                        lD = _mm_loadu_si128((__m128i *)lineDPos);
                        lineDPos += 6;
                        o128 = _mm_mulhi_epi16(lA, gA);
                        t1 = _mm_mulhi_epi16(lB, gB);
                        o128 = _mm_adds_epi16(o128, t1);
                        t1 = _mm_mulhi_epi16(lC, gC);
                        o128 = _mm_adds_epi16(o128, t1);
                        t1 = _mm_mulhi_epi16(lD, gD);
                        o128 = _mm_adds_epi16(o128, t1);
                        // upper limit to 32767
                        o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
                        o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
                        o128 = _mm_slli_epi16(o128, 1);
                        outlinePos16[0] = _mm_extract_epi16(o128, 0);
                        outlinePos16[1] = _mm_extract_epi16(o128, 1);
                        outlinePos16[2] = _mm_extract_epi16(o128, 2);
                        outlinePos16 += 3;
                        break;

                    case DECODED_FORMAT_RG64:
                        // Unsigned 16-bit RGBA: >>3 before the signed multiply,
                        // clamp to 0..0x0fff, <<4 back.
                        lA = _mm_loadu_si128((__m128i *)lineAPos);
                        lineAPos += 8;
                        lB = _mm_loadu_si128((__m128i *)lineBPos);
                        lineBPos += 8;
                        lC = _mm_loadu_si128((__m128i *)lineCPos);
                        lineCPos += 8;
                        lD = _mm_loadu_si128((__m128i *)lineDPos);
                        lineDPos += 8;
                        lA = _mm_srli_epi16(lA, 3); //13-bit unsigned
                        lB = _mm_srli_epi16(lB, 3); //13-bit unsigned
                        lC = _mm_srli_epi16(lC, 3); //13-bit unsigned
                        lD = _mm_srli_epi16(lD, 3); //13-bit unsigned
                        o128 = _mm_mulhi_epi16(lA, gA);
                        t1 = _mm_mulhi_epi16(lB, gB);
                        o128 = _mm_adds_epi16(o128, t1);
                        t1 = _mm_mulhi_epi16(lC, gC);
                        o128 = _mm_adds_epi16(o128, t1);
                        t1 = _mm_mulhi_epi16(lD, gD);
                        o128 = _mm_adds_epi16(o128, t1);
                        o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
                        o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
                        o128 = _mm_slli_epi16(o128, 4);
                        outlinePos16[0] = _mm_extract_epi16(o128, 0);
                        outlinePos16[1] = _mm_extract_epi16(o128, 1);
                        outlinePos16[2] = _mm_extract_epi16(o128, 2);
                        outlinePos16[3] = _mm_extract_epi16(o128, 3);
                        outlinePos16 += 4;
                        break;

                    case DECODED_FORMAT_RG48:
                        // Unsigned 16-bit RGB: as RG64 but 3 components.
                        lA = _mm_loadu_si128((__m128i *)lineAPos);
                        lineAPos += 6;
                        lB = _mm_loadu_si128((__m128i *)lineBPos);
                        lineBPos += 6;
                        lC = _mm_loadu_si128((__m128i *)lineCPos);
                        lineCPos += 6;
                        lD = _mm_loadu_si128((__m128i *)lineDPos);
                        lineDPos += 6;
                        lA = _mm_srli_epi16(lA, 3); //13-bit unsigned
                        lB = _mm_srli_epi16(lB, 3); //13-bit unsigned
                        lC = _mm_srli_epi16(lC, 3); //13-bit unsigned
                        lD = _mm_srli_epi16(lD, 3); //13-bit unsigned
                        o128 = _mm_mulhi_epi16(lA, gA);
                        t1 = _mm_mulhi_epi16(lB, gB);
                        o128 = _mm_adds_epi16(o128, t1);
                        t1 = _mm_mulhi_epi16(lC, gC);
                        o128 = _mm_adds_epi16(o128, t1);
                        t1 = _mm_mulhi_epi16(lD, gD);
                        o128 = _mm_adds_epi16(o128, t1);
                        o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
                        o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
                        o128 = _mm_slli_epi16(o128, 4);
                        outlinePos16[0] = _mm_extract_epi16(o128, 0);
                        outlinePos16[1] = _mm_extract_epi16(o128, 1);
                        outlinePos16[2] = _mm_extract_epi16(o128, 2);
                        outlinePos16 += 3;
                        break;

                    case DECODED_FORMAT_RGB32:
                    case DECODED_FORMAT_YUYV:
                        // BUGFIX: these loads previously used _mm_unpackhi_epi8,
                        // which expands source bytes 8..15, so the four output
                        // bytes written below came from 8 bytes to the right of
                        // the current position.  _mm_unpacklo_epi8 expands
                        // bytes 0..3 as required (matching the RGB24 scalar
                        // path, which reads lineAPos[0..2] directly).
                        lA = _mm_loadu_si128((__m128i *)lineAPos);
                        lineAPos += 4;
                        lA = _mm_unpacklo_epi8 (_mm_setzero_si128(), lA);
                        lB = _mm_loadu_si128((__m128i *)lineBPos);
                        lineBPos += 4;
                        lB = _mm_unpacklo_epi8 (_mm_setzero_si128(), lB);
                        lC = _mm_loadu_si128((__m128i *)lineCPos);
                        lineCPos += 4;
                        lC = _mm_unpacklo_epi8 (_mm_setzero_si128(), lC);
                        lD = _mm_loadu_si128((__m128i *)lineDPos);
                        lineDPos += 4;
                        lD = _mm_unpacklo_epi8 (_mm_setzero_si128(), lD);
                        lA = _mm_srli_epi16(lA, 3); //13-bit unsigned
                        lB = _mm_srli_epi16(lB, 3); //13-bit unsigned
                        lC = _mm_srli_epi16(lC, 3); //13-bit unsigned
                        lD = _mm_srli_epi16(lD, 3); //13-bit unsigned
                        o128 = _mm_mulhi_epi16(lA, gA);
                        t1 = _mm_mulhi_epi16(lB, gB);
                        o128 = _mm_adds_epi16(o128, t1);
                        t1 = _mm_mulhi_epi16(lC, gC);
                        o128 = _mm_adds_epi16(o128, t1);
                        t1 = _mm_mulhi_epi16(lD, gD);
                        o128 = _mm_adds_epi16(o128, t1);
                        o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
                        o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
                        o128 = _mm_srli_epi16(o128, 4);   // back to 8-bit range
                        outlinePos8[0] = _mm_extract_epi16(o128, 0);
                        outlinePos8[1] = _mm_extract_epi16(o128, 1);
                        outlinePos8[2] = _mm_extract_epi16(o128, 2);
                        outlinePos8[3] = _mm_extract_epi16(o128, 3);
                        outlinePos8 += 4;
                        break;

                    case DECODED_FORMAT_RGB24:
                        // Scalar path: SSE2 cannot address 3-byte pixels
                        // efficiently, so blend each component in integers.
                        {
                            int r, g, b;
                            b = ((lineAPos[0] * gains[0]) >> 7) +
                                ((lineBPos[0] * gains[1]) >> 7) +
                                ((lineCPos[0] * gains[2]) >> 7) +
                                ((lineDPos[0] * gains[3]) >> 7); //16-bit
                            g = ((lineAPos[1] * gains[0]) >> 7) +
                                ((lineBPos[1] * gains[1]) >> 7) +
                                ((lineCPos[1] * gains[2]) >> 7) +
                                ((lineDPos[1] * gains[3]) >> 7); //16-bit
                            r = ((lineAPos[2] * gains[0]) >> 7) +
                                ((lineBPos[2] * gains[1]) >> 7) +
                                ((lineCPos[2] * gains[2]) >> 7) +
                                ((lineDPos[2] * gains[3]) >> 7); //16-bit
                            if (r < 0) r = 0;
                            if (r > 65535) r = 65535;
                            if (g < 0) g = 0;
                            if (g > 65535) g = 65535;
                            if (b < 0) b = 0;
                            if (b > 65535) b = 65535;
                            lineAPos += 3;
                            lineBPos += 3;
                            lineCPos += 3;
                            lineDPos += 3;
                            outlinePos8[0] = b >> 8; //b
                            outlinePos8[1] = g >> 8; //g
                            outlinePos8[2] = r >> 8; //r
                            outlinePos8 += 3;
                        }
                        break;
                }
            }
            else
            {
                // All four taps are zero: write a black row.
                if (decoder->StereoBufferFormat == DECODED_FORMAT_YUYV)
                {
                    // BUGFIX: memset() uses only the low byte of its value
                    // argument, so the old memset(dst, 0x10801080, ...) filled
                    // with 0x80 (grey), not YUYV black.  Write the pattern
                    // explicitly.
                    int n;
                    for (n = 0; n + 1 < widthbytes; n += 2)
                    {
                        dst[n] = 0x10;     // Y (black luma)
                        dst[n + 1] = 0x80; // U/V (neutral chroma)
                    }
                }
                else
                {
                    memset(dst, 0, widthbytes);
                }
            }
            yposf += ystepf;
            dst += pitch;
        }
    }
}
// Vertically shift the image in place by a (possibly fractional) number of
// rows, using the file-scope 4-tap gains[][] subpixel table (SUBPIXEL
// phases).  A ring of four buffered source rows in 'buffer' slides down the
// image; each output row is the gain-weighted blend of those four rows.
// 'offset' is the shift as a fraction of the frame height; negative offsets
// process the image bottom-up with the tap order reversed.
// NOTE(review): scanline[0] is reused as a staging area for partial stores
// at the right edge (origwidthextra), which is safe because that ring slot
// is rotated out before it is next read — confirm if the rotation order is
// ever changed.
void RGB48VerticalShift(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer,
                        int widthbytes, int height, int pitch, float offset)
{
    float yposf, remainf;
    int yposi, tablepos, x, y;
    int gainA, gainB, gainC, gainD;
    //int endofSSEline = 0;
    unsigned short *scanline[4], *tline;
    int spitch = pitch / 2;          // pitch in 16-bit units
    int neg = 0, shift = 0, skip, step;
    int origwidthbytes = widthbytes;
    int origwidthextra;
    __m128i lA, lB, lC, lD, gA, gB, gC, gD, o128, t1;
    __m128i *lineA, *lineB, *lineC, *lineD, *outline128;
    // offset = -offset;
    if (offset < 0.0)
        neg = 1;
    // Split the shift into whole rows (yposi) and a subpixel phase index
    // (tablepos) into the gains[][] table.
    yposf = height * offset;
    yposi = (int)floor(yposf);
    remainf = yposf - (float)yposi;
    tablepos = (int)(remainf * (float)SUBPIXEL);
    yposi = abs(yposi);
    if (yposi == 0 && tablepos == 0)
        return; // no move required
    // -3 , 0  best small notch at zero?
    //
    // For a negative (bottom-up) shift the four taps are applied in
    // reverse order.
    if (neg)
    {
        yposi -= 2;
        gainA = gains[tablepos][0];
        gainB = gains[tablepos][1];
        gainC = gains[tablepos][2];
        gainD = gains[tablepos][3];
    }
    else
    {
        yposi -= 1; //offset inherent in the table
        gainD = gains[tablepos][0];
        gainC = gains[tablepos][1];
        gainB = gains[tablepos][2];
        gainA = gains[tablepos][3];
    }
    gA = _mm_set1_epi16(gainA);
    gB = _mm_set1_epi16(gainB);
    gC = _mm_set1_epi16(gainC);
    gD = _mm_set1_epi16(gainD);
    // skip = bytes per pixel, step = bytes consumed per inner iteration.
    switch (decoder->StereoBufferFormat)
    {
        case DECODED_FORMAT_RGB32:
            skip = 4;
            step = 16;
            break;
        case DECODED_FORMAT_RGB24:
            skip = 3;
            step = 16;
            break;
        case DECODED_FORMAT_YUYV:
            skip = 2;
            step = 16;
            break;
        case DECODED_FORMAT_WP13:
        case DECODED_FORMAT_RG48:
        case DECODED_FORMAT_W13A:
        case DECODED_FORMAT_RG64:
        default:
            skip = 6;
            step = 32;
            break;
    }
    // scanline[0] = buffer;
    // scanline[1] = buffer + width*skip/2;
    // scanline[2] = buffer + width*skip/2*2;
    // scanline[3] = buffer + width*skip/2*3;
    // Round the working width up to a whole number of SSE steps and record
    // how many real bytes the final partial step contains.
    widthbytes += (step - 1);
    widthbytes -= (widthbytes % step);
    origwidthextra = (origwidthbytes % step);
    // Four-slot ring buffer of source rows inside 'buffer'.
    scanline[0] = buffer;
    scanline[1] = buffer + widthbytes / 2;
    scanline[2] = buffer + widthbytes / 2 * 2;
    scanline[3] = buffer + widthbytes / 2 * 3;
    // Prime the ring with the first four source rows (zero where the tap
    // window falls outside the image).
    for (y = 0; y < 4; y++)
    {
        if (yposi + y >= 0 && yposi + y < height)
        {
            unsigned short *ptr = RGB48;
            if (neg)
                ptr += (height - 1 - yposi - y) * spitch;
            else
                ptr += (yposi + y) * spitch;
            memcpy(scanline[y], ptr, origwidthbytes);
        }
        else
        {
            memset(scanline[y], 0, origwidthbytes);
        }
    }
    {
        for (y = 0; y < height; y++)
        {
            unsigned short *ptr = RGB48;
            if (neg)
                ptr += (height - y - 1) * spitch;
            else
                ptr += y * spitch;
            outline128 = (__m128i *)ptr;
            lineA = (__m128i *)scanline[0];
            lineB = (__m128i *)scanline[1];
            lineC = (__m128i *)scanline[2];
            lineD = (__m128i *)scanline[3];
            //for(x=0;x<width*skip/2; x+=step)
            for (x = 0; x < widthbytes; x += step)
            {
                __m128i half;
                // First 16 bytes: load per-format (shift records whether the
                // data was pre-shifted to 13-bit and so needs <<4 after the
                // blend, vs. the native 13-bit formats which need <<1).
                switch (decoder->StereoBufferFormat)
                {
                    case DECODED_FORMAT_W13A:
                    case DECODED_FORMAT_WP13:
                    {
                        lA = _mm_loadu_si128(lineA++);
                        lB = _mm_loadu_si128(lineB++);
                        lC = _mm_loadu_si128(lineC++);
                        lD = _mm_loadu_si128(lineD++);
                        shift = 0;
                    }
                    break;
                    case DECODED_FORMAT_RG64:
                    case DECODED_FORMAT_RG48:
                    {
                        lA = _mm_loadu_si128(lineA++);
                        lA = _mm_srli_epi16(lA, 3); //13-bit unsigned
                        lB = _mm_loadu_si128(lineB++);
                        lB = _mm_srli_epi16(lB, 3); //13-bit unsigned
                        lC = _mm_loadu_si128(lineC++);
                        lC = _mm_srli_epi16(lC, 3); //13-bit unsigned
                        lD = _mm_loadu_si128(lineD++);
                        lD = _mm_srli_epi16(lD, 3); //13-bit unsigned
                        shift = 3;
                    }
                    break;
                    case DECODED_FORMAT_RGB32:
                    case DECODED_FORMAT_RGB24:
                    case DECODED_FORMAT_YUYV:
                        // 8-bit data: expand the high 8 bytes to words first.
                        lA = _mm_loadu_si128(lineA);
                        lA = _mm_unpackhi_epi8 (_mm_setzero_si128(), lA);
                        lB = _mm_loadu_si128(lineB);
                        lB = _mm_unpackhi_epi8 (_mm_setzero_si128(), lB);
                        lC = _mm_loadu_si128(lineC);
                        lC = _mm_unpackhi_epi8 (_mm_setzero_si128(), lC);
                        lD = _mm_loadu_si128(lineD);
                        lD = _mm_unpackhi_epi8 (_mm_setzero_si128(), lD);
                        lA = _mm_srli_epi16(lA, 3); //13-bit unsigned
                        lB = _mm_srli_epi16(lB, 3); //13-bit unsigned
                        lC = _mm_srli_epi16(lC, 3); //13-bit unsigned
                        lD = _mm_srli_epi16(lD, 3); //13-bit unsigned
                        shift = 3;
                        break;
                }
                // Blend the four taps: (pixel * gain) >> 16 per tap with
                // saturating accumulation.
                o128 = _mm_mulhi_epi16(lA, gA);
                t1 = _mm_mulhi_epi16(lB, gB);
                o128 = _mm_adds_epi16(o128, t1);
                t1 = _mm_mulhi_epi16(lC, gC);
                o128 = _mm_adds_epi16(o128, t1);
                t1 = _mm_mulhi_epi16(lD, gD);
                o128 = _mm_adds_epi16(o128, t1);
                if (shift)
                {
                    o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
                    o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
                    o128 = _mm_slli_epi16(o128, 4);
                }
                else
                {
                    // upper limit to 32767
                    o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
                    o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
                    o128 = _mm_slli_epi16(o128, 1);
                }
                if (skip == 6) //RGB48 || WP13
                {
                    // Full store, or partial store at the right edge staged
                    // through scanline[0] (about to be rotated out below).
                    if (widthbytes == origwidthbytes || x + 16 < origwidthbytes)
                        _mm_storeu_si128(outline128++, o128);
                    else
                    {
                        //if(x < origwidthbytes+16/*bytes in an SSE2 reg*/)
                        _mm_storeu_si128((__m128i *)scanline[0], o128);
                        memcpy((char *)outline128, (char *)scanline[0], origwidthextra);
                        outline128++;
                    }
                }
                else
                {
                    // 8-bit formats: hold the high half until the low half
                    // is blended so both can be packed into one store.
                    half = o128;
                }
                // Second 16 bytes of the step (low half for 8-bit formats).
                switch (decoder->StereoBufferFormat)
                {
                    case DECODED_FORMAT_W13A:
                    case DECODED_FORMAT_WP13:
                    {
                        lA = _mm_loadu_si128(lineA++);
                        lB = _mm_loadu_si128(lineB++);
                        lC = _mm_loadu_si128(lineC++);
                        lD = _mm_loadu_si128(lineD++);
                        shift = 0;
                    }
                    break;
                    case DECODED_FORMAT_RG64:
                    case DECODED_FORMAT_RG48:
                    {
                        lA = _mm_loadu_si128(lineA++);
                        lA = _mm_srli_epi16(lA, 3); //13-bit unsigned
                        lB = _mm_loadu_si128(lineB++);
                        lB = _mm_srli_epi16(lB, 3); //13-bit unsigned
                        lC = _mm_loadu_si128(lineC++);
                        lC = _mm_srli_epi16(lC, 3); //13-bit unsigned
                        lD = _mm_loadu_si128(lineD++);
                        lD = _mm_srli_epi16(lD, 3); //13-bit unsigned
                        shift = 3;
                    }
                    break;
                    case DECODED_FORMAT_RGB32:
                    case DECODED_FORMAT_RGB24:
                    case DECODED_FORMAT_YUYV:
                        lA = _mm_loadu_si128(lineA++);
                        lA = _mm_unpacklo_epi8 (_mm_setzero_si128(), lA);
                        lB = _mm_loadu_si128(lineB++);
                        lB = _mm_unpacklo_epi8 (_mm_setzero_si128(), lB);
                        lC = _mm_loadu_si128(lineC++);
                        lC = _mm_unpacklo_epi8 (_mm_setzero_si128(), lC);
                        lD = _mm_loadu_si128(lineD++);
                        lD = _mm_unpacklo_epi8 (_mm_setzero_si128(), lD);
                        lA = _mm_srli_epi16(lA, 3); //13-bit unsigned
                        lB = _mm_srli_epi16(lB, 3); //13-bit unsigned
                        lC = _mm_srli_epi16(lC, 3); //13-bit unsigned
                        lD = _mm_srli_epi16(lD, 3); //13-bit unsigned
                        shift = 3;
                        break;
                }
                o128 = _mm_mulhi_epi16(lA, gA);
                t1 = _mm_mulhi_epi16(lB, gB);
                o128 = _mm_adds_epi16(o128, t1);
                t1 = _mm_mulhi_epi16(lC, gC);
                o128 = _mm_adds_epi16(o128, t1);
                t1 = _mm_mulhi_epi16(lD, gD);
                o128 = _mm_adds_epi16(o128, t1);
                if (shift)
                {
                    o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
                    o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
                    o128 = _mm_slli_epi16(o128, 4);
                }
                else
                {
                    // upper limit to 32767
                    o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
                    o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
                    o128 = _mm_slli_epi16(o128, 1);
                }
                if (skip != 6) //!RGB48 || !WP13
                {
                    // Pack low and high halves back to 16 bytes of 8-bit data.
                    half = _mm_srli_epi16(half, 8);
                    o128 = _mm_srli_epi16(o128, 8);
                    o128 = _mm_packus_epi16(o128, half);
                }
                if (widthbytes == origwidthbytes || x + 32 < origwidthbytes)
                {
                    _mm_storeu_si128(outline128++, o128);
                }
                else
                {
                    //if(x+16 < origwidthbytes+16)
                    if (origwidthextra > 16)
                    {
                        _mm_storeu_si128((__m128i *)scanline[0], o128);
                        memcpy((char *)outline128, (char *)scanline[0], origwidthextra - 16);
                    }
                    outline128++;
                }
            }
            // Rotate the ring and pull in the next source row (or zeros).
            tline = scanline[0];
            scanline[0] = scanline[1];
            scanline[1] = scanline[2];
            scanline[2] = scanline[3];
            scanline[3] = tline;
            if (yposi + y + 4 >= 0 && yposi + y + 4 < height)
            {
                unsigned short *ptr = RGB48;
                if (neg)
                    ptr += (height - 1 - (yposi + y + 4)) * spitch;
                else
                    ptr += (yposi + y + 4) * spitch;
                memcpy(scanline[3], ptr, origwidthbytes);
            }
            else
            {
                memset(scanline[3], 0, origwidthbytes);
            }
        }
    }
}
// Horizontally shift/zoom one RGB48 (or WP13) scanline in place with a 4-tap
// Lanczos resample, including the "frame dynamic" non-linear horizontal
// stretch: the region [holdstart, holdend] keeps a uniform (flattened) step
// while the step outside it is ramped so the total width is preserved.
//
//  RGB48     - one scanline, 3 components per pixel, processed in place
//  buffer    - scratch copy of the scanline
//  line      - row index, used to tilt the shift across the frame (roffset)
//  hoffset   - horizontal shift as a fraction of the width
//  roffset   - rotation term: shift varies linearly with 'line'
//  zoom      - horizontal scale about the centre (adjusted by frameTilt/eye)
//  flip      - mirror the scanline first (pixels swapped end-for-end)
void RGB48HoriShiftZoom(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width, int height, int line, float hoffset, float roffset, float zoom, int flip, float frameTilt, int eye)
{
    float xposf, xstepf;
    int x;
    //int endofSSEline = 0;
    unsigned short *scanline = (unsigned short *)buffer;
    short *sscanline = (short *)buffer;   // signed view for WP13 data
    int neg = 0;
    float offset = hoffset;
    // Optional horizontal mirror: swap 3-component pixels end-for-end.
    if (flip)
    {
        unsigned short *ptrL = RGB48;
        unsigned short *ptrR = RGB48;
        ptrR += (width * 3) - 3;
        for (x = 0; x < width / 2; x++)
        {
            int t;
            t = *ptrL;
            *ptrL++ = *ptrR;
            *ptrR++ = t;
            t = *ptrL;
            *ptrL++ = *ptrR;
            *ptrR++ = t;
            t = *ptrL;
            *ptrL++ = *ptrR;
            *ptrR++ = t;
            ptrR -= 6;   // net -3: back one whole pixel on the right side
        }
    }
    // Per-eye keystone correction folds into the zoom factor.
    if (eye > 0)
    {
        zoom *= 1.0f + frameTilt;
    }
    else
    {
        zoom /= 1.0f + frameTilt;
    }
    // Source position for output pixel 0: centre the zoom, apply the shift,
    // and tilt it across rows for the rotation term.
    xposf = (float)width * (0.5f - 1.0f / (2.0f * zoom) - offset);
    xposf -= width * roffset * 0.5f / zoom;
    xposf += (float)line * ((float)width * roffset / ((float)height * zoom));
    if (xposf < 0.0)
        neg = 1;
    xstepf = 1.0f / zoom;
    // Work from a copy so the in-place write never reads overwritten pixels.
    memcpy(scanline, RGB48, width * 3 * 2);
    {
        //unsigned short zeroline[3] = {0};
        int xx = 0;
        int ixpos = (int)(xposf * 65536.0f);    // 16.16 fixed-point position
        int ixstep = (int)(xstepf * 65536.0f);  // 16.16 fixed-point step
        float xbase = xposf / (float)width;     // normalized position (0..1)
        float xstep = xstepf / (float)width;    // normalized step
        float z = (decoder->cfhddata.FrameHDynamic - 1.0f) * 2.0f;
        //	int holdstart = width*5/10;  // Use to specify a area of uniform stretch
        //	int holdend = width*5/10;
        // Region of uniform ("flat") step, clamped to the frame.
        int holdstart = (int)((decoder->cfhddata.FrameHDynCenter - decoder->cfhddata.FrameHDynWidth * 0.125) * (float)width);
        int holdend = (int)((decoder->cfhddata.FrameHDynCenter + decoder->cfhddata.FrameHDynWidth * 0.125) * (float)width);
        float flatxstep;
        float modified_xstep_avg;
        float bottomxstep;
        float basexstepstart;
        float basexstepend;
        float range;
        if (holdstart < 0) holdstart = 0, holdend = (int)((decoder->cfhddata.FrameHDynWidth * 0.5) * (float)width);
        if (holdend > width) holdend = width, holdstart = (int)((1.0 - decoder->cfhddata.FrameHDynWidth * 0.5) * (float)width);
        range = (float)(holdend - holdstart);
        // Steps chosen so the flat region plus the two ramped edge regions
        // still integrate to the full source width.
        flatxstep = xstep - z * 0.5f * xstep;
        modified_xstep_avg = (xstep * (float)width - range * flatxstep) / ((float)width - range);
        bottomxstep = modified_xstep_avg - (flatxstep - modified_xstep_avg);
        if (holdstart == (width - holdend))
        {
            basexstepstart = bottomxstep;
            basexstepend = bottomxstep;
        }
        else if (holdstart < (width - holdend))
        {
            // Asymmetric hold region: balance the edge ramps so the average
            // step is preserved on each side.
            float a = (float)holdstart / (float)(width - holdend);
            float startavg = a * modified_xstep_avg + (1.0f - a) * flatxstep;
            float endavg = (modified_xstep_avg * ((float)width - range) - startavg * (float)holdstart) / (float)(width - holdend);
            basexstepstart = startavg - (flatxstep - startavg);
            basexstepend = endavg - (flatxstep - endavg);
        }
        else
        {
            float a = (float)(width - holdend) / (float)holdstart;
            float endavg = a * modified_xstep_avg + (1.0f - a) * flatxstep;
            float startavg = (modified_xstep_avg * ((float)width - range) - endavg * (float)(width - holdend)) / (float)holdstart;
            basexstepstart = startavg - (flatxstep - startavg);
            basexstepend = endavg - (flatxstep - endavg);
        }
        // WP13 holds signed 13-bit components, so sample through the signed
        // view; otherwise sample unsigned 16-bit.  The two loops are
        // otherwise identical.
        if (decoder->StereoBufferFormat == DECODED_FORMAT_WP13)
        {
            float fxpos = xbase;
            for (x = 0; x < width; x++) //RGB
            {
                int gains = 0;
                int xp, rmdr;
                if (z != 0.0)
                {
                    // Non-linear step: ramp across the left edge, flat in the
                    // hold region, ramp across the right edge.
                    if (x < holdstart)
                    {
                        fxpos += basexstepstart * ((float)(holdstart - x) / (float)holdstart) + flatxstep * ((float)x / (float)holdstart);
                    }
                    else if (x > holdend)
                    {
                        int diff = width - x;
                        int range = width - holdend;
                        fxpos += basexstepend * ((float)(range - diff) / (float)range) + flatxstep * ((float)(diff) / (float)range);
                    }
                    else
                    {
                        fxpos += flatxstep;
                    }
                    xp = (int)(fxpos * 65536.0f * (float)width);
                    rmdr = 63 - ((xp >> 10) & 63);   // subpixel phase, 0..63
                    xp >>= 16;
                }
                else
                {
                    // Pure linear zoom: walk the 16.16 fixed-point position.
                    xp = ixpos >> 16;
                    rmdr = 63 - ((ixpos >> 10) & 63);
                    ixpos += ixstep;
                }
                xp -= 1;// was -2 causing a right shift //DAN20100225
                {
                    int i, r = 0, g = 0, b = 0;
                    // 4-tap blend; out-of-range taps accumulate their gain
                    // onto the next in-range tap (edge extension).
                    for (i = 0; i < 4; i++)
                    {
                        if (xp <= 0 || xp >= width)
                        {
                            gains += lanczos[rmdr] >> 1;
                        }
                        else
                        {
                            gains += lanczos[rmdr] >> 1;
                            r += (gains * sscanline[xp * 3]);
                            g += (gains * sscanline[xp * 3 + 1]);
                            b += (gains * sscanline[xp * 3 + 2]);
                            gains = 0;
                        }
                        xp++;
                        rmdr += 64;
                    }
                    // Gains sum to ~1<<14; normalize and clamp to 16-bit.
                    r >>= 14;
                    g >>= 14;
                    b >>= 14;
                    if (r < 0) r = 0;
                    else if (r > 65535) r = 65535;
                    if (g < 0) g = 0;
                    else if (g > 65535) g = 65535;
                    if (b < 0) b = 0;
                    else if (b > 65535) b = 65535;
                    RGB48[xx] = r;
                    RGB48[xx + 1] = g;
                    RGB48[xx + 2] = b;
                }
                xx += 3;
            }
        }
        else
        {
            float fxpos = xbase;
            for (x = 0; x < width; x++) //RGB
            {
                int gains = 0;
                int xp, rmdr;
                if (z != 0.0)
                {
                    if (x < holdstart)
                    {
                        fxpos += basexstepstart * ((float)(holdstart - x) / (float)holdstart) + flatxstep * ((float)x / (float)holdstart);
                    }
                    else if (x > holdend)
                    {
                        int diff = width - x;
                        int range = width - holdend;
                        fxpos += basexstepend * ((float)(range - diff) / (float)range) + flatxstep * ((float)(diff) / (float)range);
                    }
                    else
                    {
                        fxpos += flatxstep;
                    }
                    xp = (int)(fxpos * 65536.0f * (float)width);
                    rmdr = 63 - ((xp >> 10) & 63);
                    xp >>= 16;
                }
                else
                {
                    xp = ixpos >> 16;
                    rmdr = 63 - ((ixpos >> 10) & 63);
                    ixpos += ixstep;
                }
                xp -= 1; // was -2 causing a right shift //DAN20100225
                {
                    int i, r = 0, g = 0, b = 0;
                    for (i = 0; i < 4; i++)
                    {
                        if (xp <= 0 || xp >= width)
                        {
                            gains += lanczos[rmdr] >> 1;
                        }
                        else
                        {
                            gains += lanczos[rmdr] >> 1;
                            r += (gains * scanline[xp * 3]);
                            g += (gains * scanline[xp * 3 + 1]);
                            b += (gains * scanline[xp * 3 + 2]);
                            gains = 0;
                        }
                        xp++;
                        rmdr += 64;
                    }
                    r >>= 14;
                    g >>= 14;
                    b >>= 14;
                    if (r < 0) r = 0;
                    else if (r > 65535) r = 65535;
                    if (g < 0) g = 0;
                    else if (g > 65535) g = 65535;
                    if (b < 0) b = 0;
                    else if (b > 65535) b = 65535;
                    RGB48[xx] = r;
                    RGB48[xx + 1] = g;
                    RGB48[xx + 2] = b;
                }
                xx += 3;
            }
        }
    }
}
// Horizontally shift/zoom one RGBA64 (4 components per pixel) scanline in
// place.  A copy of the line is staged in `buffer`, then resampled back into
// RGB48 with a 4-tap windowed-sinc filter (lanczos[] table, 64 sub-pixel
// phases, >>14 normalization).  decoder->cfhddata.FrameHDynamic drives a
// non-uniform stretch (z != 0) with a hold zone at mid-frame; otherwise a
// uniform 16.16 fixed-point step is used.  `eye` selects which stereo channel
// the frameTilt zoom correction is applied to; `flip` mirrors the line first.
void RGBA64HoriShiftZoom(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width, int height, int line, float hoffset, float roffset, float zoom, int flip, float frameTilt, int eye)
{
    float xposf, xstepf;
    int x;
    unsigned short *scanline = (unsigned short *)buffer;
    short *sscanline = (short *)buffer; // signed alias for W13A (signed 13-bit) data
    int neg = 0;                        // kept for parity with the RGB48 variant (unused here)
    float offset = hoffset;

    if (flip)
    {
        // Mirror the scanline: swap pixel x with pixel (width-1-x), all four
        // components (R,G,B,A).
        unsigned short *ptrL = RGB48;
        unsigned short *ptrR = RGB48;
        ptrR += (width * 4) - 4;
        for (x = 0; x < width / 2; x++)
        {
            int t;
            t = *ptrL;
            *ptrL++ = *ptrR;
            *ptrR++ = t;
            t = *ptrL;
            *ptrL++ = *ptrR;
            *ptrR++ = t;
            t = *ptrL;
            *ptrL++ = *ptrR;
            *ptrR++ = t;
            t = *ptrL;
            *ptrL++ = *ptrR;
            *ptrR++ = t;
            // The four swaps above advanced ptrR by 4 elements; rewind 8 so
            // the right-hand pointer moves one whole pixel LEFT per
            // iteration.  (Bug fix: this was "ptrR -= 4", which left ptrR
            // parked on the last pixel and corrupted the mirror for
            // 4-component data — compare the correct 3-component flips.)
            ptrR -= 8;
        }
    }

    // Apply the frame tilt as a per-eye zoom correction.
    if (eye > 0)
    {
        zoom *= 1.0f + frameTilt;
    }
    else
    {
        zoom /= 1.0f + frameTilt;
    }

    // Starting source position: center the zoom, then apply the horizontal
    // offset plus a per-line rotation component (roffset) of the tilt.
    xposf = (float)width * (0.5f - 1.0f / (2.0f * zoom) - offset);
    xposf -= width * roffset * 0.5f;
    xposf += line * (width * roffset / ((float)height * zoom));
    if (xposf < 0.0)
        neg = 1;
    xstepf = 1.0f / zoom;

    // Stage the source line so the filtered result can be written back into
    // RGB48 in place (width pixels * 4 components * 2 bytes).
    memcpy(scanline, RGB48, width * 4 * 2);

    {
        int xx = 0;
        int ixpos = (int)(xposf * 65536.0f);   // 16.16 fixed-point position
        int ixstep = (int)(xstepf * 65536.0f); // 16.16 fixed-point step
        float xbase = xposf / (float)width;
        float xstep = xstepf / (float)width;
        float z = (decoder->cfhddata.FrameHDynamic - 1.0f) * 2.0f;
        int holdstart = width * 5 / 10; // Use to specify a area of uniform stretch
        int holdend = width * 5 / 10;
        float flatxstep = xstep - z * 0.5f * xstep;
        float modified_xstep_avg = (xstep * (float)width - (float)(holdend - holdstart) * flatxstep) / (float)(width - (holdend - holdstart));
        float bottomxstep = modified_xstep_avg - (flatxstep - modified_xstep_avg);
        // Clamp the ramp endpoints so neither step goes negative while the
        // average step (total span) is preserved.
        if (bottomxstep < 0.0)
        {
            bottomxstep = 0.0;
            flatxstep = modified_xstep_avg + modified_xstep_avg;
        }
        if (flatxstep < 0.0)
        {
            flatxstep = 0.0;
            bottomxstep = modified_xstep_avg - (flatxstep - modified_xstep_avg);
        }

        if (decoder->StereoBufferFormat == DECODED_FORMAT_W13A)
        {
            // Signed 13-bit (W13A) path: sample through the signed alias.
            float fxpos = xbase;
            for (x = 0; x < width; x++) //RGBA
            {
                int gains = 0;
                int xp, rmdr;
                if (z != 0.0)
                {
                    // Non-uniform stretch: blend the step between bottomxstep
                    // and flatxstep on either side of the hold zone.
                    if (x < holdstart)
                    {
                        fxpos += bottomxstep * ((float)(holdstart - x) / (float)holdstart) + flatxstep * ((float)x / (float)holdstart);
                    }
                    else if (x > holdend)
                    {
                        int diff = width - x;
                        int range = width - holdend;
                        fxpos += bottomxstep * ((float)(range - diff) / (float)range) + flatxstep * ((float)(diff) / (float)range);
                    }
                    else
                    {
                        fxpos += flatxstep;
                    }
                    xp = (int)(fxpos * 65536.0f * (float)width);
                    rmdr = 63 - ((xp >> 10) & 63); // 64-phase filter index
                    xp >>= 16;
                }
                else
                {
                    xp = ixpos >> 16;
                    rmdr = 63 - ((ixpos >> 10) & 63);
                    ixpos += ixstep;
                }
                xp -= 1;// was -2 causing a right shift //DAN20100225
                {
                    int i, r = 0, g = 0, b = 0, a = 0;
                    for (i = 0; i < 4; i++)
                    {
                        if (xp <= 0 || xp >= width)
                        {
                            // Out-of-range tap: carry its weight into the next
                            // in-range sample (edge clamp keeps gain sum constant).
                            gains += lanczos[rmdr] >> 1;
                        }
                        else
                        {
                            gains += lanczos[rmdr] >> 1;
                            r += (gains * sscanline[xp * 4]);
                            g += (gains * sscanline[xp * 4 + 1]);
                            b += (gains * sscanline[xp * 4 + 2]);
                            a += (gains * sscanline[xp * 4 + 3]);
                            gains = 0;
                        }
                        xp++;
                        rmdr += 64;
                    }
                    r >>= 14;
                    g >>= 14;
                    b >>= 14;
                    a >>= 14;
                    if (r < 0) r = 0;
                    else if (r > 65535) r = 65535;
                    if (g < 0) g = 0;
                    else if (g > 65535) g = 65535;
                    if (b < 0) b = 0;
                    else if (b > 65535) b = 65535;
                    if (a < 0) a = 0;
                    else if (a > 65535) a = 65535;
                    RGB48[xx] = r;
                    RGB48[xx + 1] = g;
                    RGB48[xx + 2] = b;
                    RGB48[xx + 3] = a;
                }
                xx += 4;
            }
        }
        else
        {
            // Unsigned 16-bit path: identical filter through the unsigned alias.
            float fxpos = xbase;
            for (x = 0; x < width; x++) //RGBA
            {
                int gains = 0;
                int xp, rmdr;
                if (z != 0.0)
                {
                    if (x < holdstart)
                    {
                        fxpos += bottomxstep * ((float)(holdstart - x) / (float)holdstart) + flatxstep * ((float)x / (float)holdstart);
                    }
                    else if (x > holdend)
                    {
                        int diff = width - x;
                        int range = width - holdend;
                        fxpos += bottomxstep * ((float)(range - diff) / (float)range) + flatxstep * ((float)(diff) / (float)range);
                    }
                    else
                    {
                        fxpos += flatxstep;
                    }
                    xp = (int)(fxpos * 65536.0f * (float)width);
                    rmdr = 63 - ((xp >> 10) & 63);
                    xp >>= 16;
                }
                else
                {
                    xp = ixpos >> 16;
                    rmdr = 63 - ((ixpos >> 10) & 63);
                    ixpos += ixstep;
                }
                xp -= 1; // was -2 causing a right shift //DAN20100225
                {
                    int i, r = 0, g = 0, b = 0, a = 0;
                    for (i = 0; i < 4; i++)
                    {
                        if (xp <= 0 || xp >= width)
                        {
                            gains += lanczos[rmdr] >> 1;
                        }
                        else
                        {
                            gains += lanczos[rmdr] >> 1;
                            r += (gains * scanline[xp * 4]);
                            g += (gains * scanline[xp * 4 + 1]);
                            b += (gains * scanline[xp * 4 + 2]);
                            a += (gains * scanline[xp * 4 + 3]);
                            gains = 0;
                        }
                        xp++;
                        rmdr += 64;
                    }
                    r >>= 14;
                    g >>= 14;
                    b >>= 14;
                    a >>= 14;
                    if (r < 0) r = 0;
                    else if (r > 65535) r = 65535;
                    if (g < 0) g = 0;
                    else if (g > 65535) g = 65535;
                    if (b < 0) b = 0;
                    else if (b > 65535) b = 65535;
                    if (a < 0) a = 0;
                    else if (a > 65535) a = 65535;
                    RGB48[xx] = r;
                    RGB48[xx + 1] = g;
                    RGB48[xx + 2] = b;
                    RGB48[xx + 3] = a;
                }
                xx += 4;
            }
        }
    }
}
// Apply a stereo "floating window" mask to one scanline: zero out (int)line
// whole pixels at the left (channel 0) or right (channel 1) edge, then fade
// the next pixel by the fractional remainder `frac` for a soft edge.
// windowMask is the mask width as a fraction of the image width; a negative
// value swaps which channel/edge is masked.  Signed buffer formats
// (W13A/WP13) are processed through a signed pointer so the fade multiply
// behaves correctly on negative samples; the structure is otherwise
// identical to the unsigned path.
void RGB48WindowMask(DECODER *decoder, unsigned short *RGB48, int width, int channel, float windowMask)
{
float line = (float)width * fabsf(windowMask);
int pixelbytes = 6; // 3 components * 2 bytes; 4-component formats use 8 (below)
float frac = (float)(line - (float)((int)line));
switch (decoder->StereoBufferFormat)
{
case DECODED_FORMAT_RGB32:
case DECODED_FORMAT_W13A:
case DECODED_FORMAT_RG64:
pixelbytes = 8;
break;
}
if (decoder->StereoBufferFormat == DECODED_FORMAT_W13A ||
decoder->StereoBufferFormat == DECODED_FORMAT_WP13) // signed math needed
{
short *ptrL = (short *)RGB48;
short *ptrR = (short *)RGB48;
if (windowMask < 0)
channel = channel == 0 ? 1 : 0;
if (pixelbytes == 6)
{
if (channel == 0)
{
// Left edge: clear whole pixels (memset size is in BYTES = pixels * 6),
// then fade the first surviving pixel's three components.
memset(ptrL, 0, 6 * (int)line);
ptrL += ((int)line * 3);
ptrL[0] = (int)((float)ptrL[0] * (1.0 - frac));
ptrL[1] = (int)((float)ptrL[1] * (1.0 - frac));
ptrL[2] = (int)((float)ptrL[2] * (1.0 - frac));
}
else
{
// Right edge: ptrR points at the first cleared pixel; the negative
// indices fade the last surviving pixel just before it.
ptrR += ((width - (int)line) * 3);
memset(ptrR, 0, 6 * (int)line);
ptrR[-1] = (int)((float)ptrR[-1] * (1.0 - frac));
ptrR[-2] = (int)((float)ptrR[-2] * (1.0 - frac));
ptrR[-3] = (int)((float)ptrR[-3] * (1.0 - frac));
}
}
else
{
// 4-component (8 bytes/pixel) signed variant, same scheme incl. alpha.
if (channel == 0)
{
memset(ptrL, 0, 8 * (int)line);
ptrL += ((int)line * 4);
ptrL[0] = (int)((float)ptrL[0] * (1.0 - frac));
ptrL[1] = (int)((float)ptrL[1] * (1.0 - frac));
ptrL[2] = (int)((float)ptrL[2] * (1.0 - frac));
ptrL[3] = (int)((float)ptrL[3] * (1.0 - frac));
}
else
{
ptrR += ((width - (int)line) * 4);
memset(ptrR, 0, 8 * (int)line);
ptrR[-1] = (int)((float)ptrR[-1] * (1.0 - frac));
ptrR[-2] = (int)((float)ptrR[-2] * (1.0 - frac));
ptrR[-3] = (int)((float)ptrR[-3] * (1.0 - frac));
ptrR[-4] = (int)((float)ptrR[-4] * (1.0 - frac));
}
}
}
else
{
// Unsigned 16-bit formats: same masking/fading through unsigned pointers.
unsigned short *ptrL = RGB48;
unsigned short *ptrR = RGB48;
if (windowMask < 0)
channel = channel == 0 ? 1 : 0;
if (pixelbytes == 6)
{
if (channel == 0)
{
memset(ptrL, 0, 6 * (int)line);
ptrL += ((int)line * 3);
ptrL[0] = (int)((float)ptrL[0] * (1.0 - frac));
ptrL[1] = (int)((float)ptrL[1] * (1.0 - frac));
ptrL[2] = (int)((float)ptrL[2] * (1.0 - frac));
}
else
{
ptrR += ((width - (int)line) * 3);
memset(ptrR, 0, 6 * (int)line);
ptrR[-1] = (int)((float)ptrR[-1] * (1.0 - frac));
ptrR[-2] = (int)((float)ptrR[-2] * (1.0 - frac));
ptrR[-3] = (int)((float)ptrR[-3] * (1.0 - frac));
}
}
else
{
if (channel == 0)
{
memset(ptrL, 0, 8 * (int)line);
ptrL += ((int)line * 4);
ptrL[0] = (int)((float)ptrL[0] * (1.0 - frac));
ptrL[1] = (int)((float)ptrL[1] * (1.0 - frac));
ptrL[2] = (int)((float)ptrL[2] * (1.0 - frac));
ptrL[3] = (int)((float)ptrL[3] * (1.0 - frac));
}
else
{
ptrR += ((width - (int)line) * 4);
memset(ptrR, 0, 8 * (int)line);
ptrR[-1] = (int)((float)ptrR[-1] * (1.0 - frac));
ptrR[-2] = (int)((float)ptrR[-2] * (1.0 - frac));
ptrR[-3] = (int)((float)ptrR[-3] * (1.0 - frac));
ptrR[-4] = (int)((float)ptrR[-4] * (1.0 - frac));
}
}
}
}
// Sub-pixel horizontal shift of one RGB48 (3 components/pixel) scanline.
// The line is staged into `buffer` with a 2-pixel lead-in and a 16-pixel
// zero pad, then resampled with a 4-tap filter whose coefficients come from
// the gains[SUBPIXEL][4] phase table, evaluated with SSE2 on 8 shorts (2.67
// pixels) per iteration.  Because _mm_mulhi_epi16 is a signed multiply,
// WP13 data (already signed, 15-bit headroom) is filtered as-is, while
// 16-bit unsigned data is pre-shifted down 3 bits and re-expanded (<<4) on
// output.  `offset` is the shift as a fraction of the width (sign gives the
// direction); `flip` mirrors the line first.
void RGB48HoriShift(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width, float offset, int flip)
{
float xposf, remainf;
int xposi, tablepos, x;
int gainA, gainB, gainC, gainD;
//int endofSSEline = 0;
unsigned short *scanline = (unsigned short *)buffer;
int neg = 0, shift = 0;
__m128i l1, l2, l3, gA, gB, gC, gD, o128, t1, t2;
__m128i *line128, *outline128;
if (flip)
{
// Mirror the scanline: swap pixel x with pixel (width-1-x), 3 components each.
unsigned short *ptrL = RGB48;
unsigned short *ptrR = RGB48;
ptrR += (width * 3) - 3;
for (x = 0; x < width / 2; x++)
{
int t1, t2, t3;
t1 = ptrL[0];
ptrL[0] = ptrR[0];
ptrR[0] = t1;
t2 = ptrL[1];
ptrL[1] = ptrR[1];
ptrR[1] = t2;
t3 = ptrL[2];
ptrL[2] = ptrR[2];
ptrR[2] = t3;
ptrL += 3;
ptrR -= 3;
}
}
if (offset < 0.0)
neg = 1;
// Split the shift into whole pixels (xposi) and a sub-pixel phase index
// (tablepos in [0, SUBPIXEL)).
xposf = width * offset;
xposi = (int)floorf(xposf);
remainf = xposf - (float)xposi;
tablepos = (int)(remainf * (float)SUBPIXEL);
xposi = abs(xposi);
if (xposi == 0 && tablepos == 0)
return; // no move required
gainA = gains[tablepos][0];
gainB = gains[tablepos][1];
gainC = gains[tablepos][2];
gainD = gains[tablepos][3];
if (neg == 0)
{
// Positive shift: prepend xposi+2 black pixels (2 = filter lead-in),
// copy the source, and zero-pad the tail for the SIMD overread.
unsigned short *ptr = scanline;
int nwidth = width - xposi + 16;
if (nwidth > width)
nwidth = width;
for (x = 0; x < xposi + 2; x++)
{
*ptr++ = 0;//r
*ptr++ = 0;//g
*ptr++ = 0;//b
}
memcpy(ptr, RGB48, (nwidth) * 3 * 2);
ptr += (nwidth) * 3;
for (x = 0; x < 16; x++)
{
*ptr++ = 0;//r
*ptr++ = 0;//g
*ptr++ = 0;//b
}
}
else
{
// Negative shift: start reading xposi pixels in (2-pixel lead-in taken
// from the source where available), then zero-pad the vacated tail.
unsigned short *ptr = scanline;
for (x = 0; x < 2; x++)
{
if (x + xposi - 2 >= 0)
{
*ptr++ = RGB48[(x + xposi - 2) * 3]; //r
*ptr++ = RGB48[(x + xposi - 2) * 3 + 1]; //g
*ptr++ = RGB48[(x + xposi - 2) * 3 + 2]; //b
}
else
{
*ptr++ = 0;//r
*ptr++ = 0;//g
*ptr++ = 0;//b
}
}
memcpy(ptr, &RGB48[xposi * 3], (width - xposi) * 3 * 2);
ptr += (width - xposi) * 3;
for (x = 0; x < xposi + 16; x++)
{
*ptr++ = 0;//r
*ptr++ = 0;//g
*ptr++ = 0;//b
}
}
gA = _mm_set1_epi16(gainA);
gB = _mm_set1_epi16(gainB);
gC = _mm_set1_epi16(gainC);
gD = _mm_set1_epi16(gainD);
line128 = (__m128i *)&scanline[0];
//outline128 = line128;
outline128 = (__m128i *)&RGB48[0];
//l1 = load128;//r1,g1,b1,r2,g2,b2,r3,g3,
//l2 = load128;//b3,r4,g4,b4,r5,g5,b5,r6
//l3 = load128;//g6,b6,r7,g7,b7,r8,g8,b8
if (decoder->StereoBufferFormat == DECODED_FORMAT_WP13)
{
l1 = _mm_loadu_si128(line128++);
l2 = _mm_loadu_si128(line128++);
l3 = _mm_loadu_si128(line128++);
shift = 0;
}
else
{
l1 = _mm_loadu_si128(line128++);
l1 = _mm_srli_epi16(l1, 3); //13-bit unsigned
l2 = _mm_loadu_si128(line128++);
l2 = _mm_srli_epi16(l2, 3); //13-bit unsigned
l3 = _mm_loadu_si128(line128++);
l3 = _mm_srli_epi16(l3, 3); //13-bit unsigned
shift = 3;
}
// 4-tap convolution: each tap is the source advanced by one pixel
// (3 shorts), assembled from l1/l2/l3 by byte-shifts, weighted with
// _mm_mulhi_epi16 and accumulated with saturating adds.
for (x = 0; x < width * 3; x += 8)
{
//o=l1* gainA
o128 = _mm_mulhi_epi16(l1, gA);
//t1 = l1<<3*16 //t1 = r2,g2,b2,r3,g3, 0 0 0
//t2 = l2>>16*5 //t2 = 0 0 0 0 0 b3,r4,g4
//t1 += t2; //t1 = r2,g2,b2,r3,g3,b3,r4,g4
//l1 = t1 //l1 = r2,g2,b2,r3,g3,b3,r4,g4
//t1 *= gainB
//o += t1
t1 = _mm_srli_si128(l1, 3 * 2);
t2 = _mm_slli_si128(l2, 5 * 2);
t1 = _mm_adds_epi16(t1, t2);
l1 = t1;
t1 = _mm_mulhi_epi16(t1, gB);
o128 = _mm_adds_epi16(o128, t1);
//t1 = l1<<3*16 //t1 = r3,g3,b3,r4,g4 0 0 0
//t2 = l2<<3*16; //t2 = b4,r5,g5,b5,r6 0 0 0
//t2 >>= 5*16; //t2 = 0 0 0 0 0 b4,r5,g5
//t1 += t2 //t1 = r3,g3,b3,r4,g4,b4,r5,g5
//l1 = t1 //l1 = r3,g3,b3,r4,g4,b4,r5,g5
//t1 *= gainC
//o += t1
t1 = _mm_srli_si128(l1, 3 * 2);
t2 = _mm_srli_si128(l2, 3 * 2);
t2 = _mm_slli_si128(t2, 5 * 2);
t1 = _mm_adds_epi16(t1, t2);
l1 = t1;
t1 = _mm_mulhi_epi16(t1, gC);
o128 = _mm_adds_epi16(o128, t1);
//t1 = l1<<3*16 //t1 = r4,g4,b4,r5,g5 0 0 0
//t2 = l2<<6*16 //t2 = b5,r6 0 0 0 0 0 0
//t2 >>= 5 * 16; //t2 = 0 0 0 0 0 b5,r6 0
//t1 += t2 //t1 = r4,g4,b4,r5,g5,b5,r6, 0
//t2 = l3>>7*16 //t2 = 0 0 0 0 0 0 0 g6
//t1 += t2 //t1 = r4,g4,b4,r5,g5,b5,r6,g6
//t1 *= gainD
//o += t1
t1 = _mm_srli_si128(l1, 3 * 2);
t2 = _mm_srli_si128(l2, 6 * 2);
t2 = _mm_slli_si128(t2, 5 * 2);
t1 = _mm_adds_epi16(t1, t2);
t2 = _mm_slli_si128(l3, 7 * 2);
t1 = _mm_adds_epi16(t1, t2);
t1 = _mm_mulhi_epi16(t1, gD);
o128 = _mm_adds_epi16(o128, t1);
// Slide the 3-register window forward one vector and prefetch the next.
l1 = l2;
l2 = l3;
l3 = _mm_loadu_si128(line128++);
if (shift)
{
// 13-bit path: clamp to [0, 0x0fff] via saturating add/sub, then
// restore 16-bit range (<<4).
l3 = _mm_srli_epi16(l3, 3); //13-bit unsigned
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_slli_epi16(o128, 4);
}
else
{
// upper limit to 32767
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
o128 = _mm_slli_epi16(o128, 1);
}
_mm_storeu_si128(outline128++, o128);
}
}
// Sub-pixel horizontal shift of one RGBA64 (4 components/pixel) scanline.
// The line is staged into `buffer` with a 2-pixel lead-in and a 16-pixel
// zero pad, then resampled with a 4-tap filter from the gains[SUBPIXEL][4]
// phase table using SSE2, 8 shorts (2 pixels) per iteration.  Signed 13-bit
// formats (WP13/W13A) are filtered as-is; 16-bit unsigned data is
// pre-shifted down 3 bits (so _mm_mulhi_epi16's signed multiply is safe)
// and re-expanded (<<4) on output.  `offset` is the shift as a fraction of
// the width (sign gives the direction); `flip` mirrors the line first.
void RGBA64HoriShift(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width, float offset, int flip)
{
    float xposf, remainf;
    int xposi, tablepos, x;
    int gainA, gainB, gainC, gainD;
    unsigned short *scanline = (unsigned short *)buffer;
    int neg = 0, shift = 0;
    __m128i l1, l2, l3, gA, gB, gC, gD, o128, t1, t2;
    __m128i *line128, *outline128;

    if (flip)
    {
        // Mirror the scanline: swap pixel x with pixel (width-1-x), all four
        // components (R,G,B,A).
        unsigned short *ptrL = RGB48;
        unsigned short *ptrR = RGB48;
        ptrR += (width * 4) - 4;
        for (x = 0; x < width / 2; x++)
        {
            int t1, t2, t3, t4;
            t1 = ptrL[0];
            ptrL[0] = ptrR[0];
            ptrR[0] = t1;
            t2 = ptrL[1];
            ptrL[1] = ptrR[1];
            ptrR[1] = t2;
            t3 = ptrL[2];
            ptrL[2] = ptrR[2];
            ptrR[2] = t3;
            // Bug fix: this read was "ptrL[2]", which copied the left pixel's
            // blue into the right pixel's alpha on every swap.
            t4 = ptrL[3];
            ptrL[3] = ptrR[3];
            ptrR[3] = t4;
            ptrL += 4;
            ptrR -= 4;
        }
    }
    if (offset < 0.0)
        neg = 1;
    // Split the shift into whole pixels (xposi) and a sub-pixel phase index
    // (tablepos in [0, SUBPIXEL)).
    xposf = width * offset;
    xposi = (int)floorf(xposf);
    remainf = xposf - (float)xposi;
    tablepos = (int)(remainf * (float)SUBPIXEL);
    xposi = abs(xposi);
    if (xposi == 0 && tablepos == 0)
        return; // no move required
    gainA = gains[tablepos][0];
    gainB = gains[tablepos][1];
    gainC = gains[tablepos][2];
    gainD = gains[tablepos][3];
    if (neg == 0)
    {
        // Positive shift: prepend xposi+2 black pixels (2 = filter lead-in),
        // copy the source, and zero-pad the tail for the SIMD overread.
        unsigned short *ptr = scanline;
        int nwidth = width - xposi + 16;
        if (nwidth > width)
            nwidth = width;
        for (x = 0; x < xposi + 2; x++)
        {
            *ptr++ = 0;//r
            *ptr++ = 0;//g
            *ptr++ = 0;//b
            *ptr++ = 0;//a
        }
        memcpy(ptr, RGB48, (nwidth) * 4 * 2);
        ptr += (nwidth) * 4;
        for (x = 0; x < 16; x++)
        {
            *ptr++ = 0;//r
            *ptr++ = 0;//g
            *ptr++ = 0;//b
            *ptr++ = 0;//a
        }
    }
    else
    {
        // Negative shift: start reading xposi pixels in (2-pixel lead-in
        // taken from the source where available), then zero-pad the tail.
        unsigned short *ptr = scanline;
        for (x = 0; x < 2; x++)
        {
            if (x + xposi - 2 >= 0)
            {
                *ptr++ = RGB48[(x + xposi - 2) * 4]; //r
                *ptr++ = RGB48[(x + xposi - 2) * 4 + 1]; //g
                *ptr++ = RGB48[(x + xposi - 2) * 4 + 2]; //b
                *ptr++ = RGB48[(x + xposi - 2) * 4 + 3]; //a
            }
            else
            {
                *ptr++ = 0;//r
                *ptr++ = 0;//g
                *ptr++ = 0;//b
                *ptr++ = 0;//a
            }
        }
        memcpy(ptr, &RGB48[xposi * 4], (width - xposi) * 4 * 2);
        ptr += (width - xposi) * 4;
        for (x = 0; x < xposi + 16; x++)
        {
            *ptr++ = 0;//r
            *ptr++ = 0;//g
            *ptr++ = 0;//b
            *ptr++ = 0;//a
        }
    }
    gA = _mm_set1_epi16(gainA);
    gB = _mm_set1_epi16(gainB);
    gC = _mm_set1_epi16(gainC);
    gD = _mm_set1_epi16(gainD);
    line128 = (__m128i *)&scanline[0];
    outline128 = (__m128i *)&RGB48[0];
    //l1 = load128;//r1,g1,b1,a1,r2,g2,b2,a2,
    //l2 = load128;//r3,g3,b3,a3,r4,g4,b4,a4,
    //l3 = load128;//r5,g5,b5,a5,r6,g6,b6,a6,
    //l4 = load128;//r7,g7,b7,a7,r8,g8,b8,a8,
    if (decoder->StereoBufferFormat == DECODED_FORMAT_WP13 || decoder->StereoBufferFormat == DECODED_FORMAT_W13A)
    {
        l1 = _mm_loadu_si128(line128++);
        l2 = _mm_loadu_si128(line128++);
        l3 = _mm_loadu_si128(line128++);
        shift = 0;
    }
    else
    {
        l1 = _mm_loadu_si128(line128++);
        l1 = _mm_srli_epi16(l1, 3); //13-bit unsigned
        l2 = _mm_loadu_si128(line128++);
        l2 = _mm_srli_epi16(l2, 3); //13-bit unsigned
        l3 = _mm_loadu_si128(line128++);
        l3 = _mm_srli_epi16(l3, 3); //13-bit unsigned
        shift = 3;
    }
    // 4-tap convolution: each tap is the source advanced by one pixel
    // (4 shorts), assembled from l1/l2/l3 by byte-shifts, weighted with
    // _mm_mulhi_epi16 and accumulated with saturating adds.
    for (x = 0; x < width * 4; x += 8)
    {
        //o=l1* gainA
        o128 = _mm_mulhi_epi16(l1, gA);
        //t1 = l1<<4*16 //t1 = r2,g2,b2,a2,0, 0 0 0
        //t2 = l2>>4*16 //t2 = 0 0 0 0 r3,g3,b3,a4
        //t1 += t2; //t1 = r2,g2,b2,a2,r3,g3,b3,a4
        //l1 = t1 //l1 = r2,g2,b2,a2,r3,g3,b3,a4
        //t1 *= gainB
        //o += t1
        t1 = _mm_srli_si128(l1, 4 * 2);
        t2 = _mm_slli_si128(l2, 4 * 2);
        t1 = _mm_adds_epi16(t1, t2);
        l1 = t1;
        t1 = _mm_mulhi_epi16(t1, gB);
        o128 = _mm_adds_epi16(o128, t1);
        //t1 = l1<<4*16 //t1 = r3,g3,b3,a3, 0 0 0 0
        //t2 = l2<<4*16;//t2 = r4,g4,b4,a4, 0 0 0 0
        //t2 >>= 4*16; //t2 = 0 0 0 0 r4,g4,b4,a4
        //t1 += t2 //t1 = r3,g3,b3,a4,r4,g4,b4,a4
        //l1 = t1 //l1 = r3,g3,b3,a4,r4,g4,b4,a4
        //t1 *= gainC
        //o += t1
        t1 = _mm_srli_si128(l1, 4 * 2);
        t2 = _mm_srli_si128(l2, 4 * 2);
        t2 = _mm_slli_si128(t2, 4 * 2);
        t1 = _mm_adds_epi16(t1, t2);
        l1 = t1;
        t1 = _mm_mulhi_epi16(t1, gC);
        o128 = _mm_adds_epi16(o128, t1);
        //t1 = l1<<4*16 //t1 = r4,g4,b4,a4,0 0 0 0
        //t2 = l3>>4*16 //t2 = 0 0 0 0 r5,g5,b5,a5
        //t1 += t2 //t1 = r4,g4,b4,a4,r5,g5,b5,a5
        //t1 *= gainD
        //o += t1
        t1 = _mm_srli_si128(l1, 4 * 2);
        t2 = _mm_slli_si128(l3, 4 * 2);
        t1 = _mm_adds_epi16(t1, t2);
        t1 = _mm_mulhi_epi16(t1, gD);
        o128 = _mm_adds_epi16(o128, t1);
        // Slide the 3-register window forward one vector and prefetch the next.
        l1 = l2;
        l2 = l3;
        l3 = _mm_loadu_si128(line128++);
        if (shift)
        {
            // 13-bit path: clamp to [0, 0x0fff] via saturating add/sub, then
            // restore 16-bit range (<<4).
            l3 = _mm_srli_epi16(l3, 3); //13-bit unsigned
            o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
            o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
            o128 = _mm_slli_epi16(o128, 4);
        }
        else
        {
            // upper limit to 32767
            o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
            o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
            o128 = _mm_slli_epi16(o128, 1);
        }
        _mm_storeu_si128(outline128++, o128);
    }
}
// Per-channel sub-pixel horizontal shift of one RGB48 scanline, used for
// anaglyph convergence adjustment: the R, G and B planes may each be shifted
// (and mirrored) independently.  Each channel is staged separately into the
// interleaved `buffer`, then the combined line is filtered with the 4-tap
// gains[SUBPIXEL][4] kernel in SSE2.  Because 3-component pixels slide by a
// non-multiple-of-8 lane count each vector, three pre-built gain vectors per
// tap (g?1/g?2/g?3) are rotated every 8 samples so each 16-bit lane always
// carries its own channel's coefficient.
void RGB48HoriShiftAnaglyph(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width,
float offsetR, float offsetG, float offsetB,
int flipR, int flipG, int flipB)
{
float Rxposf, Rremainf;
int Rxposi, Rtablepos;
float Gxposf, Gremainf;
int Gxposi, Gtablepos;
float Bxposf, Bremainf;
int Bxposi, Btablepos;
int x;
int RgainA, RgainB, RgainC, RgainD;
int GgainA, GgainB, GgainC, GgainD;
int BgainA, BgainB, BgainC, BgainD;
//int endofSSEline = 0;
unsigned short *scanline = (unsigned short *)buffer;
int negR = 0;
int negG = 0;
int negB = 0;
int shift = 0;
__m128i l1, l2, l3, o128, t1, t2;
__m128i *line128, *outline128;
__m128i gA1, gB1, gC1, gD1, gA2, gB2, gC2, gD2, gA3, gB3, gC3, gD3;
// Per-channel mirror: each flip swaps only its own component (stride 3),
// leaving the other two channels in place.
if (flipR)
{
unsigned short *ptrL = RGB48;
unsigned short *ptrR = RGB48;
ptrR += (width * 3) - 3;
for (x = 0; x < width / 2; x++)
{
int t;
t = *ptrL;
*ptrL = *ptrR;
*ptrR = t;
ptrL += 3;
ptrR -= 3;
}
}
if (flipG)
{
unsigned short *ptrL = &RGB48[1];
unsigned short *ptrR = &RGB48[1];
ptrR += (width * 3) - 3;
for (x = 0; x < width / 2; x++)
{
int t;
t = *ptrL;
*ptrL = *ptrR;
*ptrR = t;
ptrL += 3;
ptrR -= 3;
}
}
if (flipB)
{
unsigned short *ptrL = &RGB48[2];
unsigned short *ptrR = &RGB48[2];
ptrR += (width * 3) - 3;
for (x = 0; x < width / 2; x++)
{
int t;
t = *ptrL;
*ptrL = *ptrR;
*ptrR = t;
ptrL += 3;
ptrR -= 3;
}
}
if (offsetR < 0.0)
negR = 1;
if (offsetG < 0.0)
negG = 1;
if (offsetB < 0.0)
negB = 1;
// Split each channel's shift into whole pixels (?xposi) and a sub-pixel
// phase index (?tablepos in [0, SUBPIXEL)).
Rxposf = width * offsetR;
Rxposi = (int)floorf(Rxposf);
Rremainf = Rxposf - (float)Rxposi;
Rtablepos = (int)(Rremainf * (float)SUBPIXEL);
Gxposf = width * offsetG;
Gxposi = (int)floorf(Gxposf);
Gremainf = Gxposf - (float)Gxposi;
Gtablepos = (int)(Gremainf * (float)SUBPIXEL);
Bxposf = width * offsetB;
Bxposi = (int)floorf(Bxposf);
Bremainf = Bxposf - (float)Bxposi;
Btablepos = (int)(Bremainf * (float)SUBPIXEL);
Rxposi = abs(Rxposi);
Gxposi = abs(Gxposi);
Bxposi = abs(Bxposi);
// NOTE(review): this early-out only tests the red-channel shift; if R is
// unshifted but G or B are, the function returns without shifting them —
// confirm callers never pass that combination.
if (Rxposi == 0 && Rtablepos == 0)
return; // no move required
RgainA = gains[Rtablepos][0];
RgainB = gains[Rtablepos][1];
RgainC = gains[Rtablepos][2];
RgainD = gains[Rtablepos][3];
GgainA = gains[Gtablepos][0];
GgainB = gains[Gtablepos][1];
GgainC = gains[Gtablepos][2];
GgainD = gains[Gtablepos][3];
BgainA = gains[Btablepos][0];
BgainB = gains[Btablepos][1];
BgainC = gains[Btablepos][2];
BgainD = gains[Btablepos][3];
// Stage the R channel into scanline (2-pixel lead-in, 16-pixel zero pad);
// only the r slot of each interleaved pixel is written.
if (negR == 0)
{
unsigned short *ptr = scanline;
int nwidth = width - Rxposi + 16;
if (nwidth > width)
nwidth = width;
for (x = 0; x < Rxposi + 2; x++)
{
*ptr++ = 0;//r
ptr++;//g
ptr++;//b
}
for (x = 0; x < nwidth; x++)
{
*ptr++ = RGB48[x * 3]; //r
ptr++;//g
ptr++;//b
}
for (x = 0; x < 16; x++)
{
*ptr++ = 0;//r
ptr++;//g
ptr++;//b
}
}
else
{
unsigned short *ptr = scanline;
for (x = 0; x < 2; x++)
{
if (x + Rxposi - 2 >= 0)
{
*ptr++ = RGB48[(x + Rxposi - 2) * 3]; //r
ptr++;//g
ptr++;//b
}
else
{
*ptr++ = 0;//r
ptr++;//g
ptr++;//b
}
}
//memcpy(ptr, &RGB48[xposi*3], (width-xposi)*3*2);
//ptr += (width-xposi)*3;
for (x = Rxposi; x < width; x++)
{
*ptr++ = RGB48[x * 3]; //r
ptr++;//g
ptr++;//b
}
for (x = 0; x < Rxposi + 16; x++)
{
*ptr++ = 0;//r
ptr++;//g
ptr++;//b
}
}
// Stage the G channel (same scheme, g slot only).
if (negG == 0)
{
unsigned short *ptr = scanline;
int nwidth = width - Gxposi + 16;
if (nwidth > width)
nwidth = width;
for (x = 0; x < Gxposi + 2; x++)
{
ptr++;//r
*ptr++ = 0;//g
ptr++;//b
}
for (x = 0; x < nwidth; x++)
{
ptr++;//r
*ptr++ = RGB48[x * 3 + 1]; //g
ptr++;//b
}
for (x = 0; x < 16; x++)
{
ptr++;//r
*ptr++ = 0;//g
ptr++;//b
}
}
else
{
unsigned short *ptr = scanline;
for (x = 0; x < 2; x++)
{
if (x + Gxposi - 2 >= 0)
{
ptr++;//r
*ptr++ = RGB48[(x + Gxposi - 2) * 3 + 1]; //g
ptr++;//b
}
else
{
ptr++;//r
*ptr++ = 0;//g
ptr++;//b
}
}
//memcpy(ptr, &RGB48[xposi*3], (width-xposi)*3*2);
//ptr += (width-xposi)*3;
for (x = Gxposi; x < width; x++)
{
ptr++;//r
*ptr++ = RGB48[x * 3 + 1]; //g
ptr++;//b
}
for (x = 0; x < Gxposi + 16; x++)
{
ptr++;//r
*ptr++ = 0;//g
ptr++;//b
}
}
// Stage the B channel (same scheme, b slot only).
if (negB == 0)
{
unsigned short *ptr = scanline;
int nwidth = width - Bxposi + 16;
if (nwidth > width)
nwidth = width;
for (x = 0; x < Bxposi + 2; x++)
{
ptr++;//r
ptr++;//g
*ptr++ = 0;//b
}
for (x = 0; x < nwidth; x++)
{
ptr++;//r
ptr++;//g
*ptr++ = RGB48[x * 3 + 2]; //b
}
for (x = 0; x < 16; x++)
{
ptr++;//r
ptr++;//g
*ptr++ = 0;//b
}
}
else
{
unsigned short *ptr = scanline;
for (x = 0; x < 2; x++)
{
if (x + Bxposi - 2 >= 0)
{
ptr++;//r
ptr++;//g
*ptr++ = RGB48[(x + Bxposi - 2) * 3 + 2]; //b
}
else
{
ptr++;//r
ptr++;//g
*ptr++ = 0;//b
}
}
//memcpy(ptr, &RGB48[xposi*3], (width-xposi)*3*2);
//ptr += (width-xposi)*3;
for (x = Bxposi; x < width; x++)
{
ptr++;//r
ptr++;//g
*ptr++ = RGB48[x * 3 + 2]; //b
}
for (x = 0; x < Bxposi + 16; x++)
{
ptr++;//r
ptr++;//g
*ptr++ = 0;//b
}
}
// Three phase-rotated gain vectors per tap: the RGB pattern repeats every
// 3 lanes, so three variants cover all alignments of an 8-lane vector.
gA1 = _mm_set_epi16(RgainA, GgainA, BgainA, RgainA, GgainA, BgainA, RgainA, GgainA);
gA2 = _mm_set_epi16(BgainA, RgainA, GgainA, BgainA, RgainA, GgainA, BgainA, RgainA);
gA3 = _mm_set_epi16(GgainA, BgainA, RgainA, GgainA, BgainA, RgainA, GgainA, BgainA);
gB1 = _mm_set_epi16(RgainB, GgainB, BgainB, RgainB, GgainB, BgainB, RgainB, GgainB);
gB2 = _mm_set_epi16(BgainB, RgainB, GgainB, BgainB, RgainB, GgainB, BgainB, RgainB);
gB3 = _mm_set_epi16(GgainB, BgainB, RgainB, GgainB, BgainB, RgainB, GgainB, BgainB);
gC1 = _mm_set_epi16(RgainC, GgainC, BgainC, RgainC, GgainC, BgainC, RgainC, GgainC);
gC2 = _mm_set_epi16(BgainC, RgainC, GgainC, BgainC, RgainC, GgainC, BgainC, RgainC);
gC3 = _mm_set_epi16(GgainC, BgainC, RgainC, GgainC, BgainC, RgainC, GgainC, BgainC);
gD1 = _mm_set_epi16(RgainD, GgainD, BgainD, RgainD, GgainD, BgainD, RgainD, GgainD);
gD2 = _mm_set_epi16(BgainD, RgainD, GgainD, BgainD, RgainD, GgainD, BgainD, RgainD);
gD3 = _mm_set_epi16(GgainD, BgainD, RgainD, GgainD, BgainD, RgainD, GgainD, BgainD);
line128 = (__m128i *)&scanline[0];
//outline128 = line128;
outline128 = (__m128i *)&RGB48[0];
//l1 = load128;//r1,g1,b1,r2,g2,b2,r3,g3,
//l2 = load128;//b3,r4,g4,b4,r5,g5,b5,r6
//l3 = load128;//g6,b6,r7,g7,b7,r8,g8,b8
if (decoder->StereoBufferFormat == DECODED_FORMAT_WP13)
{
l1 = _mm_loadu_si128(line128++);
l2 = _mm_loadu_si128(line128++);
l3 = _mm_loadu_si128(line128++);
shift = 0;
}
else
{
l1 = _mm_loadu_si128(line128++);
l1 = _mm_srli_epi16(l1, 3); //13-bit unsigned
l2 = _mm_loadu_si128(line128++);
l2 = _mm_srli_epi16(l2, 3); //13-bit unsigned
l3 = _mm_loadu_si128(line128++);
l3 = _mm_srli_epi16(l3, 3); //13-bit unsigned
shift = 3;
}
// Same 4-tap convolution as RGB48HoriShift, but with per-lane (per-channel)
// gain vectors that rotate after each output vector.
for (x = 0; x < width * 3; x += 8)
{
//o=l1* gainA
o128 = _mm_mulhi_epi16(l1, gA1);
//t1 = l1<<3*16 //t1 = r2,g2,b2,r3,g3, 0 0 0
//t2 = l2>>16*5 //t2 = 0 0 0 0 0 b3,r4,g4
//t1 += t2; //t1 = r2,g2,b2,r3,g3,b3,r4,g4
//l1 = t1 //l1 = r2,g2,b2,r3,g3,b3,r4,g4
//t1 *= gainB
//o += t1
t1 = _mm_srli_si128(l1, 3 * 2);
t2 = _mm_slli_si128(l2, 5 * 2);
t1 = _mm_adds_epi16(t1, t2);
l1 = t1;
t1 = _mm_mulhi_epi16(t1, gB1);
o128 = _mm_adds_epi16(o128, t1);
//t1 = l1<<3*16 //t1 = r3,g3,b3,r4,g4 0 0 0
//t2 = l2<<3*16; //t2 = b4,r5,g5,b5,r6 0 0 0
//t2 >>= 5*16; //t2 = 0 0 0 0 0 b4,r5,g5
//t1 += t2 //t1 = r3,g3,b3,r4,g4,b4,r5,g5
//l1 = t1 //l1 = r3,g3,b3,r4,g4,b4,r5,g5
//t1 *= gainC
//o += t1
t1 = _mm_srli_si128(l1, 3 * 2);
t2 = _mm_srli_si128(l2, 3 * 2);
t2 = _mm_slli_si128(t2, 5 * 2);
t1 = _mm_adds_epi16(t1, t2);
l1 = t1;
t1 = _mm_mulhi_epi16(t1, gC1);
o128 = _mm_adds_epi16(o128, t1);
//t1 = l1<<3*16 //t1 = r4,g4,b4,r5,g5 0 0 0
//t2 = l2<<6*16 //t2 = b5,r6 0 0 0 0 0 0
//t2 >>= 5 * 16; //t2 = 0 0 0 0 0 b5,r6 0
//t1 += t2 //t1 = r4,g4,b4,r5,g5,b5,r6, 0
//t2 = l3>>7*16 //t2 = 0 0 0 0 0 0 0 g6
//t1 += t2 //t1 = r4,g4,b4,r5,g5,b5,r6,g6
//t1 *= gainD
//o += t1
t1 = _mm_srli_si128(l1, 3 * 2);
t2 = _mm_srli_si128(l2, 6 * 2);
t2 = _mm_slli_si128(t2, 5 * 2);
t1 = _mm_adds_epi16(t1, t2);
t2 = _mm_slli_si128(l3, 7 * 2);
t1 = _mm_adds_epi16(t1, t2);
t1 = _mm_mulhi_epi16(t1, gD1);
o128 = _mm_adds_epi16(o128, t1);
// Rotate the three gain-vector phases (1 <- 2 <- 3 <- 1) so the next
// 8 lanes line up with their channels again.
t1 = gA1;
gA1 = gA2;
gA2 = gA3;
gA3 = t1;
t1 = gB1;
gB1 = gB2;
gB2 = gB3;
gB3 = t1;
t1 = gC1;
gC1 = gC2;
gC2 = gC3;
gC3 = t1;
t1 = gD1;
gD1 = gD2;
gD2 = gD3;
gD3 = t1;
l1 = l2;
l2 = l3;
l3 = _mm_loadu_si128(line128++);
if (shift)
{
// 13-bit path: clamp to [0, 0x0fff] via saturating add/sub, then
// restore 16-bit range (<<4).
l3 = _mm_srli_epi16(l3, 3); //13-bit unsigned
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
o128 = _mm_slli_epi16(o128, 4);
}
else
{
// upper limit to 32767
o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
o128 = _mm_slli_epi16(o128, 1);
}
_mm_storeu_si128(outline128++, o128);
}
}
void HistogramLine(DECODER *decoder, unsigned short *sbase, int width, int format, int whitepoint)
{
int x, val, ypos = 0, upos = 1, vpos = 3;
int step = 1, pos = 0;
short *ssbase = (short *)sbase;
uint32_t *lbase = (uint32_t *)sbase;
ToolsHandle *tools = decoder->tools;
int scaledvectorscope = 0;
if (tools == NULL)
return;
if (whitepoint == 13)
{
if (format == DECODED_FORMAT_RG64)
format = DECODED_FORMAT_W13A;
else
format = DECODED_FORMAT_WP13;
}
while (width / step > 360)
{
step *= 2;
}
tools->waveformWidth = width / step;
decoder->tools->blurUVdone = 0;
switch (format & 0xffffff)
{
case DECODED_FORMAT_WP13:
decoder->tools->histogram = 1;
for (x = 0, pos = 0; x < width; x += step, pos++)
{
int32_t R, G, B, U, V;
R = ssbase[0] >> 5;
G = ssbase[1] >> 5;
B = ssbase[2] >> 5;
if (R > 255) R = 255;
if (R < 0) R = 0;
if (G > 255) G = 255;
if (G < 0) G = 0;
if (B > 255) B = 255;
if (B < 0) B = 0;
tools->histR[R]++;
tools->histG[G]++;
tools->histB[B]++;
tools->waveR[pos][R]++;
tools->waveG[pos][G]++;
tools->waveB[pos][B]++;
//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
if (scaledvectorscope)
{
U = ((((-672 * R) - (2249 * G) + (2920 * B)) >> 13)) + 128; //* 255.0/314.0
V = ((((3758 * R) - (3416 * G) - (343 * B)) >> 13)) + 128; //* 255.0/244.0
}
else
{
U = ((((-827 * R) - (2769 * G) + (3596 * B)) >> 13)) + 128;
V = ((((3596 * R) - (3269 * G) - (328 * B)) >> 13)) + 128;
}
if (U < 0) U = 0;
if (U > 255) U = 255;
if (V < 0) V = 0;
if (V > 255) V = 255;
tools->scopeUV[U][V]++;
ssbase += step * 3;
}
break;
case DECODED_FORMAT_W13A:
tools->histogram = 1;
for (x = 0, pos = 0; x < width; x += step, pos++)
{
int32_t R, G, B, U, V;
R = ssbase[0] >> 5;
G = ssbase[1] >> 5;
B = ssbase[2] >> 5;
if (R > 255) R = 255;
if (R < 0) R = 0;
if (G > 255) G = 255;
if (G < 0) G = 0;
if (B > 255) B = 255;
if (B < 0) B = 0;
tools->histR[R]++;
tools->histG[G]++;
tools->histB[B]++;
tools->waveR[pos][R]++;
tools->waveG[pos][G]++;
tools->waveB[pos][B]++;
//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
if (scaledvectorscope)
{
U = ((((-672 * R) - (2249 * G) + (2920 * B)) >> 13)) + 128; //* 255.0/314.0
V = ((((3758 * R) - (3416 * G) - (343 * B)) >> 13)) + 128; //* 255.0/244.0
}
else
{
U = ((((-827 * R) - (2769 * G) + (3596 * B)) >> 13)) + 128;
V = ((((3596 * R) - (3269 * G) - (328 * B)) >> 13)) + 128;
}
if (U < 0) U = 0;
if (U > 255) U = 255;
if (V < 0) V = 0;
if (V > 255) V = 255;
tools->scopeUV[U][V]++;
ssbase += step * 4;
}
break;
case DECODED_FORMAT_RG48:
tools->histogram = 1;
for (x = 0, pos = 0; x < width; x += step, pos++)
{
int32_t R, G, B, U, V;
R = sbase[0] >> 8;
G = sbase[1] >> 8;
B = sbase[2] >> 8;
tools->histR[R]++;
tools->histG[G]++;
tools->histB[B]++;
tools->waveR[pos][R]++;
tools->waveG[pos][G]++;
tools->waveB[pos][B]++;
//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
if (scaledvectorscope)
{
U = ((((-672 * R) - (2249 * G) + (2920 * B)) >> 13)) + 128; //* 255.0/314.0
V = ((((3758 * R) - (3416 * G) - (343 * B)) >> 13)) + 128; //* 255.0/244.0
}
else
{
U = ((((-827 * R) - (2769 * G) + (3596 * B)) >> 13)) + 128;
V = ((((3596 * R) - (3269 * G) - (328 * B)) >> 13)) + 128;
}
if (U < 0) U = 0;
if (U > 255) U = 255;
if (V < 0) V = 0;
if (V > 255) V = 255;
tools->scopeUV[U][V]++;
sbase += step * 3;
}
break;
case DECODED_FORMAT_AB10:
case DECODED_FORMAT_RG30:
tools->histogram = 1;
for (x = 0, pos = 0; x < width; x += step, pos++)
{
int32_t R, G, B, U, V;
val = lbase[x];
R = (val >> 22) & 0xff;
G = (val >> 12) & 0xff;
B = (val >> 02) & 0xff;
tools->histR[R]++;
tools->histG[G]++;
tools->histB[B]++;
tools->waveR[pos][R]++;
tools->waveG[pos][G]++;
tools->waveB[pos][B]++;
//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
if (scaledvectorscope)
{
U = ((((-672 * R) - (2249 * G) + (2920 * B)) >> 13)) + 128; //* 255.0/314.0
V = ((((3758 * R) - (3416 * G) - (343 * B)) >> 13)) + 128; //* 255.0/244.0
}
else
{
U = ((((-827 * R) - (2769 * G) + (3596 * B)) >> 13)) + 128;
V = ((((3596 * R) - (3269 * G) - (328 * B)) >> 13)) + 128;
}
if (U < 0) U = 0;
if (U > 255) U = 255;
if (V < 0) V = 0;
if (V > 255) V = 255;
tools->scopeUV[U][V]++;
}
break;
case DECODED_FORMAT_AR10:
tools->histogram = 1;
for (x = 0, pos = 0; x < width; x += step, pos++)
{
int32_t R, G, B, U, V;
val = lbase[x];
B = (val >> 22) & 0xff;
G = (val >> 12) & 0xff;
R = (val >> 02) & 0xff;
tools->histR[R]++;
tools->histG[G]++;
tools->histB[B]++;
tools->waveR[pos][R]++;
tools->waveG[pos][G]++;
tools->waveB[pos][B]++;
//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
if (scaledvectorscope)
{
U = ((((-672 * R) - (2249 * G) + (2920 * B)) >> 13)) + 128; //* 255.0/314.0
V = ((((3758 * R) - (3416 * G) - (343 * B)) >> 13)) + 128; //* 255.0/244.0
}
else
{
U = ((((-827 * R) - (2769 * G) + (3596 * B)) >> 13)) + 128;
V = ((((3596 * R) - (3269 * G) - (328 * B)) >> 13)) + 128;
}
if (U < 0) U = 0;
if (U > 255) U = 255;
if (V < 0) V = 0;
if (V > 255) V = 255;
tools->scopeUV[U][V]++;
}
break;
case DECODED_FORMAT_R210:
tools->histogram = 1;
for (x = 0, pos = 0; x < width; x += step, pos++)
{
int32_t R, G, B, U, V;
val = SwapInt32BtoN(lbase[x]);
R = (val >> 22) & 0xff;
G = (val >> 12) & 0xff;
B = (val >> 02) & 0xff;
tools->histR[R]++;
tools->histG[G]++;
tools->histB[B]++;
tools->waveR[pos][R]++;
tools->waveG[pos][G]++;
tools->waveB[pos][B]++;
//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
if (scaledvectorscope)
{
U = ((((-672 * R) - (2249 * G) + (2920 * B)) >> 13)) + 128; //* 255.0/314.0
V = ((((3758 * R) - (3416 * G) - (343 * B)) >> 13)) + 128; //* 255.0/244.0
}
else
{
U = ((((-827 * R) - (2769 * G) + (3596 * B)) >> 13)) + 128;
V = ((((3596 * R) - (3269 * G) - (328 * B)) >> 13)) + 128;
}
if (U < 0) U = 0;
if (U > 255) U = 255;
if (V < 0) V = 0;
if (V > 255) V = 255;
tools->scopeUV[U][V]++;
}
break;
case DECODED_FORMAT_DPX0:
tools->histogram = 1;
for (x = 0, pos = 0; x < width; x += step, pos++)
{
int32_t R, G, B, U, V;
val = SwapInt32BtoN(lbase[x]);
R = (val >> 24) & 0xff;
G = (val >> 14) & 0xff;
B = (val >> 04) & 0xff;
tools->histR[R]++;
tools->histG[G]++;
tools->histB[B]++;
tools->waveR[pos][R]++;
tools->waveG[pos][G]++;
tools->waveB[pos][B]++;
//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
if (scaledvectorscope)
{
U = ((((-672 * R) - (2249 * G) + (2920 * B)) >> 13)) + 128; //* 255.0/314.0
V = ((((3758 * R) - (3416 * G) - (343 * B)) >> 13)) + 128; //* 255.0/244.0
}
else
{
U = ((((-827 * R) - (2769 * G) + (3596 * B)) >> 13)) + 128;
V = ((((3596 * R) - (3269 * G) - (328 * B)) >> 13)) + 128;
}
if (U < 0) U = 0;
if (U > 255) U = 255;
if (V < 0) V = 0;
if (V > 255) V = 255;
tools->scopeUV[U][V]++;
}
break;
case DECODED_FORMAT_RG64:
case DECODED_FORMAT_B64A:
tools->histogram = 1;
for (x = 0, pos = 0; x < width; x += step, pos++)
{
int32_t R, G, B, U, V;
R = sbase[1] >> 8;
G = sbase[2] >> 8;
B = sbase[3] >> 8;
tools->histR[R]++;
tools->histG[G]++;
tools->histB[B]++;
tools->waveR[pos][R]++;
tools->waveG[pos][G]++;
tools->waveB[pos][B]++;
//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
if (scaledvectorscope)
{
U = ((((-672 * R) - (2249 * G) + (2920 * B)) >> 13)) + 128; //* 255.0/314.0
V = ((((3758 * R) - (3416 * G) - (343 * B)) >> 13)) + 128; //* 255.0/244.0
}
else
{
U = ((((-827 * R) - (2769 * G) + (3596 * B)) >> 13)) + 128;
V = ((((3596 * R) - (3269 * G) - (328 * B)) >> 13)) + 128;
}
if (U < 0) U = 0;
if (U > 255) U = 255;
if (V < 0) V = 0;
if (V > 255) V = 255;
tools->scopeUV[U][V]++;
sbase += step * 4;
}
break;
case COLOR_FORMAT_UYVY:
ypos = 1, upos = 0, vpos = 2;
case DECODED_FORMAT_CbYCrY_8bit: // CMD: 20100109
case COLOR_FORMAT_YUYV:
tools->histogram = 1;
for (x = 0, pos = 0; x < width; x += step, pos++)
{
int Y, U, V, R, G, B;
uint8_t *bptr = (uint8_t *)sbase;
bptr += x * 2;
Y = bptr[ypos] - 16;
U = bptr[upos] - 128;
Y += bptr[ypos + 2] - 16;
Y >>= 1;
V = bptr[vpos] - 128;
R = (9535 * Y + 14688 * V) >> 13; //13-bit white
G = (9535 * Y - 4375 * V - 1745 * U) >> 13;
B = (9535 * Y + 17326 * U) >> 13;
//TODO much -20 to 120 RGB range.
if (R > 255) R = 255;
if (R < 0) R = 0;
if (G > 255) G = 255;
if (G < 0) G = 0;
if (B > 255) B = 255;
if (B < 0) B = 0;
tools->histR[R]++;
tools->histG[G]++;
tools->histB[B]++;
tools->waveR[pos][R]++;
tools->waveG[pos][G]++;
tools->waveB[pos][B]++;
if (scaledvectorscope)
{
U *= 255;
U /= 314;
V *= 255;
V /= 244;
}
//* 255.0/314.0
//* 255.0/244.0
U += 128;
V += 128;
if (U < 0) U = 0;
if (U > 255) U = 255;
if (V < 0) V = 0;
if (V > 255) V = 255;
tools->scopeUV[U][V]++;
}
break;
case COLOR_FORMAT_YU64:
tools->histogram = 1;
for (x = 0, pos = 0; x < width; x += step, pos++)
{
int Y, U, V, R, G, B;
uint8_t *bptr = (uint8_t *)sbase;
bptr += x * 4;
bptr++; //read only the high byte out of the 16-bit
Y = bptr[0] - 16;
V = bptr[2] - 128;
Y += bptr[4] - 16;
Y >>= 1;
U = bptr[6] - 128;
R = (9535 * Y + 14688 * V) >> 13; //13-bit white
G = (9535 * Y - 4375 * V - 1745 * U) >> 13;
B = (9535 * Y + 17326 * U) >> 13;
if (R > 255) R = 255;
if (R < 0) R = 0;
if (G > 255) G = 255;
if (G < 0) G = 0;
if (B > 255) B = 255;
if (B < 0) B = 0;
tools->histR[R]++;
tools->histG[G]++;
tools->histB[B]++;
tools->waveR[pos][R]++;
tools->waveG[pos][G]++;
tools->waveB[pos][B]++;
if (scaledvectorscope)
{
U *= 255;
U /= 314;
V *= 255;
V /= 244;
}
U += 128;
V += 128;
if (U < 0) U = 0;
if (U > 255) U = 255;
if (V < 0) V = 0;
if (V > 255) V = 255;
tools->scopeUV[U][V]++;
}
break;
case COLOR_FORMAT_V210:
tools->histogram = 1;
for (x = 0, pos = 0; x < width; x += step, pos++)
{
int Y, U, V, R, G, B;
uint32_t *lptr = (uint32_t *)sbase;
lptr += (x / 6) * 4;
switch (x % 6)
{
case 0:
V = ((*lptr >> 02) & 0xff) - 128;
Y = ((*lptr >> 12) & 0xff) - 16;
U = ((*lptr >> 22) & 0xff) - 128;
lptr++;
Y += ((*lptr >> 02) & 0xff) - 16;
Y >>= 1;
break;
case 1:
lptr++;
Y = ((*lptr >> 02) & 0xff) - 16;
V = ((*lptr >> 12) & 0xff) - 128;
Y += ((*lptr >> 22) & 0xff) - 16;
Y >>= 1;
lptr--;
U = ((*lptr >> 22) & 0xff) - 128;
break;
case 2:
lptr++;
Y = ((*lptr >> 22) & 0xff) - 16;
lptr++;
U = ((*lptr >> 02) & 0xff) - 128;
Y += ((*lptr >> 12) & 0xff) - 16;
Y >>= 1;
V = ((*lptr >> 22) & 0xff) - 128;
break;
case 3:
lptr++;
V = ((*lptr >> 12) & 0xff) - 128;
lptr++;
U = ((*lptr >> 02) & 0xff) - 128;
Y = ((*lptr >> 12) & 0xff) - 16;
lptr++;
Y += ((*lptr >> 02) & 0xff) - 16;
Y >>= 1;
break;
case 4:
lptr += 2;
V = ((*lptr >> 22) & 0xff) - 128;
lptr++;
Y = ((*lptr >> 02) & 0xff) - 16;
U = ((*lptr >> 12) & 0xff) - 128;
Y += ((*lptr >> 22) & 0xff) - 16;
Y >>= 1;
break;
case 5:
lptr += 2;
V = ((*lptr >> 22) & 0xff) - 128;
lptr++;
U = ((*lptr >> 12) & 0xff) - 128;
Y = ((*lptr >> 22) & 0xff) - 16;
lptr++;
Y += ((*lptr >> 02) & 0xff) - 16;
Y >>= 1;
break;
}
R = (9535 * Y + 14688 * V) >> 13; //13-bit white
G = (9535 * Y - 4375 * V - 1745 * U) >> 13;
B = (9535 * Y + 17326 * U) >> 13;
if (R > 255) R = 255;
if (R < 0) R = 0;
if (G > 255) G = 255;
if (G < 0) G = 0;
if (B > 255) B = 255;
if (B < 0) B = 0;
tools->histR[R]++;
tools->histG[G]++;
tools->histB[B]++;
tools->waveR[pos][R]++;
tools->waveG[pos][G]++;
tools->waveB[pos][B]++;
if (scaledvectorscope)
{
U *= 255;
U /= 314;
V *= 255;
V /= 244;
}
U += 128;
V += 128;
if (U < 0) U = 0;
if (U > 255) U = 255;
if (V < 0) V = 0;
if (V > 255) V = 255;
tools->scopeUV[U][V]++;
}
break;
case COLOR_FORMAT_RGB24:
tools->histogram = 1;
for (x = 0, pos = 0; x < width; x += step, pos++)
{
int R, G, B, U, V;
uint8_t *bptr = (uint8_t *)sbase;
bptr += x * 3;
R = bptr[2];
G = bptr[1];
B = bptr[0];
tools->histR[R]++;
tools->histG[G]++;
tools->histB[B]++;
tools->waveR[pos][R]++;
tools->waveG[pos][G]++;
tools->waveB[pos][B]++;
//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
if (scaledvectorscope)
{
U = ((((-672 * R) - (2249 * G) + (2920 * B)) >> 13)) + 128; //* 255.0/314.0
V = ((((3758 * R) - (3416 * G) - (343 * B)) >> 13)) + 128; //* 255.0/244.0
}
else
{
U = ((((-827 * R) - (2769 * G) + (3596 * B)) >> 13)) + 128;
V = ((((3596 * R) - (3269 * G) - (328 * B)) >> 13)) + 128;
}
if (U < 0) U = 0;
if (U > 255) U = 255;
if (V < 0) V = 0;
if (V > 255) V = 255;
tools->scopeUV[U][V]++;
}
break;
case COLOR_FORMAT_RGB32:
tools->histogram = 1;
for (x = 0, pos = 0; x < width; x += step, pos++)
{
int R, G, B, U, V;
uint8_t *bptr = (uint8_t *)sbase;
bptr += x * 4;
R = bptr[2];
G = bptr[1];
B = bptr[0];
tools->histR[R]++;
tools->histG[G]++;
tools->histB[B]++;
tools->waveR[pos][R]++;
tools->waveG[pos][G]++;
tools->waveB[pos][B]++;
//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
if (scaledvectorscope)
{
U = ((((-672 * R) - (2249 * G) + (2920 * B)) >> 13)) + 128; //* 255.0/314.0
V = ((((3758 * R) - (3416 * G) - (343 * B)) >> 13)) + 128; //* 255.0/244.0
}
else
{
U = ((((-827 * R) - (2769 * G) + (3596 * B)) >> 13)) + 128;
V = ((((3596 * R) - (3269 * G) - (328 * B)) >> 13)) + 128;
}
if (U < 0) U = 0;
if (U > 255) U = 255;
if (V < 0) V = 0;
if (V > 255) V = 255;
tools->scopeUV[U][V]++;
}
break;
case COLOR_FORMAT_BYR2:
case COLOR_FORMAT_BYR4:
//do nothing
break;
default:
assert(0);
#if (0 && DEBUG)
fprintf(stderr, "decoder.HistogramLine: Unsupported pixel format\n");
#endif
break;
}
}
//------------------------------------------------------------------------------
// GhostBust -- reduce stereoscopic crosstalk ("ghosting") between the left and
// right eye lines of a 3D frame, in place.  Each 16-bit RGB component is
// reduced to 10 bits and squared (approximate linearization), blended toward
// white while a leak fraction of the OPPOSITE eye's matching channel is
// subtracted, then mapped back through a lazily filled square-root lookup
// table and restored to the 16-bit range.
//
// decoder          supplies decoder->sqrttable; entries equal to the sentinel
//                  65535 are treated as "not yet computed" and filled on
//                  demand below (table presumably pre-initialized to 65535
//                  elsewhere -- TODO confirm)
// sbaseL, sbaseR   interleaved 16-bit RGB lines (left/right eyes), modified
//                  in place
// width            number of RGB pixels per line
// ileakL, ileakR   leak strengths for the left/right outputs; arrive in
//                  16-bit range and are reduced to 10-bit factors below
//------------------------------------------------------------------------------
void GhostBust(DECODER *decoder, unsigned short *sbaseL, unsigned short *sbaseR, int width, int ileakL, int ileakR)
{
#if 1
	// Fixed-point path with lazy sqrt table (active).
	int x, RL, GL, BL, RR, GR, BR;
	int nRL, nGL, nBL;
	int nRR, nGR, nBR;
	int max = 1024 * 1024 - 1;	// clamp limit for 20-bit squared values
	unsigned short *sqrttable = decoder->sqrttable;

	// reduce the 16-bit leak amounts to 10-bit factors (0..1023)
	ileakL >>= 6;
	ileakR >>= 6;

	if (sqrttable == NULL)
		return;

	for (x = 0; x < width; x++)
	{
		// left eye: 10-bit components, squared to 20-bit (linearized)
		RL = sbaseL[0] >> 6;
		GL = sbaseL[1] >> 6; //10-bit
		BL = sbaseL[2] >> 6;
		RL *= RL;
		GL *= GL; //20-bit
		BL *= BL;

		// right eye: same treatment
		RR = sbaseR[0] >> 6;
		GR = sbaseR[1] >> 6; //10-bit
		BR = sbaseR[2] >> 6;
		RR *= RR;
		GR *= GR; //20-bit
		BR *= BR;

		// new left = left blended toward white, minus the right-eye leak
		nRL = RL * (1023 - ileakL) + ileakL * max - RR * ileakL; //30-bit
		nGL = GL * (1023 - ileakL) + ileakL * max - GR * ileakL;
		nBL = BL * (1023 - ileakL) + ileakL * max - BR * ileakL;
		nRL >>= 10; //20-bit
		nGL >>= 10;
		nBL >>= 10;

		// clamp to the 20-bit range before indexing the sqrt table
		if (nRL > max) nRL = max;
		if (nRL < 0) nRL = 0;
		if (nGL > max) nGL = max;
		if (nGL < 0) nGL = 0;
		if (nBL > max) nBL = max;
		if (nBL < 0) nBL = 0;

		// lazily fill the shared sqrt lookup (65535 marks an empty slot)
		if (sqrttable[nRL] == 65535)
			sqrttable[nRL] = (int)sqrt(nRL);
		if (sqrttable[nGL] == 65535)
			sqrttable[nGL] = (int)sqrt(nGL);
		if (sqrttable[nBL] == 65535)
			sqrttable[nBL] = (int)sqrt(nBL);

		// store left result back in the 16-bit range
		sbaseL[0] = sqrttable[nRL] << 6;
		sbaseL[1] = sqrttable[nGL] << 6;
		sbaseL[2] = sqrttable[nBL] << 6;
		sbaseL += 3;

		// new right = right blended toward white, minus the (pre-update)
		// left-eye leak; RL/GL/BL still hold the original squared values
		nRR = RR * (1023 - ileakR) + ileakR * max - RL * ileakR; //30-bit
		nGR = GR * (1023 - ileakR) + ileakR * max - GL * ileakR;
		nBR = BR * (1023 - ileakR) + ileakR * max - BL * ileakR;
		nRR >>= 10; //20-bit
		nGR >>= 10;
		nBR >>= 10;
		if (nRR > max) nRR = max;
		if (nRR < 0) nRR = 0;
		if (nGR > max) nGR = max;
		if (nGR < 0) nGR = 0;
		if (nBR > max) nBR = max;
		if (nBR < 0) nBR = 0;
		if (sqrttable[nRR] == 65535)
			sqrttable[nRR] = (int)sqrt(nRR);
		if (sqrttable[nGR] == 65535)
			sqrttable[nGR] = (int)sqrt(nGR);
		if (sqrttable[nBR] == 65535)
			sqrttable[nBR] = (int)sqrt(nBR);
		sbaseR[0] = sqrttable[nRR] << 6;
		sbaseR[1] = sqrttable[nGR] << 6;
		sbaseR[2] = sqrttable[nBR] << 6;
		sbaseR += 3;
	}
#else // works and fast but has not image linearization, not as good
	// NOTE(review): this disabled SSE2 path references `ileak`, which is not
	// a parameter of this function (the parameters are ileakL/ileakR) -- it
	// would not compile if this branch were enabled.  Fix before re-enabling.
	__m128i *ptrL = (__m128i *)sbaseL;
	__m128i *ptrR = (__m128i *)sbaseR;
	__m128i t, L, R, nL, nR;
	int x, width8 = (width * 3) & ~7;	// component count rounded down to 8
	__m128i white_epi16 = _mm_set1_epi16(32767);
	__m128i leak_epi16 = _mm_set1_epi16(ileak >> 1);
	__m128i oneNegLeak_epi16 = _mm_set1_epi16(32767 - (ileak >> 1));
	for (x = 0; x < width8; x += 8)
	{
		L = _mm_load_si128(ptrL);
		R = _mm_load_si128(ptrR);
		L = _mm_srli_epi16(L, 1); //15-bit
		R = _mm_srli_epi16(R, 1); //15-bit
		// nL = L*(1-leak) + white*leak - R*leak, in 15-bit fixed point
		nL = _mm_mulhi_epi16(L, oneNegLeak_epi16);
		t = _mm_mulhi_epi16(white_epi16, leak_epi16);
		nL = _mm_adds_epi16(nL, t);
		t = _mm_mulhi_epi16(R, leak_epi16);
		nL = _mm_subs_epu16(nL, t);
		// nR = R*(1-leak) + white*leak - L*leak
		nR = _mm_mulhi_epi16(R, oneNegLeak_epi16);
		t = _mm_mulhi_epi16(white_epi16, leak_epi16);
		nR = _mm_adds_epi16(nR, t);
		t = _mm_mulhi_epi16(L, leak_epi16);
		nR = _mm_subs_epu16(nR, t);
		L = _mm_slli_epi16(nL, 2);
		R = _mm_slli_epi16(nR, 2);
		_mm_store_si128(ptrL++, L);
		_mm_store_si128(ptrR++, R);
	}
#endif
}
//------------------------------------------------------------------------------
// GhostBustRC -- crosstalk ("ghost") suppression for RC (presumably red/cyan)
// anaglyph output, operating in place on one interleaved 16-bit RGB line.
// Red has a fraction of the cyan average (G+B)/2 subtracted; green and blue
// each have a fraction of red subtracted.  Channels are squared first
// (approximate linearization) and converted back through a lazily filled
// square-root lookup table.
//
// decoder   supplies decoder->sqrttable; entries equal to the sentinel 65535
//           are treated as "not yet computed" and filled on demand
// sbase     interleaved 16-bit RGB line of `width` pixels, modified in place
// ileakL    leak strength applied to the red channel (reduced to 10-bit)
// ileakR    leak strength applied to the green/blue channels
//------------------------------------------------------------------------------
void GhostBustRC(DECODER *decoder, unsigned short *sbase, int width, int ileakL, int ileakR)
{
#if 1
	// Fixed-point path with lazy sqrt table (active).
	int x, R, G, B;
	int nR, nG, nB;
	int max = 1024 * 1024 - 1;	// clamp limit for 20-bit squared values
	unsigned short *sqrttable = decoder->sqrttable;

	// reduce the 16-bit leak amounts to 10-bit factors (0..1023)
	ileakL >>= 6;
	ileakR >>= 6;

	if (sqrttable == NULL)
		return;

	for (x = 0; x < width; x++)
	{
		// 10-bit components, squared to 20-bit (linearized)
		R = sbase[0] >> 6;
		G = sbase[1] >> 6; //10-bit
		B = sbase[2] >> 6;
		R *= R;
		G *= G; //20-bit
		B *= B;

		// blend toward white and subtract the opposing-channel leak
		nR = R * (1023 - ileakL) + ileakL * max - ((G + B) >> 1) * ileakL; //30-bit
		nG = G * (1023 - ileakR) + ileakR * max - R * ileakR;
		nB = B * (1023 - ileakR) + ileakR * max - R * ileakR;
		nR >>= 10; //20-bit
		nG >>= 10;
		nB >>= 10;

		// clamp to the 20-bit range before indexing the sqrt table
		if (nR > max) nR = max;
		if (nR < 0) nR = 0;
		if (nG > max) nG = max;
		if (nG < 0) nG = 0;
		if (nB > max) nB = max;
		if (nB < 0) nB = 0;

		// lazily fill the shared sqrt lookup (65535 marks an empty slot)
		if (sqrttable[nR] == 65535)
			sqrttable[nR] = (int)sqrt(nR);
		if (sqrttable[nG] == 65535)
			sqrttable[nG] = (int)sqrt(nG);
		if (sqrttable[nB] == 65535)
			sqrttable[nB] = (int)sqrt(nB);

		// restore to the 16-bit output range
		sbase[0] = sqrttable[nR] << 6;
		sbase[1] = sqrttable[nG] << 6;
		sbase[2] = sqrttable[nB] << 6;
		sbase += 3;
	}
#elif 0
	// Reference floating-point implementation (disabled): same math with
	// channels normalized to 0..1 and an exact sqrt per pixel.
	int x;
	float R, G, B;
	float nR, nG, nB;
	float fleakL = (float)ileakL / 65535.0;
	float fleakR = (float)ileakR / 65535.0;
	for (x = 0; x < width; x++)
	{
		R = sbase[0];
		G = sbase[1];
		B = sbase[2];
		R /= 65535.0;
		G /= 65535.0;
		B /= 65535.0;
		R *= R;
		G *= G;
		B *= B;
		nR = R * (1.0 - fleakL) + fleakL - (G + B) * 0.5 * fleakL;
		nG = G * (1.0 - fleakR) + fleakR - R * fleakR;
		nB = B * (1.0 - fleakR) + fleakR - R * fleakR;
		if (nR < 0) nR = 0;
		if (nG < 0) nG = 0;
		if (nB < 0) nB = 0;
		nR = sqrt(nR);
		nG = sqrt(nG);
		nB = sqrt(nB);
		sbase[0] = nR * 65535.0;
		sbase[1] = nG * 65535.0;
		sbase[2] = nB * 65535.0;
		sbase += 3;
	}
#elif 0
	// Disabled SSE experiment: processes two RGB pixels per iteration using
	// single-precision math (_mm_sqrt_ps), repacking results into 16-bit.
	__m128i RGBRGB, rgb_epi32, RGB1, RGB2;
	__m128i zero_epi128 = _mm_setzero_si128();
	int x, width6 = (width * 3) / 6 * 6;	// component count rounded to pairs
	__m128 white_ps = _mm_set1_ps(1.0);
	__m128 mul_neg_leak_ps = _mm_set_ps(1.0 - ((float)ileakL / 65536.0), 1.0 - ((float)ileakR / 65536.0), 1.0 - ((float)ileakR / 65536.0), 1.0 - ((float)ileakL / 65536.0));
	__m128 leak_ps = _mm_set_ps((float)ileakL / 65536.0, (float)ileakR / 65536.0, (float)ileakR / 65536.0, (float)ileakL / 65536.0);
	__m128 scale_ps = _mm_set1_ps(65535.0);
	__m128 scalehalf_ps = _mm_set1_ps(32767.0);
	__m128 zero_ps = _mm_set1_ps(0.0);
	__m128 rgb_ps, alt_rgb_ps;
	__m128i sub_epi32;
	__m128 sub_ps;
	for (x = 0; x < width6; x += 6) // two RGB pairs
	{
		int R, G, B;
		// first pixel of the pair
		RGBRGB = _mm_loadu_si128((__m128i *)sbase);
		R = _mm_extract_epi16(RGBRGB, 0);
		G = _mm_extract_epi16(RGBRGB, 1);
		B = _mm_extract_epi16(RGBRGB, 2);
		G += B;
		G >>= 1;
		sub_epi32 = _mm_set_epi32(G, R, R, G);
		sub_ps = _mm_cvtepi32_ps(sub_epi32); // range 0 to 65535.0
		sub_ps = _mm_div_ps(sub_ps, scale_ps); // range 0 to 1.0
		sub_ps = _mm_mul_ps(sub_ps, sub_ps); // square
		rgb_epi32 = _mm_unpacklo_epi16(RGBRGB, zero_epi128);
		rgb_ps = _mm_cvtepi32_ps(rgb_epi32); // range 0 to 65535.0
		rgb_ps = _mm_div_ps(rgb_ps, scale_ps); // range 0 to 1.0
		rgb_ps = _mm_mul_ps(rgb_ps, rgb_ps); // square
		rgb_ps = _mm_mul_ps(rgb_ps, mul_neg_leak_ps); // [R*(1.0-fleakL)] + fleakL - (G+B)*0.5*fleakL;
		rgb_ps = _mm_add_ps(rgb_ps, leak_ps); // R*(1.0-fleakL) [+ fleakL] - (G+B)*0.5*fleakL;
		sub_ps = _mm_mul_ps(sub_ps, leak_ps); // R*(1.0-fleakL) + fleakL - [(G+B)*0.5*fleakL;]
		rgb_ps = _mm_sub_ps(rgb_ps, sub_ps); // R*(1.0-fleakL) + fleakL] [- (G+B)*0.5*fleakL;]
		rgb_ps = _mm_max_ps(rgb_ps, zero_ps); // if(x < 0) x= 0;
		rgb_ps = _mm_sqrt_ps(rgb_ps); // sqrt()
		rgb_ps = _mm_mul_ps(rgb_ps, scalehalf_ps); // range 0 to 32767
		RGB1 = _mm_cvtps_epi32(rgb_ps);
		RGB1 = _mm_packs_epi32 (RGB1, zero_epi128);
		RGB1 = _mm_slli_si128(RGB1, 10);
		RGB1 = _mm_srli_si128(RGB1, 10);
		// second pixel of the pair
		RGBRGB = _mm_srli_si128(RGBRGB, 6);
		R = _mm_extract_epi16(RGBRGB, 0);
		G = _mm_extract_epi16(RGBRGB, 1);
		B = _mm_extract_epi16(RGBRGB, 2);
		G += B;
		G >>= 1;
		sub_epi32 = _mm_set_epi32(G, R, R, G);
		sub_ps = _mm_cvtepi32_ps(sub_epi32); // range 0 to 65535.0
		sub_ps = _mm_div_ps(sub_ps, scale_ps); // range 0 to 1.0
		sub_ps = _mm_mul_ps(sub_ps, sub_ps); // square
		rgb_epi32 = _mm_unpacklo_epi16(RGBRGB, zero_epi128);
		rgb_ps = _mm_cvtepi32_ps(rgb_epi32); // range 0 to 65535.0
		rgb_ps = _mm_div_ps(rgb_ps, scale_ps); // range 0 to 1.0
		rgb_ps = _mm_mul_ps(rgb_ps, rgb_ps); // square
		rgb_ps = _mm_mul_ps(rgb_ps, mul_neg_leak_ps); // [R*(1.0-fleakL)] + fleakL - (G+B)*0.5*fleakL;
		rgb_ps = _mm_add_ps(rgb_ps, leak_ps); // R*(1.0-fleakL) [+ fleakL] - (G+B)*0.5*fleakL;
		sub_ps = _mm_mul_ps(sub_ps, leak_ps); // R*(1.0-fleakL) + fleakL - [(G+B)*0.5*fleakL;]
		rgb_ps = _mm_sub_ps(rgb_ps, sub_ps); // R*(1.0-fleakL) + fleakL] [- (G+B)*0.5*fleakL;]
		rgb_ps = _mm_max_ps(rgb_ps, zero_ps); // if(x < 0) x= 0;
		rgb_ps = _mm_sqrt_ps(rgb_ps); // sqrt()
		rgb_ps = _mm_mul_ps(rgb_ps, scalehalf_ps); // range 0 to 32767
		RGB2 = _mm_cvtps_epi32(rgb_ps);
		RGB2 = _mm_packs_epi32 (RGB2, zero_epi128);
		RGB2 = _mm_slli_si128(RGB2, 6);
		// merge the two results and preserve the untouched trailing lanes
		RGB1 = _mm_adds_epi16(RGB1, RGB2);
		RGB1 = _mm_slli_epi16(RGB1, 1);
		RGB1 = _mm_slli_si128(RGB1, 4);
		RGB1 = _mm_srli_si128(RGB1, 4);
		RGBRGB = _mm_srli_si128(RGBRGB, 6);
		RGBRGB = _mm_slli_si128(RGBRGB, 12);
		RGBRGB = _mm_adds_epi16(RGB1, RGBRGB);
		_mm_storeu_si128((__m128i *)sbase, RGBRGB);
		sbase += 6;
	}
#endif
}
//------------------------------------------------------------------------------
// GhostBustAB -- crosstalk ("ghost") suppression for AB (presumably
// amber/blue) anaglyph output, in place on one interleaved 16-bit RGB line.
// Red and green (the amber pair) each have a fraction of blue subtracted;
// blue has a fraction of the amber average (R+G)/2 subtracted.  Channels are
// squared first (approximate linearization) and restored through the
// decoder's lazily populated square-root table.
//
// decoder   supplies decoder->sqrttable; entries equal to the sentinel 65535
//           are treated as "not yet computed" and filled on demand
// sbase     interleaved 16-bit RGB line of `width` pixels, modified in place
// ileakL    leak strength applied to the amber (R/G) channels
// ileakR    leak strength applied to the blue channel
//------------------------------------------------------------------------------
void GhostBustAB(DECODER *decoder, unsigned short *sbase, int width, int ileakL, int ileakR)
{
	unsigned short *sqrttable = decoder->sqrttable;
	const int limit = 1024 * 1024 - 1;	// clamp for 20-bit squared values
	int col;

	// reduce the 16-bit leak amounts to 10-bit factors (0..1023)
	ileakL >>= 6;
	ileakR >>= 6;

	if (sqrttable == NULL)
		return;

	for (col = 0; col < width; col++, sbase += 3)
	{
		// 10-bit components, squared to 20-bit
		int r2 = sbase[0] >> 6;
		int g2 = sbase[1] >> 6;
		int b2 = sbase[2] >> 6;
		int outR, outG, outB;

		r2 *= r2;
		g2 *= g2;
		b2 *= b2;

		// blend toward white and subtract the opposing-channel leak (30-bit)
		outR = r2 * (1023 - ileakL) + ileakL * limit - b2 * ileakL;
		outG = g2 * (1023 - ileakL) + ileakL * limit - b2 * ileakL;
		outB = b2 * (1023 - ileakR) + ileakR * limit - ((r2 + g2) >> 1) * ileakR;

		// back to 20-bit and clamp before indexing the table
		outR >>= 10;
		outG >>= 10;
		outB >>= 10;
		if (outR > limit) outR = limit;
		if (outR < 0) outR = 0;
		if (outG > limit) outG = limit;
		if (outG < 0) outG = 0;
		if (outB > limit) outB = limit;
		if (outB < 0) outB = 0;

		// lazily fill the shared sqrt lookup (65535 marks an empty slot)
		if (sqrttable[outR] == 65535) sqrttable[outR] = (int)sqrt(outR);
		if (sqrttable[outG] == 65535) sqrttable[outG] = (int)sqrt(outG);
		if (sqrttable[outB] == 65535) sqrttable[outB] = (int)sqrt(outB);

		// restore to the 16-bit output range
		sbase[0] = sqrttable[outR] << 6;
		sbase[1] = sqrttable[outG] << 6;
		sbase[2] = sqrttable[outB] << 6;
	}
}
//------------------------------------------------------------------------------
// GhostBustGM -- crosstalk ("ghost") suppression for GM (presumably
// green/magenta) anaglyph output, in place on one interleaved 16-bit RGB
// line.  Red and blue (the magenta pair) each have a fraction of green
// subtracted; green has a fraction of the magenta average (R+B)/2 subtracted.
// Channels are squared first (approximate linearization) and restored through
// the decoder's lazily populated square-root table.
//
// decoder   supplies decoder->sqrttable; entries equal to the sentinel 65535
//           are treated as "not yet computed" and filled on demand
// sbase     interleaved 16-bit RGB line of `width` pixels, modified in place
// ileakL    leak strength applied to the magenta (R/B) channels
// ileakR    leak strength applied to the green channel
//------------------------------------------------------------------------------
void GhostBustGM(DECODER *decoder, unsigned short *sbase, int width, int ileakL, int ileakR)
{
	unsigned short *lut = decoder->sqrttable;
	const int peak = 1024 * 1024 - 1;	// clamp for 20-bit squared values
	int i;

	if (lut == NULL)
		return;

	// reduce the 16-bit leak amounts to 10-bit factors (0..1023)
	ileakL >>= 6;
	ileakR >>= 6;

	for (i = 0; i < width; i++, sbase += 3)
	{
		// 10-bit components, squared to 20-bit
		int red = sbase[0] >> 6;
		int grn = sbase[1] >> 6;
		int blu = sbase[2] >> 6;
		int outR, outG, outB;

		red *= red;
		grn *= grn;
		blu *= blu;

		// blend toward white and subtract the opposing-channel leak (30-bit)
		outR = red * (1023 - ileakL) + ileakL * peak - grn * ileakL;
		outG = grn * (1023 - ileakR) + ileakR * peak - ((red + blu) >> 1) * ileakR;
		outB = blu * (1023 - ileakL) + ileakL * peak - grn * ileakL;

		// back to 20-bit and clamp before indexing the table
		outR >>= 10;
		outG >>= 10;
		outB >>= 10;
		if (outR < 0) outR = 0;
		if (outR > peak) outR = peak;
		if (outG < 0) outG = 0;
		if (outG > peak) outG = peak;
		if (outB < 0) outB = 0;
		if (outB > peak) outB = peak;

		// lazily fill the shared sqrt lookup (65535 marks an empty slot)
		if (lut[outR] == 65535) lut[outR] = (int)sqrt(outR);
		if (lut[outG] == 65535) lut[outG] = (int)sqrt(outG);
		if (lut[outB] == 65535) lut[outB] = (int)sqrt(outB);

		// restore to the 16-bit output range
		sbase[0] = lut[outR] << 6;
		sbase[1] = lut[outG] << 6;
		sbase[2] = lut[outB] << 6;
	}
}
void ProcessLine3D(DECODER *decoder, uint8_t *buffer, int bufferremain, uint8_t *output, int pitch, uint8_t *source_buffer, int source_pitch, int channel_offset, int y, int blank)
{
uint16_t *scratchline, *scratchline2, *scratchline3;
uint16_t *sptr;
uint16_t *srclineA, *srclineB;
uint16_t *dstlineA, *dstlineB;
int x, y2;
int width = decoder->frame.width;
int height = decoder->frame.height;
int skip = 3;
int sskip = 3;
uint8_t *bptr1;
uint8_t *bptr2;
uint8_t *baseptr1;
uint8_t *baseptr2;
float windowMaskL = decoder->cfhddata.channel[0].FloatingWindowMaskL;
float windowMaskR = decoder->cfhddata.channel[0].FloatingWindowMaskR;
float frameTilt = decoder->cfhddata.channel[0].FrameTilt;
float horizOffset = decoder->cfhddata.channel[1].HorizontalOffset;
float horizOffsetR = decoder->cfhddata.channel[2].HorizontalOffset;
float rotOffset = decoder->cfhddata.channel[1].RotationOffset;
float rotOffsetR = decoder->cfhddata.channel[2].RotationOffset;
float horizOffsetStep = 0;
float horizOffsetStepR = 0;
int flip1 = 0, flip2 = 0;
int channel_flip = decoder->cfhddata.channel_flip;
int source_pitch1 = source_pitch;
int source_pitch2 = source_pitch;
uint8_t *outputline = output + y * pitch;
uint8_t *outputline2 = NULL;
float horizOffsetBase;
float rotOffsetBase;
float horizOffsetBaseR;
float rotOffsetBaseR;
int formatdone = 0;
float xmin = decoder->cfhddata.channel[0].FrameMask.topLftX;
float xmax = decoder->cfhddata.channel[0].FrameMask.topRgtX;
//float ymin = decoder->cfhddata.channel[0].FrameMask.topLftY;
float ymax = decoder->cfhddata.channel[0].FrameMask.botLftY;
float zoom;
float zoomR;
float frameZoom1 = decoder->cfhddata.channel[1].FrameZoom;
float frameZoom2 = decoder->cfhddata.channel[2].FrameZoom;
float frameAutoZoom = decoder->cfhddata.channel[0].FrameAutoZoom;
float frameDiffZoom1 = decoder->cfhddata.channel[1].FrameDiffZoom;
float frameDiffZoom2 = decoder->cfhddata.channel[2].FrameDiffZoom;
float frameHDynamic = decoder->cfhddata.FrameHDynamic;
float frameHDynCenter = decoder->cfhddata.FrameHDynCenter;
float frameHDynWidth = decoder->cfhddata.FrameHDynWidth;
float frameHScale = decoder->cfhddata.FrameHScale;
int alphachannel = 0;
int whitepoint = 16;
float blursharpenL = decoder->cfhddata.channel[1].user_blur_sharpen;
float blursharpenR = decoder->cfhddata.channel[2].user_blur_sharpen;
float vignette = decoder->cfhddata.channel[0].user_vignette_start;
int flip_LR = 0;
float vig_r1;
float vig_r2;
float vig_gain;
if (blank) // blankline, no shifts required
{
windowMaskL = 0;
windowMaskR = 0;
frameTilt = 0;
horizOffset = 0;
horizOffsetR = 0;
rotOffset = 0;
rotOffsetR = 0;
frameZoom1 = 1.0;
frameZoom2 = 1.0;
frameAutoZoom = 1.0;
frameDiffZoom1 = 1.0;
frameDiffZoom2 = 1.0;
frameHScale = 1.0;
frameHDynamic = 1.0;
frameHDynCenter = 0.5;
frameHDynWidth = 0.0;
}
if ( decoder->StereoBufferFormat == DECODED_FORMAT_RG64 ||
decoder->StereoBufferFormat == DECODED_FORMAT_W13A ||
decoder->StereoBufferFormat == DECODED_FORMAT_RGB32)
alphachannel = 1;
if (xmax == 0.0) xmax = 1.0;
if (ymax == 0.0) ymax = 1.0;
if (decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
{
width *= 2;
}
if (decoder->source_channels < 2) // 2D
{
channel_flip &= 0x3;
channel_flip |= channel_flip << 2;
decoder->cfhddata.channel_flip = channel_flip;
}
if (!(decoder->cfhddata.process_path_flags & PROCESSING_COLORMATRIX) ||
decoder->frame.resolution == DECODED_RESOLUTION_QUARTER ||
decoder->frame.resolution == DECODED_RESOLUTION_LOWPASS_ONLY ||
decoder->frame.resolution == DECODED_RESOLUTION_QUARTER_NODEBAYER_SCALED)
{
blursharpenL = 0.0;
blursharpenR = 0.0;
}
if (!(decoder->cfhddata.process_path_flags & PROCESSING_ORIENTATION))
{
horizOffset = rotOffset = 0;
horizOffsetR = rotOffsetR = 0;
frameTilt = 0;
frameAutoZoom = 1.0;
frameDiffZoom1 = 1.0;
frameDiffZoom2 = 1.0;
}
if (!(decoder->cfhddata.process_path_flags & PROCESSING_IMAGEFLIPS))
{
channel_flip = 0;
}
if (decoder->cfhddata.process_path_flags & PROCESSING_FRAMING)
{
horizOffset += decoder->cfhddata.FrameOffsetX;
horizOffsetR -= decoder->cfhddata.FrameOffsetX;
frameZoom1 += frameHScale - 1.0f;
frameZoom2 += frameHScale - 1.0f;
if (frameHDynamic != 1.0)
{
frameZoom1 += 0.00001f;
frameZoom2 += 0.00001f;
}
if (vignette != 0.0)
{
float vig_diag = sqrtf(1.0f + ((float)decoder->frame.height / (float) decoder->frame.width) * ((float)decoder->frame.height / (float) decoder->frame.width));
vig_r1 = (vignette + 1.0f);
vig_r2 = (decoder->cfhddata.channel[0].user_vignette_end + 1.0f);
vig_gain = decoder->cfhddata.channel[0].user_vignette_gain;
vig_r1 *= vig_diag;
vig_r2 *= vig_diag;
}
}
else
{
frameZoom1 = 1.0f;
frameZoom2 = 1.0f;
vignette = 0;
}
zoom = frameZoom1 * frameAutoZoom * frameDiffZoom1;
if (frameDiffZoom2 != 0.0)
zoomR = frameZoom2 * frameAutoZoom / frameDiffZoom2;
else
zoomR = 0.0;
if (decoder->cfhddata.process_path_flags & PROCESSING_FRAMING)
{
if (decoder->cfhddata.InvertOffset)
{
rotOffset = -rotOffset;
rotOffsetR = -rotOffsetR;
rotOffset -= decoder->cfhddata.FrameOffsetR;
rotOffsetR -= -decoder->cfhddata.FrameOffsetR;
}
else
{
rotOffset += decoder->cfhddata.FrameOffsetR;
rotOffsetR += -decoder->cfhddata.FrameOffsetR;
}
}
rotOffsetBase = rotOffset;
horizOffsetBase = horizOffset;
rotOffsetBaseR = rotOffsetR;
horizOffsetBaseR = horizOffsetR;
horizOffset -= rotOffset * 0.5f;
horizOffsetStep = rotOffset / (float)height;
horizOffsetR -= rotOffsetR * 0.5f;
horizOffsetStepR = rotOffsetR / (float)height;
horizOffset += horizOffsetStep * y;
horizOffsetR += horizOffsetStepR * y;
assert(bufferremain >= width * 8 * 2 * 2);
baseptr1 = source_buffer;
baseptr2 = source_buffer + channel_offset;
if (channel_flip & 0xf)
{
if (channel_flip & 1)
{
flip1 = 1;
}
if (channel_flip & 4)
{
flip2 = 1;
}
}
if (source_pitch1 < 0)
flip_LR = 1;
decoder->sharpen_flip = 0;
if (channel_flip & 2) //ProcessLine3D
{
if (decoder->channel_blend_type == BLEND_NONE && decoder->channel_current == 1) // right channel only (stored in baseptr1)
{
}
else
{
baseptr1 += source_pitch1 * (height - 1);
source_pitch1 = -source_pitch1;
decoder->sharpen_flip = 1;
}
}
if (channel_flip & 8)
{
if (decoder->channel_blend_type == BLEND_NONE && decoder->channel_current == 1) // right channel only (stored in baseptr1)
{
baseptr1 += source_pitch1 * (height - 1);
source_pitch1 = -source_pitch1;
decoder->sharpen_flip = 1;
}
else
{
baseptr2 += source_pitch2 * (height - 1);
source_pitch2 = -source_pitch2;
}
}
bptr1 = baseptr1 + y * source_pitch1;
bptr2 = baseptr2 + y * source_pitch2;
y2 = y;
if (decoder->channel_blend_type == BLEND_FREEVIEW) //FreeView
{
if (y2 < height / 4)
{
blank = 1;
y2 = 0;
}
else
{
y2 -= height / 4;
y2 *= 2;
if (y2 >= height - 1)
{
blank = 1;
y2 = height - 2;
}
}
bptr1 = baseptr1 + y2 * source_pitch1;
bptr2 = baseptr2 + y2 * source_pitch2;
}
srclineA = (uint16_t *)bptr1;
srclineB = (uint16_t *)bptr2;
scratchline = (uint16_t *)buffer;
scratchline2 = (uint16_t *)(buffer + width * 6 + width) /* as we pad the line */ ;;
scratchline3 = (uint16_t *)(buffer + width * 6 * 2 + width * 2) /* as we pad the line */ ;
if (alphachannel)
{
scratchline = (uint16_t *)buffer;
scratchline2 = (uint16_t *)(buffer + width * 8 + width) /* as we pad the line */ ;;
scratchline3 = (uint16_t *)(buffer + width * 8 * 2 + width * 2) /* as we pad the line */ ;
}
dstlineA = sptr = scratchline;
dstlineB = scratchline3;
switch (decoder->StereoBufferFormat)
{
case DECODED_FORMAT_RG64:
whitepoint = 16;
skip = 8;
sskip = 4;
break;
case DECODED_FORMAT_W13A:
whitepoint = 13;
skip = 8;
sskip = 4;
break;
case DECODED_FORMAT_WP13:
whitepoint = 13;
skip = 6;
sskip = 3;
break;
case DECODED_FORMAT_RG48:
skip = 6;
sskip = 3;
break;
case DECODED_FORMAT_RGB32:
skip = 4;
break;
case DECODED_FORMAT_RGB24:
skip = 3;
break;
case DECODED_FORMAT_YUYV:
skip = 2;
break;
}
if (blank)
{
if (srclineA)
memset(srclineA, 0, width * skip);
if (srclineB && decoder->channel_decodes > 1)
memset(srclineB, 0, width * skip);
}
if (blursharpenL != 0.0 || blursharpenR != 0.0)
{
if (decoder->channel_blend_type == BLEND_FREEVIEW ||
decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC ||
decoder->channel_blend_type == BLEND_LINE_INTERLEAVED
)
{
decoder->doVerticalFilter = 0;
}
else
{
decoder->doVerticalFilter = 1;
}
}
{
switch (decoder->channel_blend_type)
{
case BLEND_FREEVIEW:
case BLEND_SIDEBYSIDE_ANAMORPHIC: //side by side
if (!blank)
{
if (decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL || decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER)
{
dstlineA = srclineA;
sptr = dstlineA;
if (zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt)
{
if (!alphachannel)
{
if (zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGB48HoriShift(decoder, srclineA, scratchline2, width / 2, -horizOffset, flip1);
RGB48HoriShift(decoder, srclineB, scratchline2, width / 2, horizOffsetR, flip2);
}
else
{
RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width / 2, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGB48HoriShiftZoom(decoder, srclineB, scratchline2, width / 2, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
else
{
if (zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGBA64HoriShift(decoder, srclineA, scratchline2, width / 2, -horizOffset, flip1);
RGBA64HoriShift(decoder, srclineB, scratchline2, width / 2, horizOffsetR, flip2);
}
else
{
RGBA64HoriShiftZoom(decoder, srclineA, scratchline2, width / 2, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGBA64HoriShiftZoom(decoder, srclineB, scratchline2, width / 2, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
}
if (vignette != 0.0)
{
int cwidth = width / 2;
if (decoder->channel_blend_type == BLEND_SIDEBYSIDE_ANAMORPHIC)
cwidth = width;
FastVignetteInplaceWP13(decoder, width / 2, cwidth, height, y, vig_r1, vig_r2, vig_gain,
(int16_t *)srclineA, decoder->frame.resolution, skip);
FastVignetteInplaceWP13(decoder, width / 2, cwidth, height, y, vig_r1, vig_r2, vig_gain,
(int16_t *)srclineB, decoder->frame.resolution, skip);
}
if (blursharpenL != 0.0) FastSharpeningBlurHinplaceWP13(width / 2, (int16_t *)srclineA, blursharpenL, decoder->frame.resolution, skip);
if (blursharpenR != 0.0) FastSharpeningBlurHinplaceWP13(width / 2, (int16_t *)srclineB, blursharpenR, decoder->frame.resolution, skip);
memcpy(dstlineA + sskip * (width / 2), srclineB, width / 2 * sskip * 2);
}
else
{
int16_t *ptr;
int16_t *ptr1 = (int16_t *)srclineA;
int16_t *ptr2 = (int16_t *)srclineB;
if (!alphachannel)
{
if (zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt)
{
if (zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGB48HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1);
RGB48HoriShift(decoder, srclineB, scratchline2, width, horizOffset, flip2);
}
else
{
RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGB48HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
}
else
{
if (zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt)
{
if (zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGBA64HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1);
RGBA64HoriShift(decoder, srclineB, scratchline2, width, horizOffset, flip2);
}
else
{
RGBA64HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGBA64HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
}
if (vignette != 0.0)
{
int cwidth = width / 2;
if (decoder->channel_blend_type == BLEND_SIDEBYSIDE_ANAMORPHIC)
cwidth = width;
FastVignetteInplaceWP13(decoder, width, cwidth, height, y, vig_r1, vig_r2, vig_gain,
(int16_t *)srclineA, decoder->frame.resolution, skip);
FastVignetteInplaceWP13(decoder, width, cwidth, height, y, vig_r1, vig_r2, vig_gain,
(int16_t *)srclineB, decoder->frame.resolution, skip);
}
if (blursharpenL != 0.0) FastSharpeningBlurHinplaceWP13(width, (int16_t *)srclineA, blursharpenL, decoder->frame.resolution, skip);
if (blursharpenR != 0.0) FastSharpeningBlurHinplaceWP13(width, (int16_t *)srclineB, blursharpenR, decoder->frame.resolution, skip);
dstlineA = srclineA;
ptr = (int16_t *)srclineA;
for (x = 0; x < width / 2; x++)
{
*ptr++ = (ptr1[0] + ptr1[3]) >> 1;
*ptr++ = (ptr1[1] + ptr1[4]) >> 1;
*ptr++ = (ptr1[2] + ptr1[5]) >> 1 ;
ptr1 += sskip * 2;
}
for (; x < width; x++)
{
*ptr++ = (ptr2[0] + ptr2[3]) >> 1;
*ptr++ = (ptr2[1] + ptr2[4]) >> 1;
*ptr++ = (ptr2[2] + ptr2[5]) >> 1;
ptr2 += sskip * 2;
}
}
if (windowMaskL || xmin)
{
float mask = windowMaskL > xmin ? windowMaskL : xmin;
RGB48WindowMask(decoder, dstlineA, width / 2, 0, mask);
if (windowMaskL < 0)
RGB48WindowMask(decoder, dstlineA, width / 2, 0, windowMaskL);
if (xmin)
{
RGB48WindowMask(decoder, dstlineA, width / 2, 1, xmin);
}
}
if (windowMaskR || (1.0 - xmax))
{
float mask = windowMaskR > (1.0f - xmax) ? windowMaskR : (1.0f - xmax);
RGB48WindowMask(decoder, dstlineA + width * sskip / 2, width / 2, 1, mask);
if (windowMaskR < 0)
RGB48WindowMask(decoder, dstlineA + width * sskip / 2, width / 2, 1, windowMaskR);
if (xmin)
{
RGB48WindowMask(decoder, dstlineA + width * sskip / 2, width / 2, 0, xmin);
}
}
if (decoder->channel_swapped_flags & FLAG3D_GHOSTBUST)
{
if (decoder->ghost_bust_left || decoder->ghost_bust_right)
{
GhostBust(decoder, dstlineA, dstlineA + width * sskip / 2, width / 2, decoder->ghost_bust_left, decoder->ghost_bust_right);
}
}
if (decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
memcpy(scratchline2 + width * sskip / 2, dstlineA, width * sskip * 2 / 2);
memcpy(dstlineA, dstlineA + width * sskip / 2, width * sskip * 2 / 2);
memcpy(dstlineA + width * sskip / 2, scratchline2 + width * sskip / 2, width * sskip * 2 / 2);
}
}
break;
case BLEND_STACKED_ANAMORPHIC: //stacked
case BLEND_LINE_INTERLEAVED: //fields
if ((y & 1) == 1) return;
if (!blank)
{
uint16_t *ptrA1 = (uint16_t *)srclineA;
uint16_t *ptrA2 = (uint16_t *)srclineA + (source_pitch1 >> 1);
uint16_t *ptrB1 = (uint16_t *)srclineB;
uint16_t *ptrB2 = (uint16_t *)srclineB + (source_pitch2 >> 1);
FastBlendWP13((short *)ptrA1, (short *)ptrA2, (short *)ptrA1/*output*/, width * skip);
FastBlendWP13((short *)ptrB1, (short *)ptrB2, (short *)ptrB1/*output*/, width * skip);
if (zoom != 1.0 || zoomR != 1.0 || horizOffset || horizOffsetR || channel_flip || frameTilt)
{
if (!alphachannel)
{
if (zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGB48HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1);
RGB48HoriShift(decoder, srclineB, scratchline2, width, horizOffsetR, flip2);
}
else
{
RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGB48HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
else
{
if (zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGBA64HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1);
RGBA64HoriShift(decoder, srclineB, scratchline2, width, horizOffsetR, flip2);
}
else
{
RGBA64HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGBA64HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
}
if (vignette != 0.0)
{
FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain,
(short *)srclineA, decoder->frame.resolution, skip);
FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain,
(short *)srclineB, decoder->frame.resolution, skip);
}
if (blursharpenL != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineA, blursharpenL, decoder->frame.resolution, skip);
if (blursharpenR != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineB, blursharpenR, decoder->frame.resolution, skip);
if (windowMaskL || xmin)
{
float mask = windowMaskL > xmin ? windowMaskL : xmin;
RGB48WindowMask(decoder, srclineA, width, 0, mask);
if (windowMaskL < 0)
RGB48WindowMask(decoder, srclineA, width, 0, windowMaskL);
if (xmin)
{
RGB48WindowMask(decoder, srclineA, width, 1, xmin);
}
}
if (windowMaskR || (1.0 - xmax))
{
float mask = windowMaskR > (1.0f - xmax) ? windowMaskR : (1.0f - xmax);
RGB48WindowMask(decoder, srclineB, width, 1, mask);
if (windowMaskR < 0)
RGB48WindowMask(decoder, srclineB, width, 1, windowMaskR);
if (xmin)
{
RGB48WindowMask(decoder, srclineB, width, 0, xmin);
}
}
if (decoder->channel_swapped_flags & FLAG3D_GHOSTBUST)
{
if (decoder->ghost_bust_left || decoder->ghost_bust_right)
{
GhostBust(decoder, srclineA, srclineB, width, decoder->ghost_bust_left, decoder->ghost_bust_right);
}
}
if (decoder->doVerticalFilter == 0)
{
if (decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC) //stacked
{
if (decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
outputline2 = output + (y >> 1) * pitch;
outputline = output + ((y >> 1) + (height / 2)) * pitch;
}
else
{
outputline = output + (y >> 1) * pitch;
outputline2 = output + ((y >> 1) + (height / 2)) * pitch;
}
}
else //fields
{
if (decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
outputline = output + (y) * pitch;
outputline2 = output + (y + 1) * pitch;
}
else
{
outputline2 = output + (y) * pitch;
outputline = output + (y + 1) * pitch;
}
}
if (flip_LR/*source_pitch1 < 0*/) // flip Left and Right
{
uint8_t *tmp = outputline2;
outputline2 = outputline;
outputline = tmp;
}
}
else
{
if (decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
memcpy(scratchline2, srclineA, width * skip);
memcpy(srclineA, srclineB, width * skip);
memcpy(srclineB, scratchline2, width * skip);
}
}
}
break;
case BLEND_ONION: //onion
case BLEND_DIFFERENCE: //difference
case BLEND_SPLITVIEW: //splitView
if (!blank)
{
//dstlineA = source_buffer;
//dstlineA += (source_pitch>>1) * y;
sptr = dstlineA = srclineA;
srclineA = (uint16_t *)bptr1;
srclineB = (uint16_t *)bptr2;
if (zoom != 1.0 || zoomR != 1.0 || horizOffset || horizOffsetR || channel_flip || frameTilt)
{
if (!alphachannel)
{
if (zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGB48HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1);
RGB48HoriShift(decoder, srclineB, scratchline2, width, horizOffsetR, flip2);
}
else
{
RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGB48HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
else
{
if (zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGBA64HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1);
RGBA64HoriShift(decoder, srclineB, scratchline2, width, horizOffsetR, flip2);
}
else
{
RGBA64HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGBA64HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
}
if (vignette != 0.0)
{
FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain,
(short *)srclineA, decoder->frame.resolution, skip);
FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain,
(short *)srclineB, decoder->frame.resolution, skip);
}
if (blursharpenL != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineA, blursharpenL, decoder->frame.resolution, skip);
if (blursharpenR != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineB, blursharpenR, decoder->frame.resolution, skip);
if (windowMaskL || xmin)
{
float mask = windowMaskL > xmin ? windowMaskL : xmin;
RGB48WindowMask(decoder, srclineA, width, 0, mask);
if (windowMaskL < 0)
RGB48WindowMask(decoder, srclineA, width, 0, windowMaskL);
if (xmin)
{
RGB48WindowMask(decoder, srclineA, width, 1, xmin);
}
}
if (windowMaskR || (1.0 - xmax))
{
float mask = windowMaskR > (1.0f - xmax) ? windowMaskR : (1.0f - xmax);
RGB48WindowMask(decoder, srclineB, width, 1, mask);
if (windowMaskR < 0)
RGB48WindowMask(decoder, srclineB, width, 1, windowMaskR);
if (xmin)
{
RGB48WindowMask(decoder, srclineB, width, 0, xmin);
}
}
x = 0;
if (decoder->channel_blend_type == BLEND_SPLITVIEW) //split view
{
int xsplit = width * (decoder->cfhddata.split_pos_xy & 0xff) / 255;
for (x = xsplit * sskip; x < width * sskip; x++)
{
srclineA[x] = srclineB[x];
}
}
else if (decoder->channel_blend_type == BLEND_ONION) //onion
{
FastBlendWP13((short *)srclineA, (short *)srclineB, (short *)dstlineA/*output*/, width * skip);
}
else if (decoder->channel_blend_type == BLEND_DIFFERENCE) //difference
{
#if XMMOPT
int width8 = (width * sskip) & 0xfff8;
__m128i mid_epi16;
//int unaligned = ((int)sbase) & 15;
//unaligned += ((int)in_rgb8) & 15;
if (whitepoint == 13)
mid_epi16 = _mm_set1_epi16(0x0fff);
else
mid_epi16 = _mm_set1_epi16(0x1fff);
for (x = 0; x < width8; x += 8)
{
__m128i rgb16A = _mm_load_si128((__m128i *)&srclineA[x]);
__m128i rgb16B = _mm_load_si128((__m128i *)&srclineB[x]);
// 0 to 0xffff
if (decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
rgb16A = _mm_subs_epi16(rgb16B, rgb16A); // -3fff to 3fff
}
else
{
rgb16A = _mm_subs_epi16(rgb16A, rgb16B);
}
rgb16A = _mm_adds_epi16(rgb16A, mid_epi16); // -0x1fff to 0x5fff , avg 0x1fff
_mm_store_si128((__m128i *)&dstlineA[x], rgb16A);
}
#endif
for (; x < width * sskip; x++)
{
int val;
if (decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
val = (srclineB[x] - srclineA[x]) + 32768;
}
else
{
val = (srclineA[x] - srclineB[x]) + 32768;
}
if (val > 0x7fff) val = 0x7fff;
if (val < 0) val = 0;
dstlineA[x] = val;
}
}
}
break;
case BLEND_ANAGLYPH_RC:
case BLEND_ANAGLYPH_RC_BW:
case BLEND_ANAGLYPH_AB:
case BLEND_ANAGLYPH_AB_BW:
case BLEND_ANAGLYPH_GM:
case BLEND_ANAGLYPH_GM_BW:
case BLEND_ANAGLYPH_DUBOIS: //Optimized
{
uint16_t *sptr1 = scratchline2;
uint16_t *sptr2 = scratchline3;
dstlineA = (uint16_t *)bptr1;
// dstlineA += (source_pitch>>1) * y;
sptr = dstlineA;
sptr1 = srclineA = (uint16_t *)bptr1;
sptr2 = srclineB = (uint16_t *)bptr2;
if (zoom != 1.0 || zoomR != 1.0 || horizOffset || horizOffsetR || channel_flip || frameTilt)
{
if (!alphachannel)
{
if (zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGB48HoriShift(decoder, srclineA, scratchline, width, -horizOffset, flip1);
RGB48HoriShift(decoder, srclineB, scratchline, width, horizOffsetR, flip2);
}
else
{
RGB48HoriShiftZoom(decoder, srclineA, scratchline, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGB48HoriShiftZoom(decoder, srclineB, scratchline, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
else
{
if (zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
{
RGBA64HoriShift(decoder, scratchline2, scratchline, width, -horizOffset, flip1);
RGBA64HoriShift(decoder, scratchline3, scratchline, width, horizOffsetR, flip2);
}
else
{
RGBA64HoriShiftZoom(decoder, scratchline2, scratchline, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
RGBA64HoriShiftZoom(decoder, scratchline3, scratchline, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
}
if (vignette != 0.0)
{
FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain,
(short *)srclineA, decoder->frame.resolution, skip);
FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain,
(short *)srclineB, decoder->frame.resolution, skip);
}
if (blursharpenL != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineA, blursharpenL, decoder->frame.resolution, skip);
if (blursharpenR != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineB, blursharpenR, decoder->frame.resolution, skip);
if (decoder->channel_swapped_flags & FLAG3D_GHOSTBUST)
{
if (decoder->ghost_bust_left || decoder->ghost_bust_right)
{
GhostBust(decoder, srclineA, srclineB, width, decoder->ghost_bust_left, decoder->ghost_bust_right);
}
}
if (windowMaskL || xmin)
{
float mask = windowMaskL > xmin ? windowMaskL : xmin;
RGB48WindowMask(decoder, srclineA, width, 0, mask);
if (windowMaskL < 0)
RGB48WindowMask(decoder, srclineA, width, 0, windowMaskL);
if (xmin)
{
RGB48WindowMask(decoder, srclineA, width, 1, xmin);
}
}
if (windowMaskR || (1.0 - xmax))
{
float mask = windowMaskR > (1.0f - xmax) ? windowMaskR : (1.0f - xmax);
RGB48WindowMask(decoder, srclineB, width, 1, mask);
if (windowMaskR < 0)
RGB48WindowMask(decoder, srclineB, width, 1, windowMaskR);
if (xmin)
{
RGB48WindowMask(decoder, srclineB, width, 0, xmin);
}
}
if (decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
uint16_t *tmp = srclineA;
srclineA = srclineB;
srclineB = tmp;
}
switch (decoder->channel_blend_type)
{
case BLEND_ANAGLYPH_RC:
{
int16_t *ptr1 = (int16_t *)srclineA;
int16_t *ptr2 = (int16_t *)srclineB;
if (decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
for (x = 0; x < width; x++)
{
sptr[0] = ptr2[0];
sptr[1] = ptr1[1];
sptr[2] = ptr1[2];
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
else
{
for (x = 0; x < width; x++)
{
sptr[0] = ptr1[0];
sptr[1] = ptr2[1];
sptr[2] = ptr2[2];
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
}
break;
case BLEND_ANAGLYPH_RC_BW:
{
int16_t *ptr1 = (int16_t *)srclineA;
int16_t *ptr2 = (int16_t *)srclineB;
if (decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
for (x = 0; x < width; x++)
{
int y1 = (ptr1[0] * 5 + ptr1[1] * 10 + ptr1[2]) >> 4;
int y2 = (ptr2[0] * 5 + ptr2[1] * 10 + ptr2[2]) >> 4;
sptr[0] = y2;
sptr[1] = y1;
sptr[2] = y1;
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
else
{
for (x = 0; x < width; x++)
{
int y1 = (ptr1[0] * 5 + ptr1[1] * 10 + ptr1[2]) >> 4;
int y2 = (ptr2[0] * 5 + ptr2[1] * 10 + ptr2[2]) >> 4;
sptr[0] = y1;
sptr[1] = y2;
sptr[2] = y2;
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
}
break;
case BLEND_ANAGLYPH_AB:
{
int16_t *ptr1 = (int16_t *)srclineA;
int16_t *ptr2 = (int16_t *)srclineB;
if (decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
for (x = 0; x < width; x++)
{
sptr[0] = ptr2[0];
sptr[1] = ptr2[1];
sptr[2] = ptr1[2];
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
else
{
for (x = 0; x < width; x++)
{
sptr[0] = ptr1[0];
sptr[1] = ptr1[1];
sptr[2] = ptr2[2];
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
}
break;
case BLEND_ANAGLYPH_AB_BW:
{
int16_t *ptr1 = (int16_t *)srclineA;
int16_t *ptr2 = (int16_t *)srclineB;
if (decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
for (x = 0; x < width; x++)
{
int y1 = (ptr1[0] * 5 + ptr1[1] * 10 + ptr1[2]) >> 4;
int y2 = (ptr2[0] * 5 + ptr2[1] * 10 + ptr2[2]) >> 4;
sptr[0] = y2;
sptr[1] = y2;
sptr[2] = y1;
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
else
{
for (x = 0; x < width; x++)
{
int y1 = (ptr1[0] * 5 + ptr1[1] * 10 + ptr1[2]) >> 4;
int y2 = (ptr2[0] * 5 + ptr2[1] * 10 + ptr2[2]) >> 4;
sptr[0] = y1;
sptr[1] = y1;
sptr[2] = y2;
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
}
break;
case BLEND_ANAGLYPH_GM:
{
int16_t *ptr1 = (int16_t *)srclineA;
int16_t *ptr2 = (int16_t *)srclineB;
if (decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
for (x = 0; x < width; x++)
{
sptr[0] = ptr1[0];
sptr[1] = ptr2[1];
sptr[2] = ptr1[2];
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
else
{
for (x = 0; x < width; x++)
{
sptr[0] = ptr2[0];
sptr[1] = ptr1[1];
sptr[2] = ptr2[2];
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
}
break;
case BLEND_ANAGLYPH_GM_BW:
{
int16_t *ptr1 = (int16_t *)srclineA;
int16_t *ptr2 = (int16_t *)srclineB;
if (decoder->channel_swapped_flags & FLAG3D_SWAPPED)
{
for (x = 0; x < width; x++)
{
int y1 = (ptr1[0] * 5 + ptr1[1] * 10 + ptr1[2]) >> 4;
int y2 = (ptr2[0] * 5 + ptr2[1] * 10 + ptr2[2]) >> 4;
sptr[0] = y1;
sptr[1] = y2;
sptr[2] = y1;
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
else
{
for (x = 0; x < width; x++)
{
int y1 = (ptr1[0] * 5 + ptr1[1] * 10 + ptr1[2]) >> 4;
int y2 = (ptr2[0] * 5 + ptr2[1] * 10 + ptr2[2]) >> 4;
sptr[0] = y2;
sptr[1] = y1;
sptr[2] = y2;
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
}
break;
case BLEND_ANAGLYPH_DUBOIS: //Optimized
{
int16_t *ptr1 = (int16_t *)srclineA;
int16_t *ptr2 = (int16_t *)srclineB;
int r, g, b;
for (x = 0; x < width; x++)
{
r = (ptr1[0] * 456 + ptr1[1] * 500 + ptr1[2] * 176 + ptr2[0] * -43 + ptr2[1] * -88 + ptr2[2] * -2 ) / 1000;
g = (ptr1[0] * -40 + ptr1[1] * -38 + ptr1[2] * -16 + ptr2[0] * 378 + ptr2[1] * 734 + ptr2[2] * -18 ) / 1000;
b = (ptr1[0] * -15 + ptr1[1] * -21 + ptr1[2] * -5 + ptr2[0] * -72 + ptr2[1] * -113 + ptr2[2] * 1226) / 1000;
if (r < 0) r = 0;
if (r > 0x3fff) r = 0x3fff;
if (g < 0) g = 0;
if (g > 0x3fff) g = 0x3fff;
if (b < 0) b = 0;
if (b > 0x3fff) b = 0x3fff;
sptr[0] = r;
sptr[1] = g;
sptr[2] = b;
ptr1 += sskip;
ptr2 += sskip;
sptr += sskip;
}
}
break;
}
}
break;
case BLEND_NONE:
default:
if (decoder->channel_decodes == 1) // only one channel
{
if (skip == 8)
{
//the data is already in the correct format
sptr = (unsigned short *)bptr1;
// shift if needed.
if (zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt)
{
if (decoder->channel_current == 0)
{
if (zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
RGBA64HoriShift(decoder, sptr, scratchline2, width, -horizOffset, flip1);
else
RGBA64HoriShiftZoom(decoder, sptr, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
}
else
{
if (zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
RGBA64HoriShift(decoder, sptr, scratchline2, width, horizOffsetR, flip2);
else
RGBA64HoriShiftZoom(decoder, sptr, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
}
else if (skip == 6)
{
//the data is already in the correct format
dstlineA = sptr = (unsigned short *)srclineA;
// shift if needed.
if (zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt)
{
if (decoder->channel_current == 0)
{
if (zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
RGB48HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1);
else
RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
}
else
{
if (zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
RGB48HoriShift(decoder, srclineA, scratchline2, width, horizOffsetR, flip2);
else
RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
}
if (vignette != 0.0)
{
FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain,
(int16_t *)srclineA, decoder->frame.resolution, skip);
}
if (decoder->channel_current == 0)
{
if (blursharpenL != 0.0)
{
FastSharpeningBlurHinplaceWP13(width, (int16_t *)srclineA, blursharpenL, decoder->frame.resolution, skip);
}
}
else
{
if (blursharpenR != 0.0)
{
FastSharpeningBlurHinplaceWP13(width, (int16_t *)srclineA, blursharpenR, decoder->frame.resolution, skip);
}
}
}
if ((windowMaskL && decoder->channel_current == 0) || xmin)
{
float mask = windowMaskL > xmin ? windowMaskL : xmin;
if (decoder->channel_current != 0) mask = xmin;
if (windowMaskL < 0)
RGB48WindowMask(decoder, srclineA, width, 0, windowMaskL);
RGB48WindowMask(decoder, srclineA, width, 0, mask);
}
if ((windowMaskR && decoder->channel_current == 1) || (1.0f - xmax))
{
float mask = windowMaskR > (1.0f - xmax) ? windowMaskR : (1.0f - xmax);
if (decoder->channel_current != 1) mask = (1.0f - xmax);
if (windowMaskR < 0)
RGB48WindowMask(decoder, srclineA, width, 1, windowMaskR);
RGB48WindowMask(decoder, srclineA, width, 1, mask);
}
}
else
{
outputline2 = output + (y + height) * pitch;
if (zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt)
{
if (zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
RGB48HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1);
else
RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0);
if (zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0)
RGB48HoriShift(decoder, srclineB, scratchline2, width, horizOffset, flip2);
else
RGB48HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1);
}
if (windowMaskL || xmin)
{
float mask = windowMaskL > xmin ? windowMaskL : xmin;
RGB48WindowMask(decoder, srclineA, width, 0, mask);
if (windowMaskL < 0)
RGB48WindowMask(decoder, srclineA, width, 0, windowMaskL);
}
if (windowMaskR || (1.0 - xmax))
{
float mask = windowMaskR > (1.0f - xmax) ? windowMaskR : (1.0f - xmax);
RGB48WindowMask(decoder, srclineB, width, 1, mask);
if (windowMaskR < 0)
RGB48WindowMask(decoder, srclineB, width, 1, windowMaskR);
}
if (decoder->channel_swapped_flags & FLAG3D_GHOSTBUST)
{
if (decoder->ghost_bust_left || decoder->ghost_bust_right)
{
GhostBust(decoder, srclineA, srclineB, width, decoder->ghost_bust_left, decoder->ghost_bust_right);
}
}
}
break;
}
}
if (!formatdone)
{
int flags = ACTIVEMETADATA_PRESATURATED;
int whitebitdepth = 16;
if (decoder->StereoBufferFormat == DECODED_FORMAT_WP13 || decoder->StereoBufferFormat == DECODED_FORMAT_W13A)
{
flags = 0;
whitebitdepth = 13;
}
if (outputline2)
{
// if(decoder->cfhddata.ComputeFlags&2 && (0 == (y&3)) && decoder->tools)
// HistogramLine(decoder, srclineA, width, DECODED_FORMAT_RG48, whitebitdepth);
if (decoder->doVerticalFilter == 0) // No sharp stage so output now
{
if (alphachannel)
Convert4444LinesToOutput(decoder, width, 1, y, srclineA,
outputline, pitch, decoder->frame.format, whitebitdepth, flags);
else
ConvertLinesToOutput(decoder, width, 1, y, srclineA,
outputline, pitch, decoder->frame.format, whitebitdepth, flags);
//if(decoder->cfhddata.ComputeFlags&2 && (0 == (y&3)) && decoder->tools)
// HistogramLine(decoder, dstlineA, width, DECODED_FORMAT_RG48, whitebitdepth);
if (alphachannel)
Convert4444LinesToOutput(decoder, width, 1, y, srclineB,
outputline2, pitch, decoder->frame.format, whitebitdepth, flags);
else
ConvertLinesToOutput(decoder, width, 1, y, srclineB,
outputline2, pitch, decoder->frame.format, whitebitdepth, flags);
}
}
else
{
//if(decoder->cfhddata.ComputeFlags&2 && (0 == (y&3)) && decoder->tools)
//{
// if(alphachannel)
// HistogramLine(decoder, srclineA, width, DECODED_FORMAT_RG64, whitebitdepth);
// else
// HistogramLine(decoder, srclineA, width, DECODED_FORMAT_RG48, whitebitdepth);
//}
if (decoder->doVerticalFilter == 0) // No sharp stage so output now
{
if (alphachannel)
Convert4444LinesToOutput(decoder, width, 1, y, srclineA,
outputline, pitch, decoder->frame.format, whitebitdepth, flags);
else
ConvertLinesToOutput(decoder, width, 1, y, srclineA,
outputline, pitch, decoder->frame.format, whitebitdepth, flags);
}
}
}
}
// SharpenLine: post-process one row of the decoded (possibly stereo-blended)
// frame held in the local WP13/W13A line buffer — optionally applying a
// 5-tap vertical sharpen/blur — then convert that row into the final output
// pixel format at `output`.
//
// Parameters:
//   decoder        - decoder state (frame geometry, channel metadata, format).
//   buffer         - per-thread scratch memory; `thread_index` selects a
//                    width*skip byte slice so worker threads do not collide.
//   bufferremain   - size of `buffer` remaining; not checked here (NOTE(review):
//                    callers must guarantee width*skip*(thread_index+1) fits).
//   output/pitch   - destination frame base pointer and row stride in bytes.
//   local_output   - source buffer in decoder->StereoBufferFormat.
//   local_pitch    - row stride of local_output in bytes.
//   channel_offset - byte offset to the second channel's data within a row
//                    (used only by the line-interleaved blend mode).
//   y              - row index being processed (mutated locally for the
//                    line-interleaved case — see below).
//   thread_index   - worker thread id, selects the scratch slice.
void SharpenLine(DECODER *decoder, uint8_t *buffer, int bufferremain, uint8_t *output, int pitch, uint8_t *local_output, int local_pitch, int channel_offset, int y, int thread_index)
{
    uint16_t *sbase;//*sbase2 = NULL;
    int width = decoder->frame.width;
    int height = decoder->frame.height;
    int skip = 3;               // bytes per pixel of the stereo buffer; refined by the format switch below
    //int flip1=0;//flip2=0;
    int channel_flip = decoder->cfhddata.channel_flip;  // read here; cleared below if flips are disabled (not otherwise used in this function)
    //int local_pitch1 = local_pitch;
    //int local_pitch2 = local_pitch;
    uint8_t *outputline = output + y * pitch;
    //uint8_t *outputline2 = NULL;
    short *scratch;             // per-thread line scratch for the vertical filter result
    //int formatdone = 0;
    //float xmin = decoder->cfhddata.channel[0].FrameMask.topLftX;
    //float xmax = decoder->cfhddata.channel[0].FrameMask.topRgtX;
    //float ymin = decoder->cfhddata.channel[0].FrameMask.topLftY;
    //float ymax = decoder->cfhddata.channel[0].FrameMask.botLftY;
    int alphachannel = 0;       // 1 when the stereo buffer carries RGBA-style data
    float blursharpen = 0;      // vertical sharpen amount for the current eye; 0 disables the filter
    int line_max = decoder->frame.height;
    int yy = y;                 // possibly flipped output row index

    // Channel 1/2 of cfhddata hold the left/right eye parameters respectively.
    if (decoder->channel_current == 0)
        blursharpen = decoder->cfhddata.channel[1].user_blur_sharpen; // TODO LEFT and RIGHT separate vertical sharpen
    else
        blursharpen = decoder->cfhddata.channel[2].user_blur_sharpen; // TODO LEFT and RIGHT separate vertical sharpen

    // Sharpening is suppressed when color processing is off or the decode
    // resolution is too low for it to be meaningful.
    if (!(decoder->cfhddata.process_path_flags & PROCESSING_COLORMATRIX) ||
        decoder->frame.resolution == DECODED_RESOLUTION_QUARTER ||
        decoder->frame.resolution == DECODED_RESOLUTION_LOWPASS_ONLY ||
        decoder->frame.resolution == DECODED_RESOLUTION_QUARTER_NODEBAYER_SCALED)
    {
        blursharpen = 0.0;
    }

    if (decoder->channel_mix_half_res == 1)
        line_max *= 2;

    if (!(decoder->cfhddata.process_path_flags & PROCESSING_IMAGEFLIPS))
    {
        channel_flip = 0;
    }

    // Vertical flip of the output: write this source row to the mirrored
    // destination row instead.
    if (decoder->sharpen_flip) //SharpenLine
    {
        //if(!(decoder->channel_blend_type == BLEND_NONE && decoder->channel_current == 1)) // right channel only (stored in baseptr1)
        {
            yy = (line_max - 1 - y);
            outputline = output + yy * pitch;
        }
    }

    if ( decoder->StereoBufferFormat == DECODED_FORMAT_RG64 ||
         decoder->StereoBufferFormat == DECODED_FORMAT_W13A ||
         decoder->StereoBufferFormat == DECODED_FORMAT_RGB32)
        alphachannel = 1;

    if (decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
    {
        width *= 2;
    }

    // Default source row pointer (overridden per blend mode when sharpening).
    sbase = (uint16_t *)local_output;
    sbase += (local_pitch >> 1) * y;

    // Bytes per pixel for each supported intermediate buffer format.
    switch (decoder->StereoBufferFormat)
    {
    case DECODED_FORMAT_RG64:
    case DECODED_FORMAT_W13A:
        skip = 8;
        break;
    case DECODED_FORMAT_WP13:
        skip = 6;
        break;
    case DECODED_FORMAT_RG48:
        skip = 6;
        break;
    case DECODED_FORMAT_RGB32:
        skip = 4;
        break;
    case DECODED_FORMAT_RGB24:
        skip = 3;
        break;
    case DECODED_FORMAT_YUYV:
        skip = 2;
        break;
    }

    // Each worker thread gets its own width*skip byte slice of the scratch buffer.
    scratch = (short *)(buffer + width * skip * thread_index);

    {
        int flags = ACTIVEMETADATA_PRESATURATED;
        int whitebitdepth = 16;

        // The vertical filter only runs on the 13-bit white-point formats.
        if ((decoder->StereoBufferFormat == DECODED_FORMAT_WP13 || decoder->StereoBufferFormat == DECODED_FORMAT_W13A))
        {
            int use_pitch = local_pitch;
            int edgeclose = 0;   // set near the top/bottom so the filter can soften its taps
            flags = 0;
            whitebitdepth = 13;

            if (blursharpen != 0.0 && local_pitch != 0)
            {
                // Five source rows (y-2 .. y+2 in filter space) for the 5-tap
                // vertical kernel; rows beyond the frame edge are clamped to sbase.
                short *Aptr, *Bptr, *Cptr, *Dptr, *Eptr;

                switch (decoder->channel_blend_type)
                {
                case BLEND_STACKED_ANAMORPHIC:
                    // Each eye occupies every other stored row; step by 2 rows.
                    sbase = (uint16_t *)local_output;
                    sbase += (local_pitch >> 1) * y * 2;

                    if (y <= 4) edgeclose = 1;

                    if (y >= 2)	Aptr = (short *)sbase - (local_pitch >> 1) * 4;
                    else Aptr = (short *)sbase;
                    if (y >= 1)	Bptr = (short *)sbase - (local_pitch >> 1) * 2;
                    else Bptr = (short *)sbase;
                    Cptr = (short *)sbase;
                    if (y < height - 1)	Dptr = (short *)sbase + (local_pitch >> 1) * 2;
                    else Dptr = (short *)sbase;
                    if (y < height - 2)	Eptr = (short *)sbase + (local_pitch >> 1) * 4;
                    else Eptr = (short *)sbase;

                    if (y >= height - 4) edgeclose = 1;
                    use_pitch = local_pitch * 2;
                    break;

                case BLEND_LINE_INTERLEAVED:
                    // Odd rows are the second field: drop to the matching even
                    // row (NOTE: mutates y); even rows use channel_offset to
                    // address the other channel's data within the row.
                    sbase = (uint16_t *)local_output;
                    if (y & 1)
                    {
                        y--;
                        sbase += (local_pitch >> 1) * y;
                    }
                    else
                    {
                        sbase += (local_pitch >> 1) * y;
                        sbase += channel_offset >> 1;
                    }

                    if (y <= 8) edgeclose = 1;

                    if (y >= 4)	Aptr = (short *)sbase - (local_pitch >> 1) * 4;
                    else Aptr = (short *)sbase;
                    if (y >= 2)	Bptr = (short *)sbase - (local_pitch >> 1) * 2;
                    else Bptr = (short *)sbase;
                    Cptr = (short *)sbase;
                    if (y < height - 2)	Dptr = (short *)sbase + (local_pitch >> 1) * 2;
                    else Dptr = (short *)sbase;
                    if (y < height - 4)	Eptr = (short *)sbase + (local_pitch >> 1) * 4;
                    else Eptr = (short *)sbase;

                    if (y >= height - 8) edgeclose = 1;
                    use_pitch = local_pitch * 2;
                    break;

                default:
                    // Progressive layout: adjacent rows, unit row stride.
                    if (y <= 4) edgeclose = 1;

                    if (y >= 2)	Aptr = (short *)sbase - (local_pitch >> 1) * 2;
                    else Aptr = (short *)sbase;
                    if (y >= 1)	Bptr = (short *)sbase - (local_pitch >> 1) * 1;
                    else Bptr = (short *)sbase;
                    Cptr = (short *)sbase;
                    if (y < height - 1)	Dptr = (short *)sbase + (local_pitch >> 1) * 1;
                    else Dptr = (short *)sbase;
                    if (y < height - 2)	Eptr = (short *)sbase + (local_pitch >> 1) * 2;
                    else Eptr = (short *)sbase;

                    if (y >= height - 4) edgeclose = 1;
                    use_pitch = local_pitch;
                    break;
                }

                // Run the vertical sharpen into the scratch line; variant
                // depends on whether the format carries an alpha channel.
                if (skip == 8)
                {
                    FastSharpeningBlurVW13A(Aptr, Bptr, Cptr, Dptr, Eptr, use_pitch, edgeclose,
                                            scratch, width, blursharpen,
                                            decoder->frame.resolution,
                                            decoder->channel_blend_type);
                }
                else
                {
                    FastSharpeningBlurVWP13(Aptr, Bptr, Cptr, Dptr, Eptr, use_pitch, edgeclose,
                                            scratch, width, blursharpen,
                                            decoder->frame.resolution,
                                            decoder->channel_blend_type);
                }
                // Output conversion below reads the filtered line, not the source.
                sbase = (uint16_t *)scratch;
            }
        }

        // Convert the (possibly sharpened) row to the final output format.
        if (alphachannel)
            Convert4444LinesToOutput(decoder, width, 1, y, sbase,
                                     outputline, pitch, decoder->frame.format, whitebitdepth, flags);
        else
            ConvertLinesToOutput(decoder, width, 1, y, sbase,
                                 outputline, pitch, decoder->frame.format, whitebitdepth, flags);
    }
}
// Forward declaration for the geometry-mesh cache allocator (defined in the
// mesh/warp module; returns a status code — TODO confirm semantics at caller).
extern int geomesh_alloc_cache(void *gm);

// Degree <-> radian conversions. PI is assumed to be defined elsewhere in
// this codebase; arguments are parenthesized against expansion surprises.
#define DEG2RAD(d) (PI*(d)/180.0f)
#define RAD2DEG(r) (180.0f*(r)/PI)
// Compare two integer dimensions with a tolerance that scales with the
// magnitude of y: both values are quantized into coarse buckets (16, 32 or
// 64 units wide) and considered equal when they land in the same bucket or
// in adjacent buckets.
bool approx_equal(int x, int y)
{
    // Pick the bucket width from the magnitude of y (the reference value).
    int shift;
    if (y > 1080)
        shift = 6;      // 64-unit buckets for large frames
    else if (y > 540)
        shift = 5;      // 32-unit buckets
    else
        shift = 4;      // 16-unit buckets

    const int qx = x >> shift;
    const int qy = y >> shift;

    // Same bucket, or neighbouring buckets, counts as approximately equal.
    return (qx == qy) || (qx + 1 == qy) || (qx == qy + 1);
}
// Return true when y lies within +/-1% of x.
//
// Bug fix: the original strict range test (x*0.99 < y && y < x*1.01) failed
// for x == y whenever x <= 0 — in particular approx_equal_float(0, 0) was
// false — because x*0.99 is not strictly below y when x is zero or negative.
// An exact-equality short-circuit restores the expected reflexive behavior
// while leaving every positive-input result unchanged.
bool approx_equal_float(float x, float y)
{
    if (x == y)
        return true;        // handles zero and exact matches of any sign
    // +/-1% relative window around x (meaningful for the positive scale
    // factors this is used with, e.g. zoom ratios near 1.0).
    if (x * 0.99 < y && y < x * 1.01)
        return true;
    return false;
}
void ConvertLocalToOutput(DECODER *decoder, uint8_t *output, int pitch, int output_format, uint8_t *local_output, int local_pitch, int channel_offset)
{
uint8_t *local_output_double = local_output;
//Frame_Region emptyFrameMask = {0};
if (decoder->StereoBuffer)
local_output_double = local_output = (uint8_t *)decoder->StereoBuffer;
if (channel_offset < 0) // channel swapped
{
channel_offset = -channel_offset;
}
if (INVERTEDFORMAT(decoder->frame.format) != INVERTEDFORMAT(output_format))
{
local_output += local_pitch * (decoder->frame.height - 1);
if (decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC)
local_output_double += local_pitch * (decoder->frame.height * decoder->channel_decodes - 1);
else
local_output_double = local_output;
local_pitch = -local_pitch;
}
if (FLIPCOLORS(output_format) || output_format & 0x80000000)
{
decoder->cfhddata.InvertOffset = 1;
}
else
{
decoder->cfhddata.InvertOffset = 0;
}
decoder->frame.format = output_format;
//decoder->frame.colorspace = COLOR_SPACE_CG_601;
#if _THREADED
{
WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
int workunits;
#if _DELAY_THREAD_START
if (decoder->worker_thread.pool.thread_count == 0)
{
CreateLock(&decoder->worker_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->worker_thread.pool,
decoder->thread_cntrl.capabilities >> 16/*cpus*/,
WorkerThreadProc,
decoder);
}
#endif
if ( ((decoder->cfhddata.process_path_flags & PROCESSING_ORIENTATION) &&
(decoder->cfhddata.channel[0].FrameAutoZoom * decoder->cfhddata.channel[1].FrameDiffZoom != 1.0 ||
decoder->cfhddata.channel[1].FrameKeyStone ||
decoder->cfhddata.channel[1].VerticalOffset ||
decoder->cfhddata.channel[1].RotationOffset ||
decoder->cfhddata.channel[1].FrameTilt ||
decoder->cfhddata.channel[0].FrameAutoZoom / decoder->cfhddata.channel[2].FrameDiffZoom != 1.0 ||
decoder->cfhddata.channel[2].FrameKeyStone ||
decoder->cfhddata.channel[2].VerticalOffset ||
decoder->cfhddata.channel[2].RotationOffset ||
decoder->cfhddata.channel[2].FrameTilt))
||
((decoder->cfhddata.process_path_flags & PROCESSING_FRAMING) &&
(decoder->cfhddata.FrameOffsetY ||
decoder->cfhddata.FrameOffsetR ||
// decoder->cfhddata.FrameOffsetX || ||
decoder->cfhddata.FrameHScale != 1.0 ||
decoder->cfhddata.FrameHDynamic != 1.0 ||
decoder->cfhddata.channel[1].FrameZoom != 1.0 ||
decoder->cfhddata.channel[2].FrameZoom != 1.0) ))
{
//int x;
int xbytes, xstep;
//uint8_t *base = local_output;
int width, height, chunk_size;
int fine_vertical = 0;
width = decoder->frame.width;
height = decoder->frame.height;
switch (decoder->StereoBufferFormat)
{
case DECODED_FORMAT_RGB32:
xbytes = width * 4;
xstep = 16;
break;
case DECODED_FORMAT_RGB24:
xbytes = width * 3;
xstep = 16;
break;
case DECODED_FORMAT_YUYV:
xbytes = width * 2;
xstep = 16;
break;
case DECODED_FORMAT_W13A:
case DECODED_FORMAT_RG64:
xbytes = width * 8;
xstep = 32;
break;
case DECODED_FORMAT_WP13:
case DECODED_FORMAT_RG48:
xbytes = width * 6;
xstep = 32;
break;
default:
assert(0);
break;
}
if (!(decoder->cfhddata.process_path_flags & (PROCESSING_ORIENTATION | PROCESSING_FRAMING)) ||
(decoder->cfhddata.channel[1].RotationOffset == 0.0 && decoder->cfhddata.channel[1].FrameKeyStone == 0.0 &&
decoder->cfhddata.channel[2].RotationOffset == 0.0 && decoder->cfhddata.channel[2].FrameKeyStone == 0.0 &&
decoder->cfhddata.FrameOffsetR == 0.0))
{
chunk_size = 8;
}
else
{
chunk_size = 1;
if ((fabs(decoder->cfhddata.channel[1].RotationOffset) +
fabs(decoder->cfhddata.channel[1].FrameKeyStone * 0.2) +
fabs(decoder->cfhddata.FrameOffsetR)) > 0.015 ||
(fabs(decoder->cfhddata.channel[2].RotationOffset) +
fabs(decoder->cfhddata.channel[2].FrameKeyStone * 0.2) +
fabs(decoder->cfhddata.FrameOffsetR)) > 0.015)
{
switch (decoder->StereoBufferFormat)
{
case DECODED_FORMAT_RGB32:
xstep = 4;
break;
case DECODED_FORMAT_RGB24:
xstep = 3;
break;
case DECODED_FORMAT_YUYV:
xstep = 4;
break;
case DECODED_FORMAT_W13A:
case DECODED_FORMAT_RG64:
xstep = 8;
break;
case DECODED_FORMAT_WP13:
case DECODED_FORMAT_RG48:
default:
xstep = 6;
break;
}
fine_vertical = 1;
}
}
if ( decoder->codec.encoded_format == ENCODED_FORMAT_YUV_422 &&
(decoder->frame.resolution == DECODED_RESOLUTION_FULL ||
decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL) &&
decoder->codec.progressive == false)
{
int interlaced_pitch = local_pitch * 2;
uint8_t *field2_output = local_output + local_pitch;
// Post a message to the mailbox
mailbox->local_output = local_output;
mailbox->local_pitch = interlaced_pitch;
mailbox->channel_offset = channel_offset;
memcpy(&mailbox->info, &decoder->frame, sizeof(FRAME_INFO));
mailbox->info.height >>= 1;
mailbox->line_max = (xbytes + xstep - 1) / xstep;
mailbox->chunk_size = chunk_size;
mailbox->fine_vertical = fine_vertical;
mailbox->jobType = JOB_TYPE_VERTICAL_3D; // 3d work -- vertical
workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
// Post a message to the mailbox
mailbox->local_output = field2_output;
mailbox->local_pitch = interlaced_pitch;
mailbox->channel_offset = channel_offset;
memcpy(&mailbox->info, &decoder->frame, sizeof(FRAME_INFO));
mailbox->info.height >>= 1;
mailbox->chunk_size = chunk_size;
mailbox->line_max = (xbytes + xstep - 1) / xstep;
mailbox->fine_vertical = fine_vertical;
mailbox->jobType = JOB_TYPE_VERTICAL_3D; // 3d work -- vertical
workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
}
else
{
//TODO Lens corect here.
//call JOB_TYPE_VERTICAL_3D then (or lens correction equivalent.)
// JOB_TYPE_HORIZONTAL_3D
//before doing any offset and rotation corrections.
if (decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER) //HACK //DAN20110129
width /= 2;
// Post a message to the mailbox
mailbox->local_output = local_output;
mailbox->local_pitch = local_pitch;
mailbox->channel_offset = channel_offset;
memcpy(&mailbox->info, &decoder->frame, sizeof(FRAME_INFO));
mailbox->chunk_size = chunk_size;
mailbox->line_max = (xbytes + xstep - 1) / xstep;
mailbox->fine_vertical = fine_vertical;
mailbox->jobType = JOB_TYPE_VERTICAL_3D; // 3d work -- vertical
workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
}
}
// Post a message to the mailbox
mailbox->output = output;
mailbox->pitch = pitch;
mailbox->local_output = local_output;
mailbox->local_pitch = local_pitch;
mailbox->channel_offset = channel_offset;
memcpy(&mailbox->info, &decoder->frame, sizeof(FRAME_INFO));
mailbox->chunk_size = 16;
mailbox->line_max = decoder->frame.height;
if (decoder->channel_mix_half_res == 1)
mailbox->line_max *= 2;
workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;
decoder->doVerticalFilter = 0;
mailbox->jobType = JOB_TYPE_HORIZONAL_3D; // 3d work && horizontal and vertical flips
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
if (decoder->doVerticalFilter)
{
// Post a message to the mailbox
mailbox->output = output;
mailbox->pitch = pitch;
mailbox->local_output = local_output_double;
mailbox->local_pitch = local_pitch;
mailbox->channel_offset = channel_offset;
memcpy(&mailbox->info, &decoder->frame, sizeof(FRAME_INFO));
mailbox->chunk_size = 16;
mailbox->line_max = decoder->frame.height;
if (decoder->channel_decodes == 2 && decoder->channel_blend_type == 0)
mailbox->line_max *= 2;
if (decoder->channel_mix_half_res == 1)
mailbox->line_max *= 2;
workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;
mailbox->jobType = JOB_TYPE_SHARPEN; // 3d work && horizontal and vertical flips
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
}
}
#else
{
int y, width, height;
uint8_t scratch[4096 * 16];
int scratchremain = 4096 * 16;
int ymin = 0, ymax;
width = decoder->frame.width;
height = decoder->frame.height;
ymax = height;
if ((decoder->cfhddata.process_path_flags & PROCESSING_FRAMING) &&
memcmp(&decoder->cfhddata.channel[0].FrameMask, &emptyFrameMask, 32))
{
ymin = (float)height * decoder->cfhddata.channel[0].FrameMask.topLftY;
ymax = (float)height * decoder->cfhddata.channel[0].FrameMask.botLftY;
}
if ( ((decoder->cfhddata.process_path_flags & PROCESSING_ORIENTATION) &&
(decoder->cfhddata.channel[0].FrameAutoZoom * decoder->cfhddata.channel[1].FrameDiffZoom != 1.0 ||
decoder->cfhddata.channel[1].FrameKeyStone ||
decoder->cfhddata.channel[1].VerticalOffset ||
decoder->cfhddata.channel[1].RotationOffset ||
decoder->cfhddata.channel[0].FrameAutoZoom / decoder->cfhddata.channel[2].FrameDiffZoom != 1.0 ||
decoder->cfhddata.channel[2].FrameKeyStone ||
decoder->cfhddata.channel[2].VerticalOffset ||
decoder->cfhddata.channel[2].RotationOffset))
||
((decoder->cfhddata.process_path_flags & PROCESSING_FRAMING) &&
(decoder->cfhddata.FrameOffsetY ||
decoder->cfhddata.FrameOffsetR ||
decoder->cfhddata.FrameOffsetX ||
decoder->cfhddata.FrameHScale != 1.0 ||
decoder->cfhddata.FrameHDynamic != 1.0 ||
decoder->cfhddata.channel[1].FrameZoom != 1.0 ||
decoder->cfhddata.channel[2].FrameZoom != 1.0))
{
int x, xbytes, xstep;
uint8_t *base = local_output;
float voffsetstep;
float voffset = decoder->cfhddata.channel[1].VerticalOffset;
float roffset = decoder->cfhddata.channel[1].RotationOffset;
float voffset1, voffset2;
float voffsetstep1, voffsetstep2;
int channel_flip = decoder->cfhddata.channel_flip;
int aspectx, aspecty;
float aspectfix;
GetDisplayAspectRatio(decoder, &aspectx, &aspecty);
aspectfix = (float)(aspectx * aspectx) / (float)(aspecty * aspecty);
if (!(decoder->cfhddata.process_path_flags & PROCESSING_ORIENTATION))
{
voffset = roffset = 0;
}
if (!(decoder->cfhddata.process_path_flags & PROCESSING_IMAGEFLIPS))
{
channel_flip = 0;
}
if (decoder->cfhddata.process_path_flags & PROCESSING_FRAMING)
voffset += decoder->cfhddata.FrameOffsetY;
if (decoder->cfhddata.InvertOffset)
{
voffset = -voffset;
roffset = -roffset;
}
switch (decoder->StereoBufferFormat)
{
case DECODED_FORMAT_RGB32:
xbytes = width * 4;
xstep = 16;
break;
case DECODED_FORMAT_RGB24:
xbytes = width * 3;
xstep = 16;
break;
case DECODED_FORMAT_YUYV:
xbytes = width * 2;
xstep = 16;
break;
case DECODED_FORMAT_WP13:
case DECODED_FORMAT_RG48:
default:
xbytes = width * 6;
xstep = 32;
break;
}
//DAN20100923 -- simplified
//voffset += roffset * (float)(width*width) / (float)(height*height) * 0.5;
//voffsetstep = -roffset * (float)(width*width) / (float)(height*height) / (float)(xbytes/xstep);
voffset += roffset * aspectfix * 0.5;
voffsetstep = -roffset * aspectfix / (float)(xbytes / xstep);
if (roffset == 0.0)
xstep = xbytes;
voffset1 = voffset2 = voffset;
voffsetstep1 = voffsetstep2 = voffsetstep;
if (channel_flip & 0xf)
{
if (channel_flip & 2)
{
voffset1 = -voffset1;
voffsetstep1 = -voffsetstep1;
}
if (channel_flip & 8)
{
voffset2 = -voffset2;
voffsetstep2 = -voffsetstep2;
}
if (channel_flip & 1)
{
voffset1 += voffsetstep1 * (xbytes / xstep);
voffsetstep1 = -voffsetstep1;
}
if (channel_flip & 4)
{
voffset2 += voffsetstep2 * (xbytes / xstep);
voffsetstep2 = -voffsetstep2;
}
}
for (x = 0; x < xbytes; x += xstep)
{
if (decoder->channel_decodes == 1 && decoder->channel_current == 1) // Right only
{
RGB48VerticalShift(decoder, base, (unsigned short *)scratch,
xstep, height, local_pitch, -voffset2);
}
else
{
RGB48VerticalShift(decoder, base, (unsigned short *)scratch,
xstep, height, local_pitch, voffset1);
}
if (decoder->channel_decodes == 2)
{
uint8_t *bptr = base + channel_offset;
RGB48VerticalShift(decoder, bptr, (unsigned short *)scratch,
xstep, height, local_pitch, -voffset2);
}
base += xstep;
voffset1 += voffsetstep1;
voffset2 += voffsetstep2;
}
}
if (decoder->channel_mix_half_res == 1)
height *= 2;
if (ymin)
{
memset(local_output, 0, abs(local_pitch)); // zero one line;
}
for (y = 0; y < ymin; y++)
{
ProcessLine3D(decoder, scratch, scratchremain, output, pitch, local_output, 0, channel_offset, y, 0);
}
for (; y < ymax; y++)
{
ProcessLine3D(decoder, scratch, scratchremain, output, pitch, local_output, local_pitch, channel_offset, y, 0);
}
for (; y < height; y++)
{
ProcessLine3D(decoder, scratch, scratchremain, output, pitch, local_output, 0, channel_offset, y, 0);
}
}
#endif
}
// Decode a sample from the input bitstream into the output frame buffer
bool DecodeSample(DECODER *decoder, BITSTREAM *input, uint8_t *output, int pitch, ColorParam *colorparams, CFHDDATA *cfhddata)
{
//CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (DEBUG)
FILE *logfile = decoder->logfile;
#endif
//CODEC_STATE *codec = &decoder->codec;
//int subband_wavelet_index[] = {5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 1, 1, 1, 0, 0, 0};
int channel_decodes = 1; // 3D Work
int channel_offset = 0;
int channel_mask = 0;
int channel_current = 0;
//int wavelet_index;
bool result = true;
uint8_t *local_output = output;
uint8_t *local_buffer = NULL;
int local_pitch = pitch;
int internal_format = decoder->frame.format;
int output_format = decoder->frame.output_format;
bool use_local_buffer = false;
DECODER *local_decoder = decoder;
//Frame_Region emptyFrameMask = {0};
Frame_Region emptyFrameMask = FRAME_REGION_INITIALIZER;
int orig_width = decoder->frame.width;
int orig_height = decoder->frame.height;
decoder->local_output = local_output; // used for NV12 decodes.
decoder->sample_uncompressed = 0; // set if a uncompressed sample is found.
decoder->image_dev_only = 0;
if (decoder->flags & (1 << 3)) // This is an image development only decode.
{
decoder->sample_uncompressed = 1;
decoder->image_dev_only = 1;
decoder->codec.encoded_format = ENCODED_FORMAT_RGB_444;
decoder->codec.unique_framenumber = 0; //What should this be?
decoder->frame.white_point = 16; // how do we pass this in?
decoder->uncompressed_chunk = (uint32_t *)input->lpCurrentBuffer;
switch (output_format & 0x7fffffff)
{
case COLOR_FORMAT_RGB24:
decoder->uncompressed_size = orig_width * orig_height * 3;
break;
case COLOR_FORMAT_RGB32:
decoder->uncompressed_size = orig_width * orig_height * 4;
break;
case COLOR_FORMAT_RG48:
case COLOR_FORMAT_WP13:
decoder->uncompressed_size = orig_width * orig_height * 6;
break;
default:
decoder->uncompressed_size = orig_width * orig_height * 6;
assert(0);
break;
}
}
decoder->frame.alpha_Companded = 0; // reset this state.
if (decoder->parallelDecoder)
decoder->parallelDecoder->sample_uncompressed = 0;
decoder->error = CODEC_ERROR_OKAY;
input->error = BITSTREAM_ERROR_OKAY;
// first time through encoded_format is not initialized.
if (input->nWordsUsed > 4096 && decoder->image_dev_only == 0) // an I-frame is needed
{
SAMPLE_HEADER header;
BITSTREAM input2;
InitBitstreamBuffer(&input2, input->lpCurrentWord, input->nWordsUsed, BITSTREAM_ACCESS_READ);
memset(&header, 0, sizeof(SAMPLE_HEADER));
header.find_lowpass_bands = 2; // help finding the uncompressed flag
if (ParseSampleHeader(&input2, &header))
{
decoder->codec.encoded_format = header.encoded_format;
decoder->sample_uncompressed = header.hdr_uncompressed;
if (decoder->parallelDecoder)
decoder->parallelDecoder->sample_uncompressed = header.hdr_uncompressed;
}
}
if ((uintptr_t)input->lpCurrentBuffer & 0x3)
{
if (decoder->aligned_sample_buffer == NULL)
{
#if _ALLOCATOR
ALLOCATOR *allocator = decoder->allocator;
decoder->aligned_sample_buffer =
(uint8_t *)AllocAligned(allocator, (size_t)input->dwBlockLength, 16);
#else
decoder->aligned_sample_buffer =
(uint8_t *)MEMORY_ALIGNED_ALLOC(input->dwBlockLength, 16);
#endif
memcpy(decoder->aligned_sample_buffer, input->lpCurrentBuffer, input->dwBlockLength);
decoder->aligned_sample_buffer_size = input->dwBlockLength;
}
else
{
if ((size_t)input->dwBlockLength <= decoder->aligned_sample_buffer_size)
{
memcpy(decoder->aligned_sample_buffer, input->lpCurrentBuffer, input->dwBlockLength);
}
else
{
#if _ALLOCATOR
ALLOCATOR *allocator = decoder->allocator;
FreeAligned(decoder->allocator, decoder->aligned_sample_buffer);
decoder->aligned_sample_buffer =
(uint8_t *)AllocAligned(allocator, input->dwBlockLength, 16);
#else
MEMORY_ALIGNED_FREE(decoder->aligned_sample_buffer);
decoder->aligned_sample_buffer =
(uint8_t *)MEMORY_ALIGNED_ALLOC(input->dwBlockLength, 16);
#endif
memcpy(decoder->aligned_sample_buffer, input->lpCurrentBuffer, input->dwBlockLength);
decoder->aligned_sample_buffer_size = input->dwBlockLength;
}
}
input->lpCurrentBuffer = decoder->aligned_sample_buffer;
input->lpCurrentWord = decoder->aligned_sample_buffer;
}
#if 0 // Test for misaligning the image data
if (((int)input->lpCurrentBuffer & 3) == 0)
{
int i;
uint8_t *ptr = (uint8_t *)input->lpCurrentBuffer;
int missaligned = 1; //2 or 3
for (i = input->dwBlockLength - 1; i >= 0; i--)
ptr[i + missaligned] = ptr[missaligned];
input->lpCurrentBuffer = (uint8_t *)&ptr[missaligned];
input->lpCurrentWord = (uint8_t *)&ptr[missaligned];
}
#endif
//HACK
// Unfortunately I need color matrix data deep within the codec for RT playback.
if (cfhddata && cfhddata->MagicNumber == CFHDDATA_MAGIC_NUMBER) // valid input
{
if (decoder->cfhddata.MagicNumber != CFHDDATA_MAGIC_NUMBER)
{
//int size = cfhddata->size;
size_t size = cfhddata->size;
memset(&decoder->cfhddata, 0, sizeof(CFHDDATA));
if (size > sizeof(CFHDDATA))
{
// Limit the size to the known structure
size = sizeof(CFHDDATA);
}
memcpy(&decoder->cfhddata, cfhddata, size);
}
}
else
{
unsigned short value;
if (decoder->cfhddata.MagicNumber != CFHDDATA_MAGIC_NUMBER || decoder->cfhddata.size != sizeof(CFHDDATA))
{
memset(&decoder->cfhddata, 0, sizeof(CFHDDATA));
decoder->cfhddata.MagicNumber = CFHDDATA_MAGIC_NUMBER;
decoder->cfhddata.size = sizeof(CFHDDATA);
if (decoder->image_dev_only) // For baseband image-only corrections, initialize the decoder with defaults
{
decoder->cfhddata.cfhd_subtype = 2; //RGB
decoder->cfhddata.num_channels = 3;
}
else if (GetTuplet(input->lpCurrentBuffer, input->nWordsUsed, CODEC_TAG_INPUT_FORMAT, &value))
{
if (value == COLOR_FORMAT_RG48)
{
decoder->cfhddata.cfhd_subtype = 2; //RGB
decoder->cfhddata.num_channels = 3;
}
else if (value == COLOR_FORMAT_RG64)
{
decoder->cfhddata.cfhd_subtype = 3; //RGBA
decoder->cfhddata.num_channels = 4;
}
else if (value > COLOR_FORMAT_BAYER && value < COLOR_FORMAT_BAYER_END)
{
unsigned int format = BAYER_FORMAT_RED_GRN;
decoder->cfhddata.cfhd_subtype = 1; //BAYER
decoder->cfhddata.bayer_format = format; // default to Red-Grn
decoder->cfhddata.version = CFHDDATA_VERSION;
}
}
}
}
OverrideCFHDDATA(decoder, input->lpCurrentBuffer, input->nWordsUsed);
if (decoder->image_dev_only) // HACK we need to support 3D also.
decoder->source_channels = 1;
else
decoder->source_channels = decoder->real_channels = SkipVideoChannel(decoder, input, 0);
if (!decoder->basic_only && (decoder->cfhddata.MSChannel_type_value || decoder->cfhddata.MSCTV_Override))
{
//int channels = 0;
int channel_blend_type = BLEND_NONE;
int channel_swapped_flags = 0;
if (decoder->cfhddata.MSCTV_Override)
{
channel_mask = decoder->cfhddata.MSCTV_Override & 0xff;
channel_blend_type = ((decoder->cfhddata.MSCTV_Override >> 8) & 0xff);
channel_swapped_flags = ((decoder->cfhddata.MSCTV_Override >> 16) & 0xffff);
}
else
{
channel_mask = decoder->cfhddata.MSChannel_type_value & 0xff;
channel_blend_type = ((decoder->cfhddata.MSChannel_type_value >> 8) & 0xff);
channel_swapped_flags = ((decoder->cfhddata.MSChannel_type_value >> 16) & 0xffff);
}
if (channel_mask != 3)
{
channel_blend_type = BLEND_NONE;
channel_swapped_flags = 0;
}
//if(channels >= 2) // even "mono" files need to be displayed as Stereo if a 3D mode is selected //DAN20090302
{
if (channel_mask == 1 && decoder->source_channels >= 2) // Decode Left only
{
if (decoder->cfhddata.FramingFlags & 2) // channel swap
{
SkipVideoChannel(decoder, input, 2); // 3D work
}
}
else if (channel_mask == 2 && decoder->source_channels >= 2) // Decode Right only
{
if (decoder->cfhddata.FramingFlags & 2) // channel swap
{
SkipVideoChannel(decoder, input, 1); // 3D work
}
else
{
//assume second channel decode
SkipVideoChannel(decoder, input, 2); // 3D work
}
channel_current = 1;
channel_decodes = 1;
channel_blend_type = BLEND_NONE;
channel_swapped_flags = 0;
}
else if (channel_mask == 2 && decoder->source_channels <= 1) // Decode 2D as Right channel
{
channel_current = 1;
channel_decodes = 1;
channel_blend_type = BLEND_NONE;
channel_swapped_flags = 0;
}
else if ((channel_mask & 3) == 3) // A+B 3d work
{
channel_decodes = 2;
decoder->channel_mix_half_res = 0;
if (channel_blend_type != BLEND_NONE)
{
if (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
{
//if(decoder->frame.format == DECODED_FORMAT_W13A)
// {
// decoder->frame.format = internal_format = DECODED_FORMAT_W13A;
// }
//else
//{
// decoder->frame.format = internal_format = DECODED_FORMAT_RG64;
// }
decoder->frame.format = internal_format = DECODED_FORMAT_RGB32;
local_pitch = decoder->frame.width * 4;
}
else
{
decoder->frame.format = internal_format = DECODED_FORMAT_RGB24;
local_pitch = decoder->frame.width * 3; //RGB24
}
/* if(decoder->frame.resolution == DECODED_RESOLUTION_FULL &&
(output_format == DECODED_FORMAT_YUYV ||
output_format == DECODED_FORMAT_UYVY))
{
if( channel_blend_type == BLEND_FREEVIEW ||
((channel_blend_type == BLEND_STACKED_ANAMORPHIC ||
channel_blend_type == BLEND_SIDEBYSIDE_ANAMORPHIC ||
channel_blend_type == BLEND_LINE_INTERLEAVED) && decoder->frame.width > 1280))
{
decoder->frame.resolution = DECODED_RESOLUTION_HALF;
decoder->channel_mix_half_res = 1;
decoder->frame.width /= 2;
decoder->frame.height /= 2;
local_pitch = (decoder->frame.width) * 3; //RGB24
}
} */
}
/* if(channel_blend_type == BLEND_STEREO_YUY2inRGBA) //YUY2 in RGBA
{
decoder->frame.format = internal_format = DECODED_FORMAT_YUYV;
local_pitch = decoder->frame.width * 2; //YUY2
channel_offset = local_pitch * (decoder->frame.height);
use_local_buffer = true;
}*/
/* DAN20120316 FLAG3D_HALFRES broken if(decoder->frame.resolution == DECODED_RESOLUTION_FULL && channel_swapped_flags & FLAG3D_HALFRES && output_format != DECODED_FORMAT_W13A)
{
decoder->frame.resolution = DECODED_RESOLUTION_HALF;
decoder->channel_mix_half_res = 1;
decoder->frame.width /= 2;
decoder->frame.height /= 2;
local_pitch /= 2;
} */
if ( decoder->frame.resolution == DECODED_RESOLUTION_FULL &&
(channel_blend_type == BLEND_SIDEBYSIDE_ANAMORPHIC || channel_blend_type == BLEND_FREEVIEW))
{
if (decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
{
if (decoder->sample_uncompressed)
{
decoder->frame.resolution = DECODED_RESOLUTION_HALF;
decoder->channel_mix_half_res = 1;
decoder->frame.width /= 2;
decoder->frame.height /= 2;
local_pitch /= 2;
}
else
{
if (decoder->preformatted_3D_type > BLEND_NONE)
{
// leave as is.
}
else if (FORMAT8BIT(output_format))
{
decoder->frame.resolution = DECODED_RESOLUTION_HALF_HORIZONTAL;
decoder->frame.width /= 2;
local_pitch /= 2;
}
}
}
else
{
if (FORMAT8BIT(output_format))
decoder->frame.resolution = DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER;
}
//TODO int uncompressed = decoder->uncompressed_chunk && decoder->uncompressed_size && decoder->sample_uncompressed;
}
if (channel_blend_type >= BLEND_STACKED_ANAMORPHIC && channel_blend_type < BLEND_ANAGLYPH_RC) // stacked, side-by-side, fields, Onion, YUY2
{
channel_offset = local_pitch * (decoder->frame.height);
}
else if (channel_blend_type >= BLEND_ANAGLYPH_RC)
{
/* if(channel_blend_type & 1 && channel_blend_type <= 21) // B&W Anaglyph
{
//B&W using YUYV
decoder->frame.format = internal_format = DECODED_FORMAT_YUYV;
local_pitch = decoder->frame.width * 2; //YUY2
}*/
channel_offset = local_pitch * (decoder->frame.height);
use_local_buffer = true;
}
else if (channel_blend_type == BLEND_NONE) // double high
{
channel_offset = pitch * decoder->frame.height;
}
else
{
channel_blend_type = BLEND_STACKED_ANAMORPHIC;
channel_offset = pitch * (decoder->frame.height / 2);
}
// fields, stacked, etc, only works on full or half res.
if (channel_blend_type > BLEND_NONE && channel_blend_type <= BLEND_LINE_INTERLEAVED &&
decoder->frame.resolution == DECODED_RESOLUTION_LOWPASS_ONLY) //thumbnail.
{
channel_decodes = 1;
channel_blend_type = BLEND_NONE;
channel_swapped_flags = 0;
}
if (channel_blend_type != BLEND_NONE &&
(output_format == DECODED_FORMAT_BYR1 ||
output_format == DECODED_FORMAT_BYR2 ||
output_format == DECODED_FORMAT_BYR3 ||
output_format == DECODED_FORMAT_BYR4 ))
{
channel_decodes = 1;
channel_blend_type = BLEND_NONE;
channel_swapped_flags = 0;
}
}
}
decoder->channel_decodes = channel_decodes;
decoder->channel_blend_type = channel_blend_type;
decoder->channel_swapped_flags = channel_swapped_flags;
}
else
{
decoder->channel_decodes = channel_decodes = 1;
decoder->channel_blend_type = BLEND_NONE;
decoder->channel_swapped_flags = 0;
}
if (cfhddata) // So the P-frames can know the bayerformat
{
//int size = cfhddata->size;
size_t size = cfhddata->size;
if (size > sizeof(CFHDDATA))
{
size = sizeof(CFHDDATA);
}
memcpy(cfhddata, &decoder->cfhddata, size);
}
{
bool doOrientation = true;
bool doFraming = true;
bool doBurins = true;
bool doImageflips = true;
bool doGhostBust = false;
bool doPrimaries = true;
int process_path_flags = decoder->cfhddata.process_path_flags;
int process_path_flags_mask = decoder->cfhddata.process_path_flags_mask;
if (decoder->basic_only)
{
doOrientation = false;
doFraming = false;
doBurins = false;
doImageflips = false;
doPrimaries = false;
}
else
{
if (decoder->cfhddata.process_path_flags_mask)
{
//DAN20101007 --
if (process_path_flags == 0)
decoder->cfhddata.process_path_flags = process_path_flags = decoder->cfhddata.process_path_flags_mask;
process_path_flags &= decoder->cfhddata.process_path_flags_mask;
if (process_path_flags_mask & PROCESSING_ACTIVE2)
{
if (!(process_path_flags_mask & PROCESSING_ORIENTATION))
doOrientation = false;
if (!(process_path_flags_mask & PROCESSING_FRAMING))
doFraming = false;
if (!(process_path_flags_mask & PROCESSING_BURNINS))
doBurins = false;
if (!(process_path_flags_mask & PROCESSING_IMAGEFLIPS))
doImageflips = false;
}
if (!(process_path_flags_mask & PROCESSING_COLORMATRIX))
doPrimaries = false;
}
if (process_path_flags & PROCESSING_ACTIVE2)
{
if (!(process_path_flags & PROCESSING_ORIENTATION))
doOrientation = false;
if (!(process_path_flags & PROCESSING_FRAMING))
doFraming = false;
if (!(process_path_flags & PROCESSING_BURNINS))
doBurins = false;
if (!(process_path_flags & PROCESSING_IMAGEFLIPS))
doImageflips = false;
if (!(process_path_flags & PROCESSING_COLORMATRIX))
doPrimaries = false;
}
}
if (doOrientation)
process_path_flags |= PROCESSING_ORIENTATION;
if (doFraming)
process_path_flags |= PROCESSING_FRAMING;
if (doBurins)
process_path_flags |= PROCESSING_BURNINS;
if (doImageflips)
process_path_flags |= PROCESSING_IMAGEFLIPS;
if (doPrimaries)
process_path_flags |= PROCESSING_COLORMATRIX;
if (decoder->channel_swapped_flags & FLAG3D_GHOSTBUST)
{
if (decoder->ghost_bust_left || decoder->ghost_bust_right)
{
doGhostBust = true;
}
}
decoder->cfhddata.process_path_flags = process_path_flags;
if ((!decoder->basic_only &&
(doOrientation && ( decoder->cfhddata.channel[0].FloatingWindowMaskL ||
decoder->cfhddata.channel[0].FloatingWindowMaskR ||
decoder->cfhddata.channel[0].FrameKeyStone ||
decoder->cfhddata.channel[0].FrameTilt ||
decoder->cfhddata.channel[0].HorizontalOffset ||
decoder->cfhddata.channel[0].VerticalOffset ||
decoder->cfhddata.channel[0].RotationOffset ||
decoder->cfhddata.channel[1].FloatingWindowMaskL ||
decoder->cfhddata.channel[1].FloatingWindowMaskR ||
decoder->cfhddata.channel[1].FrameKeyStone ||
decoder->cfhddata.channel[1].FrameTilt ||
decoder->cfhddata.channel[1].HorizontalOffset ||
decoder->cfhddata.channel[1].VerticalOffset ||
decoder->cfhddata.channel[1].RotationOffset ||
decoder->cfhddata.channel[0].FrameAutoZoom * decoder->cfhddata.channel[1].FrameDiffZoom != 1.0 ||
decoder->cfhddata.channel[2].FloatingWindowMaskL ||
decoder->cfhddata.channel[2].FloatingWindowMaskR ||
decoder->cfhddata.channel[2].FrameKeyStone ||
decoder->cfhddata.channel[2].FrameTilt ||
decoder->cfhddata.channel[2].HorizontalOffset ||
decoder->cfhddata.channel[2].VerticalOffset ||
decoder->cfhddata.channel[2].RotationOffset ||
decoder->cfhddata.channel[0].FrameAutoZoom / decoder->cfhddata.channel[2].FrameDiffZoom != 1.0)))
||
(doPrimaries && ( decoder->cfhddata.channel[0].user_blur_sharpen != 0.0 ||
decoder->cfhddata.channel[1].user_blur_sharpen != 0.0 ||
decoder->cfhddata.channel[2].user_blur_sharpen != 0.0))
||
(doFraming && ( decoder->cfhddata.channel[0].user_vignette_start != 0.0 ||
decoder->cfhddata.channel[1].user_vignette_start != 0.0 ||
decoder->cfhddata.channel[2].user_vignette_start != 0.0))
||
(doFraming && ( memcmp(&decoder->cfhddata.channel[0].FrameMask, &emptyFrameMask, 32) ||
decoder->cfhddata.FrameOffsetX ||
decoder->cfhddata.FrameOffsetY ||
decoder->cfhddata.FrameOffsetR ||
decoder->cfhddata.FrameHScale != 1.0 ||
decoder->cfhddata.FrameHDynamic != 1.0 ||
decoder->cfhddata.channel[1].FrameZoom != 1.0 ||
decoder->cfhddata.channel[2].FrameZoom != 1.0))
||
(doGhostBust && (decoder->channel_blend_type == BLEND_NONE) && (channel_decodes == 2))
||
(doImageflips && decoder->cfhddata.channel_flip)
||
(decoder->preformatted_3D_type == BLEND_STACKED_ANAMORPHIC) ||
(decoder->preformatted_3D_type == BLEND_SIDEBYSIDE_ANAMORPHIC) ||
(decoder->channel_blend_type && decoder->frame.resolution == DECODED_RESOLUTION_QUARTER) || // 3D mode generally don't work in quarter res -- this prevents crashes.
( ((decoder->frame.width + 7) / 8) * 8 != decoder->frame.width || (channel_decodes > 1 && decoder->channel_blend_type != BLEND_NONE) ||
decoder->sample_uncompressed) ||
(decoder->cfhddata.doMesh)
)
{
if ( output_format == DECODED_FORMAT_BYR1 ||
output_format == DECODED_FORMAT_BYR2 ||
output_format == DECODED_FORMAT_BYR3 ||
output_format == DECODED_FORMAT_BYR4 )
{
// no manipulation should be applied
}
else
{
use_local_buffer = true;
local_pitch = ((decoder->frame.width + 7) / 8) * 8 * 6; //RGB48
if (decoder->image_dev_only)
{
decoder->frame.white_point = 13;
decoder->frame.format = internal_format = DECODED_FORMAT_WP13;
}
else if (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
{
decoder->frame.white_point = 13;
decoder->frame.format = internal_format = DECODED_FORMAT_W13A;
local_pitch = ((decoder->frame.width + 7) / 8) * 8 * 8;
}
else
{
decoder->frame.white_point = 13;
decoder->frame.format = internal_format = DECODED_FORMAT_WP13;
}
if ( decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL ||
decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER)
{
local_pitch *= 2; // need horizontal room to make 3D side by side frame
}
/*
if(output_format == DECODED_FORMAT_WP13 || output_format == DECODED_FORMAT_W13A)
{
// preserve HDR
decoder->frame.format = internal_format = output_format;//DECODED_FORMAT_WP13; // HDR output
if(output_format == DECODED_FORMAT_W13A)
local_pitch = decoder->frame.width * 8;
}
else
{
if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
{
decoder->frame.format = internal_format = DECODED_FORMAT_RG64;
local_pitch = decoder->frame.width * 8;
}
else
{
decoder->frame.format = internal_format = DECODED_FORMAT_RG48;
}
}*/
channel_offset = local_pitch * (decoder->frame.height);
}
}
}
if (output_format == DECODED_FORMAT_BYR4 && decoder->cfhddata.encode_curve_preset == 0)
{
if (decoder->BYR4LinearRestore == NULL)
{
int j, val;
int encode_curve_type = decoder->cfhddata.encode_curve >> 16;
//int encode_curve_neg = encode_curve_type & CURVE_TYPE_NEGATIVE;
float encode_curvebase;
if (encode_curve_type) //1 or 2
{
if (encode_curve_type & CURVE_TYPE_EXTENDED)
encode_curvebase = (float)(decoder->cfhddata.encode_curve & 0xffff); // use all 16-bits for larger log bases
else
encode_curvebase = (float)((decoder->cfhddata.encode_curve >> 8) & 0xff) / (float)(decoder->cfhddata.encode_curve & 0xff);
}
else
{
encode_curve_type = CURVE_TYPE_LOG;
encode_curvebase = 90.0;
}
#if _ALLOCATOR
decoder->BYR4LinearRestore = (unsigned short *)AllocAligned(decoder->allocator, 16384 * 2, 16);
#else
decoder->BYR4LinearRestore = (unsigned short *)MEMORY_ALIGNED_ALLOC(16384 * 2, 16);
#endif
for (j = 0; j < 16384; j++) //0 to 1
{
switch (encode_curve_type & CURVE_TYPE_MASK)
{
case CURVE_TYPE_LOG:
val = (int)(CURVE_LOG2LIN((float)j / 16384.0f,
(float)encode_curvebase) * 65535.0f);
break;
case CURVE_TYPE_GAMMA:
val = (int)(CURVE_GAM2LIN((float)j / 16384.0f,
(float)encode_curvebase) * 65535.0f);
break;
case CURVE_TYPE_CINEON:
val = (int)(CURVE_CINEON2LIN((float)j / 16384.0f,
(float)encode_curvebase) * 65535.0f);
break;
case CURVE_TYPE_CINE985:
val = (int)(CURVE_CINE9852LIN((float)j / 16384.0f,
(float)encode_curvebase) * 65535.0f);
break;
case CURVE_TYPE_PARA:
val = (int)(CURVE_PARA2LIN((float)j / 16384.0f,
(int)((decoder->cfhddata.encode_curve >> 8) & 0xff), (int)(decoder->cfhddata.encode_curve & 0xff)) * 65535.0f);
break;
case CURVE_TYPE_CSTYLE:
val = (int)(CURVE_CSTYLE2LIN((float)j / 16384.0f,
(int)((decoder->cfhddata.encode_curve >> 8) & 0xff)) * 65535.0f);
break;
case CURVE_TYPE_SLOG:
val = (int)(CURVE_SLOG2LIN((float)j / 16384.0f) * 65535.0f);
break;
case CURVE_TYPE_LOGC:
val = (int)(CURVE_LOGC2LIN((float)j / 16384.0f) * 65535.0f);
break;
case CURVE_TYPE_LINEAR:
default:
val = j;
break;
}
if (val < 0) val = 0;
if (val > 65535) val = 65535;
decoder->BYR4LinearRestore[j] = val;
}
}
}
//DAN20120319 - removed
/*if(decoder->channel_mix_half_res) //decoding half but scaling to double the output size
{
local_pitch *= 2;
channel_offset = local_pitch * (decoder->frame.height*2);
}*/
if (use_local_buffer == true) // need buffer for anaglyph and other 3D presentation formats
{
int stereoframesize = channel_offset * channel_decodes/*stacked frames*/;
if (decoder->source_channels == 1 && decoder->preformatted_3D_type == BLEND_NONE)
stereoframesize = channel_offset;
if (channel_decodes == 1 && decoder->preformatted_3D_type != BLEND_NONE)
stereoframesize = channel_offset * 2;
if (channel_decodes == 2 && decoder->source_channels == 1 && decoder->channel_blend_type != BLEND_NONE)
stereoframesize = channel_offset * 2;
if (decoder->StereoBuffer == NULL || decoder->StereoBufferSize < stereoframesize)
{
#if _ALLOCATOR
if (decoder->StereoBuffer)
{
FreeAligned(decoder->allocator, decoder->StereoBuffer);
decoder->StereoBuffer = NULL;
}
decoder->StereoBuffer = (PIXEL16U *)AllocAligned(decoder->allocator, stereoframesize + 256, 16); //DAN20130517 add 256, as 2.7K half we are write off the buffers end for zoom, don't know why yet.
#else
if (decoder->StereoBuffer)
{
MEMORY_ALIGNED_FREE(decoder->StereoBuffer);
decoder->StereoBuffer = NULL;
}
decoder->StereoBuffer = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(stereoframesize + 256, 16); //DAN20130517 add 256, as 2.7K half we are write off the buffers end for zoom, don't know why yet.
#endif
assert(decoder->StereoBuffer != NULL);
if (! (decoder->StereoBuffer != NULL))
{
return CODEC_ERROR_MEMORY_ALLOC;
}
decoder->StereoBufferSize = stereoframesize;
}
decoder->StereoBufferFormat = internal_format;
local_buffer = (uint8_t *)decoder->StereoBuffer;
local_output = local_buffer;
}
DecodeEntropyInit(decoder);
//swapped -- Maybe useful for double height decodes.
/* if(channel_decodes == 2 && channel_swapped_flags & FLAG3D_SWAPPED)
{
local_output += channel_offset;
channel_offset = -channel_offset;
}*/
decoder->use_local_buffer = use_local_buffer ? 1 : 0;
if (channel_decodes == 2 && decoder->parallelDecoder == NULL && decoder->source_channels > 1)
{
int encoded_width = decoder->frame.width;
int encoded_height = decoder->frame.height;
if (decoder->frame.resolution == DECODED_RESOLUTION_HALF)
{
// Compute the encoded dimensions from the frame dimensions
encoded_width *= 2;
encoded_height *= 2;
}
else if (decoder->frame.resolution == DECODED_RESOLUTION_QUARTER)
{
// Compute the encoded dimensions from the frame dimensions
encoded_width *= 4;
encoded_height *= 4;
}
else if (decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
{
// Compute the encoded dimensions from the frame dimensions
encoded_width *= 2;
}
else if (decoder->frame.resolution == DECODED_RESOLUTION_HALF_VERTICAL)
{
// Compute the encoded dimensions from the frame dimensions
encoded_height *= 2;
}
#if _ALLOCATOR
decoder->parallelDecoder = (DECODER *)Alloc(decoder->allocator, sizeof(DECODER));
if (decoder->parallelDecoder)
{
memset(decoder->parallelDecoder, 0, sizeof(DECODER));
DecodeInit(decoder->allocator, decoder->parallelDecoder, encoded_width, encoded_height,
internal_format, DECODED_RESOLUTION_FULL, NULL);
}
#else
decoder->parallelDecoder = (DECODER *)MEMORY_ALLOC(sizeof(DECODER));
if (decoder->parallelDecoder)
{
memset(decoder->parallelDecoder, 0, sizeof(DECODER));
decoder->parallelDecoder->thread_cntrl = decoder->thread_cntrl;
DecodeInit(decoder->parallelDecoder, encoded_width, encoded_height,
internal_format, DECODED_RESOLUTION_FULL, NULL);
}
#endif
}
// Using the parallel decoder?
if (decoder->parallelDecoder)
{
// Initialize the parallel decoder with parameters from the regular decoder
memcpy(&decoder->parallelDecoder->cfhddata, &decoder->cfhddata, sizeof(CFHDDATA));
DecodeEntropyInit(decoder->parallelDecoder);
decoder->parallelDecoder->channel_decodes = decoder->channel_decodes;
decoder->parallelDecoder->channel_blend_type = decoder->channel_blend_type;
decoder->parallelDecoder->flags = decoder->flags;
decoder->parallelDecoder->frame = decoder->frame;
decoder->parallelDecoder->use_local_buffer = use_local_buffer ? 1 : 0;
decoder->parallelDecoder->codec.encoded_format = decoder->codec.encoded_format;
if (decoder->parallelDecoder->decoder_thread.pool.thread_count == 0)
{
CreateLock(&decoder->parallelDecoder->decoder_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->parallelDecoder->decoder_thread.pool,
1, //
ParallelThreadProc,
decoder->parallelDecoder);
}
}
if (channel_decodes == 2 && decoder->real_channels > 1 && decoder->parallelDecoder && decoder->parallelDecoder->decoder_thread.pool.thread_count)
{
// Second stream as a thread.
BITSTREAM second_input = *input;
if (decoder->cfhddata.FramingFlags & 2 && decoder->source_channels >= 2) // channel swap
{
BITSTREAM leftEye_input = *input;
SkipVideoChannel(decoder, &leftEye_input, 2); // 3D work
*input = leftEye_input;
SkipVideoChannel(decoder, &second_input, 1); // 3D work
}
else
SkipVideoChannel(decoder, &second_input, 2); // 3D work
decoder->channel_current = 0;
decoder->parallelDecoder->channel_current = 1;
// Instead of reading the metadata databases again, use the ones in the main decoder
OverrideCFHDDATAUsingParent(decoder->parallelDecoder, decoder, input->lpCurrentBuffer, input->nWordsUsed);
// Hack, this gets lost
decoder->parallelDecoder->cfhddata.split_CC_position = decoder->cfhddata.split_CC_position;
// Post a message to the mailbox
decoder->parallelDecoder->decoder_thread.input = &second_input;
if (use_local_buffer == false &&
(decoder->frame.format == DECODED_FORMAT_RGB32 || decoder->frame.format == DECODED_FORMAT_RGB24))
{
decoder->parallelDecoder->decoder_thread.output = local_output;
local_output += channel_offset;
}
else
{
decoder->parallelDecoder->decoder_thread.output = local_output + channel_offset;
}
decoder->parallelDecoder->decoder_thread.pitch = local_pitch;
decoder->parallelDecoder->decoder_thread.colorparams = colorparams;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->parallelDecoder->decoder_thread.pool, 1);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->parallelDecoder->decoder_thread.pool, THREAD_MESSAGE_START);
// do the first channel
{
TAGVALUE segment;
int sample_type;
#if _THREADED
decoder->entropy_worker_new.next_queue_num = 0;
decoder->entropy_worker_new.threads_used = 0;
#endif
// Get the type of sample
segment = GetTagValue(input);
assert(segment.tuple.tag == CODEC_TAG_SAMPLE);
if (!IsValidSegment(input, segment, CODEC_TAG_SAMPLE))
{
decoder->error = CODEC_ERROR_BITSTREAM;
STOP(tk_decompress);
return false;
}
sample_type = segment.tuple.value;
switch (sample_type)
{
case SAMPLE_TYPE_GROUP: // Group of frames (decode the first frame)
result = DecodeSampleGroup(decoder, input, local_output, local_pitch, colorparams);
break;
case SAMPLE_TYPE_FRAME: // Decode the second or later frame in a group
result = DecodeSampleFrame(decoder, input, local_output, local_pitch, colorparams);
break;
case SAMPLE_TYPE_IFRAME: // Decode a sample that represents an isolated frame
result = DecodeSampleIntraFrame(decoder, input, local_output, local_pitch, colorparams);
break;
case SAMPLE_TYPE_SEQUENCE_HEADER:
// The video sequence header is ignored
result = true;
break;
default:
// Need to fill the output frame
//error = CODEC_ERROR_SAMPLE_TYPE;
result = false;
}
}
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->parallelDecoder->decoder_thread.pool);
}
else
{
while (channel_decodes > 0)
{
TAGVALUE segment;
int sample_type;
local_decoder->channel_current = channel_current++;
#if _THREADED
local_decoder->entropy_worker_new.next_queue_num = 0;
local_decoder->entropy_worker_new.threads_used = 0;
#endif
if (decoder->image_dev_only)
{
result = DecodeSampleIntraFrame(local_decoder, input, local_output, local_pitch, colorparams);
}
else
{
// Get the type of sample
segment = GetTagValue(input);
assert(segment.tuple.tag == CODEC_TAG_SAMPLE);
if (!IsValidSegment(input, segment, CODEC_TAG_SAMPLE))
{
local_decoder->error = CODEC_ERROR_BITSTREAM;
STOP(tk_decompress);
return false;
}
sample_type = segment.tuple.value;
switch (sample_type)
{
case SAMPLE_TYPE_GROUP: // Group of frames (decode the first frame)
result = DecodeSampleGroup(local_decoder, input, local_output, local_pitch, colorparams);
break;
case SAMPLE_TYPE_FRAME: // Decode the second or later frame in a group
result = DecodeSampleFrame(local_decoder, input, local_output, local_pitch, colorparams);
break;
case SAMPLE_TYPE_IFRAME: // Decode a sample that represents an isolated frame
result = DecodeSampleIntraFrame(local_decoder, input, local_output, local_pitch, colorparams);
break;
case SAMPLE_TYPE_SEQUENCE_HEADER:
// The video sequence header is ignored
result = true;
break;
default:
// Need to fill the output frame
//error = CODEC_ERROR_SAMPLE_TYPE;
result = false;
}
}
if (ConvertPreformatted3D(decoder, use_local_buffer, internal_format, channel_mask, local_output, local_pitch, &channel_offset))
{
channel_decodes = 0;
}
else
{
channel_decodes--;
local_output += channel_offset;
if (decoder->parallelDecoder)
{
local_decoder = decoder->parallelDecoder;
}
}
}
}
if (use_local_buffer && output)
{
decoder->use_local_buffer = 0;
ConvertLocalToOutput(decoder, output, pitch, output_format, local_buffer, local_pitch, abs(channel_offset));
}
if (decoder->channel_mix_half_res) //HACK
{
decoder->frame.resolution = DECODED_RESOLUTION_FULL;
decoder->frame.width *= 2;
decoder->frame.height *= 2;
decoder->channel_mix_half_res = 0;
}
if ( decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL) //HACK
{
decoder->frame.resolution = DECODED_RESOLUTION_FULL;
decoder->frame.width *= 2;
}
if ( decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER) //HACK
{
decoder->frame.resolution = DECODED_RESOLUTION_FULL;
}
STOP(tk_decompress);
// Return indication of whether decoding succeeded or failed
return result;
}
// Decode a sample that encoded a group of frames (return the first frame)
//
// Reads tag/value segments from the bitstream and feeds each one to
// UpdateCodecState() until the codec state reports that the whole sample has
// been consumed (codec->sample_done), skipping the remainder of a channel
// whenever CanSkipChannel() permits.  On success the group length is recorded
// as two frames and the first frame is reconstructed into the output buffer
// (unless decoder->no_output is set).  On any bitstream or codec-state error
// the output frame is zeroed and false is returned; decoder->error records
// the cause.
//
// decoder     - decoder instance (transforms, codec state, error slot)
// input       - bitstream positioned at the start of the group sample
// output      - destination buffer for the first decoded frame
// pitch       - output row pitch in bytes
// colorparams - color parameters (not referenced directly in this routine;
//               presumably used by routines called downstream — TODO confirm)
//
// Returns true if the group was decoded successfully, false otherwise.
bool DecodeSampleGroup(DECODER *decoder, BITSTREAM *input, uint8_t *output, int pitch, ColorParam *colorparams)
{
    CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    CODEC_STATE *codec = &decoder->codec;

    // Number of bytes to clear in the output if decoding fails
    int32_t frame_size = decoder->frame.height * pitch;
    int resolution = decoder->frame.resolution;
    bool result = true;

    // Tables mapping each subband number to the wavelet that stores it and
    // to the band within that wavelet (17 subbands for the group transform)
    static int subband_wavelet_index[] = {5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 1, 1, 1, 0, 0, 0};
    static int subband_band_index[] = {0, 1, 2, 3, 1, 2, 3, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3};
    int num_subbands = sizeof(subband_wavelet_index) / sizeof(subband_wavelet_index[0]);

#if (0 && DEBUG)
    // Force quarter resolution decoding for debug that feature
    resolution = DECODED_RESOLUTION_QUARTER;
#endif

#if (0 && DEBUG)
    if (logfile)
    {
        fprintf(logfile, "Decoding sample group\n");
    }
#endif

    START(tk_decoding);

    // Initialize the codec state
    InitCodecState(&decoder->codec);

    // Allocate the transform data structure for the group of frames
    AllocDecoderGroup(decoder);

    // Initialize the tables for decoding the wavelet transforms
    InitWaveletDecoding(decoder, subband_wavelet_index, subband_band_index, num_subbands);

    // Clear the flags in the wavelet transforms
    ClearTransformFlags(decoder);

    // Process the tag value pairs until an encoded subband is found
    for (;;)
    {
        TAGVALUE segment;

        // Read the next tag value pair from the bitstream
        //segment = GetTagValue(input);
        segment = GetSegment(input);
        assert(input->error == BITSTREAM_ERROR_OKAY);
        if (input->error != BITSTREAM_ERROR_OKAY)
        {
            decoder->error = CODEC_ERROR_BITSTREAM;
            result = false;
            break;
        }

        // Update the codec state with the information in the tag value pair
        {
            TAGWORD tag = segment.tuple.tag;
            TAGWORD value = segment.tuple.value;

            // Use the tag value pair to update the codec state
            error = UpdateCodecState(decoder, input, codec, tag, value);
            assert(error == CODEC_ERROR_OKAY);
            if (error != CODEC_ERROR_OKAY)
            {
                decoder->error = error;
                result = false;
                break;
                //NOTE: Consider moving the error code into the codec state
            }
        }

        // Check whether the group has been decoded
        if (codec->sample_done) break;

        // Skip the rest of the current channel?
        if (CanSkipChannel(decoder, resolution))
        {
            // YUV output with the fourth channel pending: the alpha channel
            // is not needed, so skip straight to the end of decoding
            if (codec->channel == 3 && (decoder->frame.format == DECODED_FORMAT_YUYV || decoder->frame.format == DECODED_FORMAT_UYVY))
            {
                int channel = codec->channel;
                uint32_t channel_size = codec->channel_size[channel];
                uint8_t *position = codec->channel_position + channel_size;

                // Advance the bitstream to the next channel
                SetBitstreamPosition(input, position);

                // Reset the decoded subband flags (otherwise this code will be executed again)
                codec->decoded_subband_flags = 0;
                codec->num_channels = 3;
                goto decoding_complete;
            }
            else if (resolution == DECODED_RESOLUTION_LOWPASS_ONLY)
            {
                int channel = codec->channel;
                uint32_t channel_size = codec->channel_size[channel];
                uint8_t *position = codec->channel_position + channel_size;

                // Advance the bitstream to the next channel
                SetBitstreamPosition(input, position);

                // Reset the decoded subband flags (otherwise this code will be executed again)
                codec->decoded_subband_flags = 0;
            }
            else
            {
                // Compute the bitstream position after the current channel
                int channel = codec->channel;
                uint32_t channel_size = codec->channel_size[channel];
                uint8_t *position = codec->channel_position + channel_size;

                // Get the temporal wavelet
                int temporal_index = 2;
                TRANSFORM *transform = decoder->transform[channel];
                IMAGE *wavelet = transform->wavelet[temporal_index];

#if (0 && DEBUG)
                if (IsBandValid(wavelet, HIGHPASS_BAND))
                {
                    int static count = 0;
                    if (count < 20)
                    {
                        char label[PATH_MAX];
                        sprintf(label, "Temporal-decode-%d-", count);
                        DumpBandPGM(label, wavelet, HIGHPASS_BAND, NULL);
                    }
                    count++;
                }
#endif

#if _THREADED_DECODER
                // Ready to invert this wavelet to get the lowpass band in the lower wavelet?
                //if (DecodedBandsValid(wavelet, temporal_index))
                if (resolution != DECODED_RESOLUTION_QUARTER || (decoder->codec.encoded_format == ENCODED_FORMAT_BAYER))
#else
                // Have all bands in the temporal wavelet been decoded?
                //if (wavelet && BANDS_ALL_VALID(wavelet))
                if (AllBandsValid(wavelet))
#endif
                {
                    //PIXEL *buffer = (PIXEL *)decoder->buffer;
                    //size_t buffer_size = decoder->buffer_size;
                    int precision = codec->precision;

#if (0 && DEBUG)
                    if (logfile)
                    {
                        fprintf(logfile, "Reconstructing the lowpass bands in the first level wavelets\n");
                    }
#endif

#if _THREADED_DECODER
                    // Add the temporal inverse transform to the processing queue
                    if (decoder->entropy_worker_new.pool.thread_count)
                    {
                        ReconstructWaveletBand(decoder, transform, channel, wavelet, temporal_index,
                                               precision, &decoder->scratch, 1);
                        QueueThreadedTransform(decoder, channel, temporal_index);
                    }
                    else
#endif
                    {
                        // Reconstruct the lowpass bands in the first level wavelets
                        //ReconstructWaveletBand(transform, channel, wavelet, temporal_index, precision, buffer, buffer_size);
                        ReconstructWaveletBand(decoder, transform, channel, wavelet, temporal_index,
                                               precision, &decoder->scratch, 0 );
                    }

                    // Advance the bitstream to the next channel
                    SetBitstreamPosition(input, position);

                    // Reset the decoded subband flags (otherwise this code will be executed again)
                    codec->decoded_subband_flags = 0;

                    // Note that the subband flags are also reset when the channel header is decoded
                }
                // Was the wavelet created?
                else if (wavelet == NULL)
                {
                    // The temporal wavelet is not created during quarter resolution decoding

                    // Advance the bitstream to the next channel
                    SetBitstreamPosition(input, position);

                    // Reset the decoded subband flags (otherwise this code will be executed again)
                    codec->decoded_subband_flags = 0;
                }
                //TODO: Improve quarter resolution decoding so that the wavelet is created?
            }
        }
    }

decoding_complete:
    STOP(tk_decoding);

#if (0 && DEBUG)
    if (logfile)
    {
        char label[PATH_MAX];
        int channel;
        for (channel = 0; channel < codec->num_channels; channel++)
        {
            TRANSFORM *transform = decoder->transform[channel];
            IMAGE *wavelet = transform->wavelet[2];
            uint8_t *data = (uint8_t *)wavelet->band[HIGHPASS_BAND];
            int height = wavelet->height;
            int pitch = wavelet->pitch;
            int size = height * pitch;
            int band;
            for (band = 0; band < wavelet->num_bands; band++)
            {
                sprintf(label, "Temporal channel: %d, band: %d", channel, band);
                DumpBandStatistics(label, wavelet, band, logfile);
#if 0
                sprintf(label, "Temporal-channel%d-band%d-", channel, band);
                DumpBandPGM(label, wavelet, band, NULL);
#endif
            }
            assert(size > 0);
            memset(data, 0, size);
        }
    }
#endif

    if (result)
    {
        // Two frames have been decoded
        decoder->gop_length = 2;
        decoder->frame_count += 2;

#if (DEBUG)
        if (logfile)
        {
            fprintf(logfile,
                    "DecodeSampleGroup, decoder: 0x%p, GOP length: %d\n",
                    decoder, decoder->gop_length);
        }
#endif

        // Return the first frame in the group
        if (!decoder->no_output)
        {
            // Finish computing the output frame
            ReconstructSampleFrameToBuffer(decoder, 0, output, pitch);
        }

        if (decoder->error != CODEC_ERROR_OKAY)
        {
            result = false;
        }

#if TIMING
        // Increment the count of bytes that have been decoded
        decode_byte_count += (COUNTER)BitstreamByteCount(input);
#endif
    }

    if (!result)
    {
        // Check that the frame can be cleared
        assert(frame_size > 0);
        if (frame_size > 0)
        {
            // Zero the frame so the caller never sees stale buffer contents
            memset(output, 0, frame_size);
        }
    }

    return result;
}
// Decode a sample that represents the second frame in a group
//
// Consumes tag/value pairs from the bitstream up to and including the frame
// index marker, then reconstructs the appropriate frame of the current group
// into the output buffer (the second frame when the group holds two frames,
// otherwise the first).  On failure the output frame is zeroed and false is
// returned; decoder->error records the cause.
bool DecodeSampleFrame(DECODER *decoder, BITSTREAM *input, uint8_t *output, int pitch, ColorParam *colorparams)
{
    CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    CODEC_STATE *codec = &decoder->codec;
    int32_t frame_size = decoder->frame.height * pitch;
    bool result = true;

    START(tk_decoding);

    // Consume the frame header: process tag/value pairs until the frame index
    for (;;)
    {
        TAGVALUE segment = GetSegment(input);
        TAGWORD tag;
        TAGWORD value;

        assert(input->error == BITSTREAM_ERROR_OKAY);
        if (input->error != BITSTREAM_ERROR_OKAY)
        {
            decoder->error = CODEC_ERROR_BITSTREAM;
            result = false;
            break;
        }

        tag = segment.tuple.tag;
        value = segment.tuple.value;

        // Apply this tag/value pair to the codec state
        error = UpdateCodecState(decoder, input, codec, tag, value);
        assert(error == CODEC_ERROR_OKAY);
        if (error != CODEC_ERROR_OKAY)
        {
            decoder->error = error;
            result = false;
            break;
        }

        // The frame index terminates the frame header
        if (tag == CODEC_TAG_FRAME_INDEX)
        {
            break;
        }
    }

    STOP(tk_decoding);

#if (DEBUG)
    if (logfile)
    {
        fprintf(logfile,
                "DecodeSampleFrame, decoder: 0x%p, GOP length: %d\n",
                decoder, decoder->gop_length);
    }
#endif

    if (result)
    {
        // Display the second frame when the group has two frames, otherwise
        // fall back to the first (and only) frame in the group
        if (decoder->gop_length > 0)
        {
            int frame_index = (decoder->gop_length >= 2) ? 1 : 0;
            ReconstructSampleFrameToBuffer(decoder, frame_index, output, pitch);
            if (decoder->error != CODEC_ERROR_OKAY)
            {
                result = false;
            }
        }

#if TIMING
        // Increment the count of bytes that have been decoded
        decode_byte_count += (COUNTER)BitstreamByteCount(input);
#endif
    }

    if (!result)
    {
        // Frame type that is not handled: clear the output frame
        assert(frame_size > 0);
        if (frame_size > 0)
        {
            memset(output, 0, frame_size);
        }
    }

    return result;
}
// Decode a sample that encodes an intra frame
//
// Same segment-processing loop as DecodeSampleGroup but for a single
// self-contained frame (spatial transform only).  Tag/value pairs are fed
// to UpdateCodecState() until the sample is done; channels may be skipped
// when CanSkipChannel() permits.  On success one frame is reconstructed
// into the output buffer; quarter-resolution non-Bayer decodes take a
// dedicated path (ConvertQuarterFrameToBuffer).  On failure the output
// frame is zeroed and false is returned; decoder->error records the cause.
//
// decoder     - decoder instance (transforms, codec state, error slot)
// input       - bitstream positioned at the start of the intra-frame sample
// output      - destination buffer for the decoded frame
// pitch       - output row pitch in bytes
// colorparams - color parameters (not referenced directly in this routine)
bool DecodeSampleIntraFrame(DECODER *decoder, BITSTREAM *input, uint8_t *output, int pitch, ColorParam *colorparams)
{
    CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    CODEC_STATE *codec = &decoder->codec;

    // Number of bytes to clear in the output if decoding fails
    int32_t frame_size = decoder->frame.height * pitch;
    int resolution = decoder->frame.resolution;
    bool result = true;

    // Tables mapping each subband number to the wavelet that stores it and
    // to the band within that wavelet.  Only the first num_subbands entries
    // (sized from subband_wavelet_index, i.e. 10) are in play; the band
    // index table carries extra trailing entries.
    static int subband_wavelet_index[] = {2, 2, 2, 2, 1, 1, 1, 0, 0, 0};
    static int subband_band_index[] = {0, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3};
    int num_subbands = sizeof(subband_wavelet_index) / sizeof(subband_wavelet_index[0]);

    START(tk_decoding);

    // Image-development-only mode bypasses the bitstream parsing entirely
    if (decoder->image_dev_only) goto decoding_completeI;

    // Initialize the codec state
    InitCodecState(&decoder->codec);

    // Allocate the transform data structure for the group of frames
    AllocDecoderGroup(decoder);

    // Initialize the tables for decoding the wavelet transforms
    InitWaveletDecoding(decoder, subband_wavelet_index, subband_band_index, num_subbands);

    // Clear the flags in the wavelet transforms
    ClearTransformFlags(decoder);

    //Force V210 output for debugging ***DEBUG***
    //decoder->frame.format = DECODED_FORMAT_V210;

    // Process the tag value pairs until an encoded subband is found
    for (;;)
    {
        TAGVALUE segment;

        // Read the next tag value pair from the bitstream
        segment = GetSegment(input);
        assert(input->error == BITSTREAM_ERROR_OKAY);
        if (input->error != BITSTREAM_ERROR_OKAY)
        {
            decoder->error = CODEC_ERROR_BITSTREAM;
            result = false;
            break;
        }

        // Update the codec state with the information in the tag value pair
        {
            TAGWORD tag = segment.tuple.tag;
            TAGWORD value = segment.tuple.value;

            // Use the tag value pair to update the codec state
            error = UpdateCodecState(decoder, input, codec, tag, value);
            assert(error == CODEC_ERROR_OKAY);
            if (error != CODEC_ERROR_OKAY)
            {
                decoder->error = error;
                result = false;
                break;
                //NOTE: Consider moving the error code into the codec state
            }
        }

        // Check whether the group has been decoded
        if (codec->sample_done)
        {
            break;
        }

        // Skip the rest of the current channel?
        if (CanSkipChannel(decoder, resolution))
        {
            // YUV output with the fourth channel pending: the alpha channel
            // is not needed, so skip straight to the end of decoding
            if (codec->channel == 3 && (decoder->frame.format == DECODED_FORMAT_YUYV || decoder->frame.format == DECODED_FORMAT_UYVY))
            {
                int channel = codec->channel;
                uint32_t channel_size = codec->channel_size[channel];
                uint8_t *position = codec->channel_position + channel_size;

                // Advance the bitstream to the next channel
                SetBitstreamPosition(input, position);

                // Reset the decoded subband flags (otherwise this code will be executed again)
                codec->decoded_subband_flags = 0;
                codec->num_channels = 3;
                goto decoding_completeI;
            }
            else if (resolution == DECODED_RESOLUTION_LOWPASS_ONLY)
            {
                int channel = codec->channel;
                uint32_t channel_size = codec->channel_size[channel];
                uint8_t *position = codec->channel_position + channel_size;

                // Advance the bitstream to the next channel
                SetBitstreamPosition(input, position);

                // Reset the decoded subband flags (otherwise this code will be executed again)
                codec->decoded_subband_flags = 0;
            }
            else
            {
                // Compute the bitstream position after the current channel
                int channel = codec->channel;
                uint32_t channel_size = codec->channel_size[channel];
                uint8_t *position = codec->channel_position + channel_size;

                // Get the highest wavelet in the pyramid
                int wavelet_index = 2;
                TRANSFORM *transform = decoder->transform[channel];
                IMAGE *wavelet = transform->wavelet[wavelet_index];

#if _THREADED_DECODER
                // Ready to invert this wavelet to get the lowpass band in the lower wavelet?
                //if (DecodedBandsValid(wavelet, temporal_index))
                if (resolution != DECODED_RESOLUTION_QUARTER || (decoder->codec.encoded_format == ENCODED_FORMAT_BAYER))
#else
                // Have all bands in the wavelet been decoded?
                if (AllBandsValid(wavelet))
#endif
                {
                    //PIXEL *buffer = (PIXEL *)decoder->buffer;
                    //size_t buffer_size = decoder->buffer_size;
                    int precision = codec->precision;

#if (0 && DEBUG)
                    if (logfile)
                    {
                        char label[PATH_MAX];
                        int band;
                        sprintf(label, "Channel: %d, index: %d", channel, wavelet_index);
                        DumpImageStatistics(label, wavelet, logfile);
#if 1
                        for (band = 1; band < wavelet->num_bands; band++)
                        {
                            sprintf(label, "Channel: %d, index: %d, band: %d", channel, wavelet_index, band);
                            DumpBandStatistics(label, wavelet, band, logfile);
                        }
#endif
                    }
#endif

// NOTE(review): `0 & DEBUG` (bitwise) rather than `0 && DEBUG`; both
// evaluate to 0, so this block is disabled either way.
#if (0 & DEBUG)
                    if (logfile)
                    {
                        fprintf(logfile, "Reconstructing the lowpass bands in the first level wavelets\n");
                    }
#endif

#if _THREADED_DECODER
                    // Add the inverse spatial transform to the processing queue
                    if (decoder->entropy_worker_new.pool.thread_count)
                    {
                        ReconstructWaveletBand(decoder, transform, channel, wavelet, wavelet_index,
                                               precision, &decoder->scratch, 1);
                        QueueThreadedTransform(decoder, channel, wavelet_index);
                    }
                    else
#endif
                    {
                        // Reconstruct the lowpass bands in the first level wavelets
                        //ReconstructWaveletBand(transform, channel, wavelet, temporal_index, precision, buffer, buffer_size);
                        ReconstructWaveletBand(decoder, transform, channel, wavelet, wavelet_index,
                                               precision, &decoder->scratch, 0);
                    }

                    // Advance the bitstream to the next channel
                    SetBitstreamPosition(input, position);

                    // Reset the decoded subband flags (otherwise this code will be executed again)
                    codec->decoded_subband_flags = 0;

                    // Note that the subband flags are also reset when the channel header is decoded
                }
                // Was the wavelet created?
                //else if (wavelet == NULL)
                else
                {
                    // The wavelet may not have been created during quarter resolution decoding

                    // The wavelet should have been created if all bands are valid
                    assert(wavelet != NULL);

                    // Advance the bitstream to the next channel
                    SetBitstreamPosition(input, position);

                    // Reset the decoded subband flags (otherwise this code will be executed again)
                    codec->decoded_subband_flags = 0;
                }
                //TODO: Improve quarter resolution decoding so that the wavelet is created?
            }
        }
    }

decoding_completeI:
    STOP(tk_decoding);

    if (result)
    {
        // One frame has been decoded
        decoder->gop_length = 1;
        decoder->frame_count += 1;

#if (0 && DEBUG)
        if (logfile)
        {
            fprintf(logfile,
                    "DecodeSampleIntraFrame, decoder: 0x%p, GOP length: %d\n",
                    decoder, decoder->gop_length);
        }
#endif

        // Return the first frame (the only frame that was decoded)
        if (!decoder->no_output)
        {
            int uncompressed = decoder->uncompressed_chunk && decoder->uncompressed_size && decoder->sample_uncompressed;

            // Quarter-resolution non-Bayer decodes skip the full inverse
            // transform and convert directly from the wavelet pyramid
            if ( !uncompressed && resolution == DECODED_RESOLUTION_QUARTER && (decoder->codec.encoded_format != ENCODED_FORMAT_BAYER))
            {
                //CODEC_STATE *codec = &decoder->codec;
                TRANSFORM **transform_array = decoder->transform;
                int num_channels = codec->num_channels;
                //int progressive = codec->progressive;
                FRAME_INFO *info = &decoder->frame;
                int precision = codec->precision;

#if _THREADED_DECODER
                // Wait until the transform thread has finished all pending transforms
                WaitForTransformThread(decoder);
#endif

                ConvertQuarterFrameToBuffer(decoder, transform_array, num_channels, output, pitch, info, precision);
            }
            else
            {
                // Finish computing the output frame
                ReconstructSampleFrameToBuffer(decoder, 0, output, pitch);
            }
        }

        if (decoder->error != CODEC_ERROR_OKAY)
        {
            result = false;
        }

#if TIMING
        // Increment the count of bytes that have been decoded
        decode_byte_count += (COUNTER)BitstreamByteCount(input);
#endif
    }

    if (!result)
    {
        // Check that the frame can be cleared
        assert(frame_size > 0);
        if (frame_size > 0)
        {
            // Zero the frame so the caller never sees stale buffer contents
            memset(output, 0, frame_size);
        }
    }

    return result;
}
// Decode a sample channel header
//
// Reads the channel header that introduces the next channel in the sample,
// seeds the next channel's transform from the current one, and resets the
// per-channel codec state (subband counter and decoded-subband flags).
// Returns false (with decoder->error set) if the header cannot be decoded.
bool DecodeSampleChannelHeader(DECODER *decoder, BITSTREAM *input)
{
#if (DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    CODEC_STATE *codec = &decoder->codec;
    int prev_channel = codec->channel;
    int next_channel = prev_channel + 1;
    TRANSFORM *prev_transform = decoder->transform[prev_channel];
    CHANNEL_HEADER header;
    CODEC_ERROR error;

    // Decode the rest of the channel header
    error = DecodeChannelHeader(input, &header, SAMPLE_TYPE_CHANNEL);
    assert(error == CODEC_ERROR_OKAY);
    decoder->error = error;
    if (error != CODEC_ERROR_OKAY)
    {
        return false;
    }

    // The decoder is not able to skip channels
    assert(header.channel == next_channel);

    // Seed the next channel's transform using the previous one
    InitChannelTransform(decoder->transform[next_channel], prev_transform);

    // Advance the codec state to the next channel
    codec->channel = next_channel;

    // Reset the subband counter
    codec->band.subband = 0;

    // Reset the decoded subband flags
    codec->decoded_subband_flags = 0;

    return true;
}
// Decode the coefficients in a subband
//
// Dispatches on the subband number parsed from the bitstream:
//   subband == 255  - an "empty" band (the highpass band of the temporal
//                     wavelet that the encoder transmitted as empty)
//   subband  >  0   - a highpass band of one of the wavelets
//   subband ==  0   - the lowpass band (always wavelet band zero)
// The target wavelet is (re)allocated as needed, the band is decoded, and
// the codec bookkeeping (decoded-subband flags, next expected subband) is
// updated.  Once all bands of a wavelet are available, the inverse wavelet
// transform is applied immediately or queued on the threaded decoder.
//
// FIX: the original code dereferenced the wavelet returned by
// GetWaveletThreadSafe()/ReallocWaveletEx() without checking for allocation
// failure (e.g. wavelet->pixel_type[band] on a NULL pointer is undefined
// behavior).  Each allocation site now returns false on failure, which
// callers already treat as a decode error.
//
// Returns true if the subband was decoded successfully.
bool DecodeSampleSubband(DECODER *decoder, BITSTREAM *input, int subband)
{
#if (DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    CODEC_STATE *codec = &decoder->codec;
    int channel = codec->channel;
    TRANSFORM *transform = decoder->transform[channel];
    int *subband_wavelet_index = decoder->subband_wavelet_index;

    // Used for quarter resolution and threaded decoding
    int transform_type = transform->type;

    // Wavelet parameters
    int width;
    int height;
    int level;
    int type;
    int band;
    int threading = 1;

    // Wavelet containing the band to decode
    int index;
    IMAGE *wavelet = NULL;
    bool result;

    // Subbands 7..10 of the fieldplus transform are not threaded
    if (subband >= 7 && subband <= 10 && transform_type == TRANSFORM_TYPE_FIELDPLUS)
        threading = 0;

    // Update the transform data structure from the codec state
    UpdateCodecTransform(transform, codec);

    // Is this an empty band?
    if (subband == 255)
    {
        // Decode an empty band

        // This wavelet is the temporal wavelet
        index = 2;
        wavelet = transform->wavelet[index];

        // Get the wavelet parameters decoded from the bitstream
        width = codec->band.width;
        height = codec->band.height;
        level = codec->highpass.wavelet_level;
        type = codec->highpass.wavelet_type;
        band = codec->band.number;

        // The empty band should be the highpass band in a temporal wavelet
        assert(type == WAVELET_TYPE_TEMPORAL && band == 1);

#if _THREADED_DECODER
        // Allocate (or reallocate) the wavelet with thread safety
        wavelet = GetWaveletThreadSafe(decoder, transform, index, width, height, level, type);
#else
        // Allocate (or reallocate) the wavelet
#if _ALLOCATOR
        wavelet = ReallocWaveletEx(decoder->allocator, wavelet, width, height, level, type);
#else
        wavelet = ReallocWaveletEx(wavelet, width, height, level, type);
#endif
        // Save this wavelet in the transform data structure
        transform->wavelet[index] = wavelet;
#endif

        // Guard against allocation failure before touching the wavelet
        assert(wavelet != NULL);
        if (wavelet == NULL)
        {
            return false;
        }

        // Set the wavelet parameters
        wavelet->pixel_type[band] = PIXEL_TYPE_16S;
        wavelet->num_bands = 2;

        result = DecodeSampleEmptyBand(decoder, input, wavelet, band);

        // Set the subband number for the next band expected in the bitstream
        codec->band.subband = 11;
    }
    // Is this a highpass band?
    else if (subband > 0)
    {
        // Decode a highpass band

        // Get the wavelet that contains this subband
        index = subband_wavelet_index[subband];
        wavelet = transform->wavelet[index];

        // Get the wavelet parameters decoded from the bitstream
        width = codec->band.width;
        height = codec->band.height;
        level = codec->highpass.wavelet_level;
        type = codec->highpass.wavelet_type;
        band = codec->band.number;

#if _THREADED_DECODER
        // Allocate (or reallocate) the wavelet with thread safety
        wavelet = GetWaveletThreadSafe(decoder, transform, index, width, height, level, type);
#else
        // Allocate (or reallocate) the wavelet
#if _ALLOCATOR
        wavelet = ReallocWaveletEx(decoder->allocator, wavelet, width, height, level, type);
#else
        wavelet = ReallocWaveletEx(wavelet, width, height, level, type);
#endif
        // Save this wavelet in the transform data structure
        transform->wavelet[index] = wavelet;
#endif

        // Guard against allocation failure before decoding into the wavelet
        assert(wavelet != NULL);
        if (wavelet == NULL)
        {
            return false;
        }

        result = DecodeSampleHighPassBand(decoder, input, wavelet, band, threading);
        if (result)
        {
            // Call thread safe routine to update the band valid flags
            UpdateWaveletBandStartedFlags(decoder, wavelet, band);
        }

        // Reset the default encoding method
        codec->band.encoding = BAND_ENCODING_RUNLENGTHS;

        // Set the subband number for the next band expected in the bitstream
        codec->band.subband = subband + 1;
    }
    else
    {
        // Decode a lowpass band

        // Get the wavelet that contains this subband
        index = subband_wavelet_index[0];
        wavelet = transform->wavelet[index];

        // Get the wavelet parameters decoded from the bitstream
        width = codec->lowpass.width;
        height = codec->lowpass.height;
        level = codec->lowpass.level;
        type = codec->first_wavelet;
        //band = codec->band.number;
        band = 0;

#if _THREADED_DECODER
        // Allocate (or reallocate) the wavelet with thread safety
        wavelet = GetWaveletThreadSafe(decoder, transform, index, width, height, level, type);
#else
        // Allocate (or reallocate) the wavelet
#if _ALLOCATOR
        wavelet = ReallocWaveletEx(decoder->allocator, wavelet, width, height, level, type);
#else
        wavelet = ReallocWaveletEx(wavelet, width, height, level, type);
#endif
        // Save this wavelet in the transform data structure
        transform->wavelet[index] = wavelet;
#endif

        // Guard against allocation failure before decoding into the wavelet
        assert(wavelet != NULL);
        if (wavelet == NULL)
        {
            return false;
        }

        // The lowpass data is always stored in wavelet band zero
        assert(band == 0);

        // The lowpass band must be subband zero
        assert(subband == 0);

        result = DecodeSampleLowPassBand(decoder, input, wavelet);
        if (result)
        {
            // Call thread safe routine to update the band valid flags
            UpdateWaveletBandValidFlags(decoder, wavelet, band);
        }

        // Set the subband number for the next band expected in the bitstream
        codec->band.subband = subband + 1;
    }

    // Was the subband successfully decoded?
    if (result)
    {
        // The transform will set the band valid flag if this is the temporal wavelet
        //if (index != 2)

        // Record that this subband has been decoded successfully
        if (0 <= subband && subband <= CODEC_MAX_SUBBAND)
            codec->decoded_subband_flags |= DECODED_SUBBAND_MASK(subband);

#if (0 && DEBUG)
        if (logfile)
        {
            fprintf(logfile, "Decoded subband: %d, wavelet: %d, channel: %d\n",
                    subband, index, channel);
        }
#endif
    }

#if _THREADED_DECODER
    // Ready to queue a threaded transform to invert this wavelet?
    if (BANDS_ALL_STARTED(wavelet))
    {
        // Are frames being decoded to quarter resolution?
        if (decoder->frame.resolution == DECODED_RESOLUTION_QUARTER && (decoder->codec.encoded_format != ENCODED_FORMAT_BAYER))
        {
            // Smallest spatial wavelet above the lowpass temporal band (fieldplus transform)
            int highest_index = 5;
            if (transform_type == TRANSFORM_TYPE_SPATIAL)
            {
                // Smallest wavelet in the spatial transform
                highest_index = 2;
            }

            // Only the smallest spatial wavelet must be reconstructed
            if (index != highest_index)
            {
                return result;
            }

            //TODO: Can we improve on the current scheme for quarter resolution decoding?
        }

        if ((transform->type == TRANSFORM_TYPE_SPATIAL && index > 0) || index >= 2)
        {
            if (decoder->entropy_worker_new.pool.thread_count && threading)
            {
                ReconstructWaveletBand(decoder, transform, codec->channel, wavelet, index,
                                       codec->precision, &decoder->scratch, 1);

                // Add the inverse wavelet transform to the processing queue
                QueueThreadedTransform(decoder, codec->channel, index);
            }
            else
            {
                // Apply the inverse wavelet transform to reconstruct the lower level wavelet
                ReconstructWaveletBand(decoder, transform, codec->channel, wavelet, index,
                                       codec->precision, &decoder->scratch, 0);
            }
        }
    }
#else
    // Ready to invert this wavelet to get the lowpass band in the lower wavelet?
    if (BANDS_ALL_VALID(wavelet))
    {
        int channel = codec->channel;
        //PIXEL *buffer = (PIXEL *)decoder->buffer;
        //size_t buffer_size = decoder->buffer_size;
        int precision = codec->precision;

#if (0 && DEBUG)
        if (logfile)
        {
            char label[PATH_MAX];
            int band;
            sprintf(label, "Channel: %d, index: %d", channel, index);
            DumpImageStatistics(label, wavelet, logfile);
#if 1
            for (band = 1; band < wavelet->num_bands; band++)
            {
                sprintf(label, "Channel: %d, index: %d, band: %d", channel, index, band);
                DumpBandStatistics(label, wavelet, band, logfile);
            }
#endif
        }
#endif

        // Are frames being decoded to quarter resolution?
        if (decoder->frame.resolution == DECODED_RESOLUTION_QUARTER && (decoder->codec.encoded_format != ENCODED_FORMAT_BAYER))
        {
            // Smallest spatial wavelet above the lowpass temporal band (fieldplus transform)
            int highest_index = 5;
            if (transform_type == TRANSFORM_TYPE_SPATIAL)
            {
                // Smallest wavelet in the spatial transform
                highest_index = 2;
            }

            // Only the smallest spatial wavelet must be reconstructed
            if (index != highest_index)
            {
                return result;
            }

            //TODO: Can we improve on the current scheme for quarter resolution decoding?
        }

        // Apply the inverse wavelet transform to reconstruct the lower level wavelet
        ReconstructWaveletBand(decoder, transform, channel, wavelet, index, precision, &decoder->scratch, 0);
    }
#endif

    return result;
}
// Decode the coefficients in a lowpass band
//
// Reads the lowpass (DC) band of one wavelet from the bitstream into
// wavelet->band[0].  Three paths are used, chosen by the encoded pixel depth
// and bitstream alignment:
//   1. 16 bits/pixel, word-aligned stream, even width: read pairs of pixels
//      packed in big-endian 32-bit words directly from the stream buffer.
//   2. 8 bits/pixel, word-aligned stream: read bytes directly and expand to
//      the 16-bit pixel format.
//   3. Fallback: read each pixel with GetBits().
// Returns true on success (result is never set to false in this routine).
bool DecodeSampleLowPassBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet)
{
#if (DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    CODEC_STATE *codec = &decoder->codec;
    int channel = codec->channel;
    bool result = true;
    int lowpass_width; // Lowpass band dimensions
    int lowpass_height;
    int lowpass_pitch;
    PIXEL *pLowPassRow; // Pointer into the lowpass band
    //int wavelet_width; // Dimensions of the wavelet image
    //int wavelet_height;
    int bits_per_pixel;
    int quantization;
    int offset;
    //int pixel_divisor = (1 << (2 * codec->lowpass.level));
    int row, column;
    // -1 means "no solid color": the band must be decoded pixel by pixel
    int32_t solid_color = -1;
    const int gain = 128;
    const int colorshift = 0;
    // int channelgain[4];
    //int waterrow=19, watercol=214;
    //int cspace = decoder->frame.colorspace;

    // Lowpass image dimensions may be smaller than the wavelet dimensions
    // because the encoder may have transmitted an image without the border
    lowpass_width = codec->lowpass.width;
    lowpass_height = codec->lowpass.height;
    lowpass_pitch = wavelet->pitch / sizeof(PIXEL);
    pLowPassRow = wavelet->band[0];

    // Get the parameters for quantization performed by the encoder
    quantization = codec->lowpass.quantization;
    offset = codec->lowpass.pixel_offset;
    bits_per_pixel = codec->lowpass.bits_per_pixel;

#if (0 && DEBUG)
    if (logfile)
    {
        fprintf(logfile, "Decode lowpass subband\n");
    }
#endif

    // Fast path: 16-bit pixels packed two per 32-bit word in the stream buffer
    if (bits_per_pixel == 16 && stream->nBitsFree == BITSTREAM_BUFFER_SIZE && !(lowpass_width & 1))
    {
        int32_t *lpCurrentLong = (int32_t *)stream->lpCurrentWord;
        //int signval = 0;
        //int channel3stats = 0;

        // DC offset added to every decoded pixel to compensate for rounding
        // differences between encode paths.  The values below are empirically
        // tuned per precision, output format, and GOP length (see DAN notes).
        int channeloffset = 0;
        if (decoder->codec.precision == 8)
        {
            channeloffset = (codec->num_frames == 2 ? 64 : 32);
        }
        else if (decoder->codec.precision == 10)
        {
            switch (decoder->frame.format)
            {
                case DECODED_FORMAT_YU64:
                case DECODED_FORMAT_YR16:
                case DECODED_FORMAT_V210:
                    channeloffset = codec->num_frames == 2 ? 14 : 4; //DAN20090601, recal I-frame DAN20110301
                    break;
                default:
                    channeloffset = codec->num_frames == 2 ? 48 : 24; //DAN20090601
            }
            if (decoder->sample_uncompressed) //DAN20110301 was testing the GOP length for this (why?)
                channeloffset = 0; //DAN20100822 -- Prevent offset between uncompressed V210 and compressed frames
        }
        else if (decoder->codec.precision == 12)
        {
            switch (decoder->frame.format)
            {
                case DECODED_FORMAT_RGB24:
                case DECODED_FORMAT_RGB24_INVERTED:
                case DECODED_FORMAT_RGB32:
                case DECODED_FORMAT_RGB32_INVERTED:
                    channeloffset = 8; //DAN200906010
                    break;
                // 16-bit precision:
                case DECODED_FORMAT_RG48:
                case DECODED_FORMAT_RG64:
                case DECODED_FORMAT_B64A:
                case DECODED_FORMAT_WP13:
                case DECODED_FORMAT_W13A:
                    channeloffset = 0;
                    break;
                case DECODED_FORMAT_RG30:
                case DECODED_FORMAT_R210:
                case DECODED_FORMAT_DPX0:
                case DECODED_FORMAT_AR10:
                case DECODED_FORMAT_AB10:
                    channeloffset = 6; //DAN200906010 //DAN20100822 -- prefect for uncompressed to compressed.
                    break;
                default:
                    channeloffset = 0;
                    break;
            }
        }

        if (decoder->codec.encoded_format == ENCODED_FORMAT_BAYER) //DAN20090728 -- Prevent offset between uncompressed and compressed RAW frames
            channeloffset = 0;

#define DUMPLL 0
#if (_DEBUG && DUMPLL)
        FILE *fp;
        if (channel == 0)
        {
            static int inc = 1;
            char name[256];
            sprintf(name, "C:\\Cedoc\\LLdec%03d.pgm", inc++);
            fp = fopen(name, "w");
            fprintf(fp, "P2\n# CREATOR: DAN\n%d %d\n255\n", lowpass_width, lowpass_height);
        }
#endif

#if LOSSLESS
        channeloffset = 0; //LOSSLESS
#endif

        // A solid-color lowpass band is signaled by an all-ones marker word
        // followed by the color value and the band dimensions; in that case
        // no further words are read and every pixel pair gets the same value.
        //if(lpCurrentLong[0] == 0xffffffff)
        if (lpCurrentLong[0] == (int32_t)UINT32_MAX)
        {
            if (SwapInt32BtoN(lpCurrentLong[2]) == (uint32_t)lowpass_width)
            {
                if (SwapInt32BtoN(lpCurrentLong[3]) == (uint32_t)lowpass_height)
                {
                    solid_color = SwapInt32BtoN(lpCurrentLong[1]);
                    // Replicate the 16-bit color into both halves of the word
                    solid_color |= (solid_color << 16);
                    lpCurrentLong += 4;
                }
            }
        }

        // Decode each row in the lowpass image
        for (row = 0; row < lowpass_height; row++)
        {
            int pixels;
            // Start at the first column
            column = 0;
            // Process the rest of the row
            {
                for (; column < lowpass_width; column++)
                {
                    int pixel_value;
                    //int i;
                    // Perform inverse quantization
                    if (column & 1)
                    {
                        // Odd column: use the low half saved from the last word
                        pixel_value = pixels;
                    }
                    else
                    {
                        // Even column: fetch the next big-endian word (or the
                        // solid color) holding two packed 16-bit pixels
                        //pixels = _bswap(*(lpCurrentLong++));
                        if (solid_color == -1)
                            pixels = SwapInt32BtoN(*(lpCurrentLong++));
                        else
                            pixels = solid_color;
                        pixel_value = (pixels >> 16);
                        // Sign-extend the low 16 bits for the next column
                        pixels <<= 16;
                        pixels >>= 16;
                    }
                    // Store the pixel in the lowpass band of the wavelet
                    pixel_value += channeloffset;
                    // pixel_value -= 64;
                    // pixel_value += ((rand() & 0x7fff) - 0x4000);
                    // if(pixel_value < 0) pixel_value = 0;
                    // Clamp to the maximum positive 16-bit pixel value
                    if (pixel_value > 0x7fff) pixel_value = 0x7fff;
                    pLowPassRow[column] = pixel_value;
#if (_DEBUG && DUMPLL)
                    if (channel == 0 && fp)
                        fprintf(fp, "%d\n", pixel_value >> 7);
#endif
                }
            }
            // Advance to the next row in the lowpass image
            pLowPassRow += lowpass_pitch;
        }

#if (_DEBUG && DUMPLL)
        if (channel == 0 && fp)
            fclose(fp);
#endif

#if ERROR_TOLERANT
        // Update the count of bytes used
        stream->nWordsUsed -= (int)(((intptr_t)lpCurrentLong - (intptr_t)stream->lpCurrentWord));
#endif
        // Update the bitstream
        stream->lpCurrentWord = (uint8_t *)lpCurrentLong;
    }
    // Fast path: 8-bit pixels read directly from the stream buffer
    else if (bits_per_pixel == 8 && stream->nBitsFree == BITSTREAM_BUFFER_SIZE)
    {
        uint8_t *lpCurrentByte = (uint8_t *)stream->lpCurrentWord;
        //int signval = 0;
        // Decode each row in the lowpass image
        for (row = 0; row < lowpass_height; row++)
        {
            // Start at the first column
            column = 0;
            // Process the rest of the row
            for (; column < lowpass_width; column++)
            {
                int pixel_value = *(lpCurrentByte++);
                // Perform inverse quantization
#if _ENCODE_CHROMA_ZERO
                if (channel == 0)
                    pixel_value = (quantization * pixel_value) + offset;
                else
                    pixel_value = (pixel_value - offset) * quantization;
#else
                pixel_value = (quantization * pixel_value) + offset;// + colorshift;
#endif
                // Apply the gain around the dequantized midpoint (128).
                // With gain == 128 and colorshift == 0 this is an identity.
                pixel_value -= 128 * quantization;
                pixel_value *= gain;
                pixel_value >>= 7;
                pixel_value += 128 * quantization;
                pixel_value += colorshift;
                // Store the pixel in the lowpass band of the wavelet
                // Multiply by 16 to turn 8-bit into the new 16-bit format
                pLowPassRow[column] = pixel_value * 16;
            }
            // Advance to the next row in the lowpass image
            pLowPassRow += lowpass_pitch;
        }

#if ERROR_TOLERANT
        // Update the count of bytes used
        stream->nWordsUsed -= (int)(((intptr_t)lpCurrentByte - (intptr_t)stream->lpCurrentWord));
#endif
        // Update the bitstream
        stream->lpCurrentWord = (uint8_t *)lpCurrentByte;
    }
    else
    {
        // Generic path: read each pixel with GetBits().  The channel offsets
        // below are only applied for 16-bit pixels (zeroed otherwise).
        int channeloffset = 0;
        if (decoder->codec.precision == 8)
        {
            channeloffset = (codec->num_frames == 2 ? 64 : 32);
        }
        else if (decoder->codec.precision == 10)
        {
            channeloffset = (codec->num_frames == 2 ? 10 : 5);
        }
        else if (decoder->codec.precision == 12)
        {
            // channeloffset = (codec->num_frames==2 ? 4 : 2); // Seems to result in less shift using the viper images
        }

        //DAN20050923 no longer trying to compensate for YUV to RGB issues.
        if (decoder->frame.format == DECODED_FORMAT_RGB24 || decoder->frame.format == DECODED_FORMAT_RGB32)
        {
            if (decoder->codec.precision == 8)
            {
                switch (channel)
                {
                    case 0:
                        channeloffset += 8;
                        break; // fixed rounding error introduced by YUV->RGB
                    case 1:
                        channeloffset += 16;
                        break;
                    case 2:
                        channeloffset += 10;
                        break;
                }
            }
            else if (decoder->codec.precision == 10)
            {
                switch (channel)
                {
                    case 0:
                        channeloffset += -8;
                        break; // fixed rounding error introduced by YUV->RGB
                    case 1:
                        channeloffset += -4;
                        break;
                    case 2:
                        channeloffset += -4;
                        break;
                }
            }
            else if (decoder->codec.precision == 12)
            {
                switch (channel)
                {
                    case 0:
                        channeloffset += 0;
                        break; // fixed rounding error introduced by YUV->RGB
                    case 1:
                        channeloffset += 0;
                        break;
                    case 2:
                        channeloffset += 0;
                        break;
                }
            }
        }

        if (bits_per_pixel != 16)
            channeloffset = 0;

        for (row = 0; row < lowpass_height; row++)
        {
            for (column = 0; column < lowpass_width; column++)
            {
                int pixel_value = GetBits(stream, bits_per_pixel);
                // Perform inverse quantization
#if _ENCODE_CHROMA_ZERO
                if (channel == 0)
                    pixel_value = (quantization * pixel_value) + offset;
                else
                    pixel_value = (pixel_value - offset) * quantization;
#else
                pixel_value = (quantization * pixel_value) + offset;// + colorshift;
#endif
                // Store the pixel in the lowpass band of the wavelet
                pLowPassRow[column] = SATURATE(pixel_value + channeloffset); // DAN20050926 added chromaoffet to match the normal path -- this code will be used for SD (720) encodes
            }
            // Account for the bytes consumed from the stream by this row
            stream->nWordsUsed -= lowpass_width * (bits_per_pixel >> 3);
            // Advance to the next row in the lowpass image
            pLowPassRow += lowpass_pitch;
        }
    }

    // Set the wavelet scale factor
    wavelet->scale[0] = quantization;

    // Align the bitstream to the next tag value pair
    AlignBitsTag(stream);

    // Return indication of lowpass decoding success
    return result;
}
// Decode the coefficients in a highpass band
//
// Reads one highpass band of the wavelet from the bitstream, records the
// scale and quantization that the encoder transmitted, dispatches to the
// decoder for the band encoding method, then verifies the band trailer.
// Returns true on success, false if band decoding or the trailer fails.
bool DecodeSampleHighPassBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band, int threading)
{
    CODEC_STATE *codec = &decoder->codec;
    CODEC_ERROR error = CODEC_ERROR_OKAY;
    bool result = true;

    // Encoding method selected by the encoder for this band
    int method = codec->band.encoding;

    // Dimensions of the encoded band
    int band_width;
    int band_height;
    int quantization;

    // The band index must be within the range of encoded subbands
    assert(0 <= band && band <= codec->max_subband);

    // Encoded coefficients start on a tag boundary
    AlignBitsTag(stream);

    // Copy the scale factor into the wavelet band
    // (zero means the encoder did not supply this parameter)
    if (codec->band.scale > 0)
    {
        wavelet->scale[band] = codec->band.scale;
    }

    // Record the quantization the encoder applied to the band coefficients
    quantization = codec->band.quantization;
    wavelet->quantization[band] = quantization;

    band_width = codec->band.width;
    band_height = codec->band.height;

    switch (method)
    {
        case BAND_ENCODING_LOSSLESS:
            // Lossless temporal subband //DAN20060701
            result = DecodeBand16sLossless(decoder, stream, wavelet, band, band_width, band_height);
            assert(result);
            if (result)
            {
                // Thread safe update of the band valid flags
                UpdateWaveletBandValidFlags(decoder, wavelet, band);
            }
            break;

        case BAND_ENCODING_16BIT:
            // Sixteen-bit temporal subband //DAN20060701
            result = DecodeBand16s(decoder, stream, wavelet, band, band_width, band_height);
            assert(result);
            if (result)
            {
                // Thread safe update of the band valid flags
                UpdateWaveletBandValidFlags(decoder, wavelet, band);
            }
            break;

        default:
            // Must use the runlength encoding method
            assert(codec->band.encoding == BAND_ENCODING_RUNLENGTHS);
            result = DecodeFastRunsFSM16s(decoder, stream, wavelet, band, band_width, band_height, threading);
            break;
    }

    // Report failure from decoding the band coefficients
    if (!result)
    {
        return result;
    }

    // The encoded coefficients end on a bitstream word boundary so the
    // band trailer marker cannot be corrupted by leftover bits
    AlignBits(stream);

    // Read and verify the band trailer
    error = DecodeBandTrailer(stream, NULL);
    decoder->error = error;
    assert(error == CODEC_ERROR_OKAY);
    if (error != CODEC_ERROR_OKAY)
    {
        return false;
    }

    return result;
}
// Decode an empty band
//
// An empty band carries no coefficient data: only the scale and
// quantization parameters are recorded into the wavelet, then the band
// trailer is read and verified.  Returns true on success, false if the
// trailer is invalid.
bool DecodeSampleEmptyBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band)
{
    CODEC_STATE *codec = &decoder->codec;
    CODEC_ERROR error;

    // The band index must be within the range of highpass bands
    assert(0 <= band && band <= CODEC_MAX_HIGHBANDS);

    // The highpass bands must hold 16-bit signed pixels
    assert(wavelet->pixel_type[1] == PIXEL_TYPE_16S);

    // The encoded band must start on a bitstream word boundary
    AlignBits(stream);

    // Copy the scale factor into the wavelet band
    // (zero means the encoder did not supply this parameter)
    if (codec->band.scale > 0)
    {
        wavelet->scale[band] = codec->band.scale;
    }

    // Record the quantization the encoder applied to the band coefficients
    wavelet->quantization[band] = codec->band.quantization;

    // Read and verify the band trailer
    error = DecodeBandTrailer(stream, NULL);
    decoder->error = error;
    assert(error == CODEC_ERROR_OKAY);
    if (error != CODEC_ERROR_OKAY)
    {
        return false;
    }

    // Leave the bitstream aligned on a word boundary past the trailer
    AlignBits(stream);

    return true;
}
// Decode a band that was stored as uncompressed 16-bit coefficients.
//
// Reads width x height signed 16-bit values from the bitstream into the
// selected wavelet band, applying the band's dequantization factor.
// When the factor is 1 a fast path copies bytes straight out of the
// stream buffer (swapping from big-endian byte order); otherwise each
// value is read with GetWord16s() and multiplied by the factor.
// Always returns true.
bool DecodeBand16s(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet,
                   int band_index, int width, int height)
{
    PIXEL *rowptr = wavelet->band[band_index];
    int pitch = wavelet->pitch;
    int row, dequant = wavelet->quantization[band_index];

    // Convert the pitch from bytes to pixels
    pitch /= sizeof(PIXEL);

    //BAND_ENCODING_16BIT
    if (dequant == 1)
    {
        for (row = 0; row < height; row++)
        {
            int column;
#if 0
            for (column = 0; column < width; column++)
            {
                int value = GetWord16s(stream);
                rowptr[column] = value;
            }
#else // Mild speedup (2.5% overall half-res decode improvement.)
            // Copy big-endian 16-bit words directly from the stream buffer,
            // swapping the two bytes of each value into native order
            char *sptr = (char *)stream->lpCurrentWord;
            char *dptr = (char *)rowptr;
            for (column = 0; column < width; column++)
            {
                *(dptr + 1) = *sptr++;
                *dptr = *sptr++;
                dptr += 2;
            }
            stream->lpCurrentWord += width * 2;
            // NOTE(review): this fast path ADDS the consumed byte count to
            // nWordsUsed, while other direct-buffer readers in this file
            // (e.g. the ERROR_TOLERANT updates in DecodeSampleLowPassBand)
            // SUBTRACT consumed bytes.  Confirm against GetWord16s whether
            // the intended sign here is really '+='.
            stream->nWordsUsed += width * 2;
#endif
            rowptr += pitch;
        }
    }
    else
    {
        // General path: read each coefficient and dequantize it
        for (row = 0; row < height; row++)
        {
            int column;
            for (column = 0; column < width; column++)
            {
                int value = GetWord16s(stream);
                rowptr[column] = value * dequant;
            }
            rowptr += pitch;
        }
    }

#if (0 && DEBUG)
    {
        int static count = 0;
        if (count < 20)
        {
            char label[PATH_MAX];
            sprintf(label, "Hightemp-decode-%d-", count);
            DumpBandPGM(label, wavelet, band_index, NULL);
        }
        count++;
    }
#endif

    return true;
}
// Decode a losslessly encoded 16-bit band using the finite state machine.
//
// The coefficients were encoded with the FSM codebook that is currently
// active in the codec state; the whole band is decoded as one long row and
// then dequantized in place.  Resets the active codebook and difference
// coding state for the next subband.  Returns true on success; on any
// failure sets decoder->error to CODEC_ERROR_RUN_DECODE and returns false.
bool DecodeBand16sLossless(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet,
                           int band_index, int width, int height)
{
#if (DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    int result = true;
    int quant = wavelet->quantization[band_index];

    // Get the pointer to the finite state machine
    FSM *fsm = &decoder->fsm[decoder->codec.active_codebook];
    int size;
    PIXEL *rowptr;
    int pitch;

    // Reset the codec state for the next subband
    decoder->codec.active_codebook = 0;
    decoder->codec.difference_coding = 0;

    // Must have a valid wavelet
    assert(wavelet != NULL);
    if (! (wavelet != NULL))
    {
        decoder->error = CODEC_ERROR_RUN_DECODE;
        return false;
    }

    // Must have a valid FSM
    assert(fsm != NULL);
    if (! (fsm != NULL))
    {
        decoder->error = CODEC_ERROR_RUN_DECODE;
        return false;
    }

    // The state machine must have been initialized
    size = fsm->table.num_states;
    assert(size > 0);
    if (size == 0)
    {
        decoder->error = CODEC_ERROR_RUN_DECODE;
        return false;
    }

    // The band must hold 16-bit signed pixels
    assert(wavelet->pixel_type[band_index] == PIXEL_TYPE_16S);

    rowptr = (PIXEL *)wavelet->band[band_index];
    pitch = wavelet->pitch;
    assert(rowptr != NULL && pitch != 0);
    if (! (rowptr != NULL && pitch != 0))
    {
        decoder->error = CODEC_ERROR_RUN_DECODE;
        return false;
    }

    // Cannot fold dequantization into the FSM because the coefficients are
    // split into high and low bytes during encoding
    DeQuantFSM(fsm, 1);

    // All rows are treated as one long row that covers the entire band
    if (!DecodeBandFSM16sNoGap2Pass(fsm, stream, (PIXEL16S *)rowptr, width, height, pitch, quant))
    {
        decoder->error = CODEC_ERROR_RUN_DECODE;
        return false;
    }

    // Dequantize the band in place.  Skip the pass entirely when quant is
    // zero (original behavior) or one (multiplication would be a no-op).
    // Multiplication is used for every factor, including 32: the previous
    // special case shifted left by 5, which is undefined behavior for
    // negative coefficients (C11 6.5.7p4) and is identical to multiplying
    // by 32 on two's complement targets anyway.
    if (quant != 0 && quant != 1)
    {
        int x, y;
        PIXEL *line = rowptr;
        for (y = 0; y < height; y++)
        {
            for (x = 0; x < width; x++)
            {
                line[x] *= quant;
            }
            // pitch is in bytes; advance by one row of 16-bit pixels
            line += pitch / 2;
        }
    }

    assert(result == true);
    if (! (result == true))
    {
        decoder->error = CODEC_ERROR_RUN_DECODE;
        return false;
    }

    return true;
}
// Invert the wavelet to reconstruct the lower wavelet in the transform
//
// Given the wavelet at position 'index' in the channel's transform, runs the
// appropriate inverse transform to produce the band(s) of the wavelet one
// level below.  The branch taken depends on the transform type and index:
//   - spatial transform, index > 0: inverse spatial into the lowpass band
//     of wavelet[index-1]
//   - fieldplus transform, index > 3: inverse spatial into the lowpass band
//     of the wavelet below (index-1, or index-2 when index == 4 to skip over
//     the temporal highpass wavelet at slot 3)
//   - index == 3: inverse spatial into the HIGHPASS band of the temporal
//     wavelet (slot 2)
//   - index == 2: inverse temporal transform producing the two frame
//     wavelets (slots 0 and 1)
// When allocations_only is non-zero only the destination wavelets are
// (re)allocated and no inverse transform is performed.
void ReconstructWaveletBand(DECODER *decoder, TRANSFORM *transform, int channel,
                            IMAGE *wavelet, int index, int precision,
                            const SCRATCH *scratch, int allocations_only)
{
    int transform_type = transform->type;
    int width = wavelet->width;
    int height = wavelet->height;
    int level = wavelet->level;

    // Scratch space used by the inverse transforms
    PIXEL *buffer = (PIXEL *)scratch->free_ptr;
    size_t buffer_size = scratch->free_size;

    // Is the current wavelet a spatial wavelet?
    if (transform_type == TRANSFORM_TYPE_SPATIAL && index > 0)
    {
        // Reconstruct the lowpass band in the lower wavelet
        int lowpass_index = index - 1;
        IMAGE *lowpass = transform->wavelet[lowpass_index];
        int lowpass_width = 2 * width;
        int lowpass_height = 2 * height;
        int lowpass_level = level - 1;
        int lowpass_type = (lowpass_index == 0) ? WAVELET_TYPE_FRAME : WAVELET_TYPE_SPATIAL;
        //const int prescale = 1;
        const bool inverse_prescale = (precision >= CODEC_PRECISION_10BIT);
        int prescale = transform->prescale[index];
#if _THREADED_DECODER
        // Allocate (or reallocate) the wavelet with thread safety
        lowpass = GetWaveletThreadSafe(decoder, transform, lowpass_index,
                                       lowpass_width, lowpass_height,
                                       lowpass_level, lowpass_type);
#else
        // Allocate the wavelet if not already allocated
#if _ALLOCATOR
        lowpass = ReallocWaveletEx(decoder->allocator, lowpass, lowpass_width, lowpass_height, lowpass_level, lowpass_type);
#else
        lowpass = ReallocWaveletEx(lowpass, lowpass_width, lowpass_height, lowpass_level, lowpass_type);
#endif
        transform->wavelet[lowpass_index] = lowpass;
#endif
        // Check that the lowpass band has not already been reconstructed
        //assert((lowpass->band_valid_flags & BAND_VALID_MASK(0)) == 0);
        if (!allocations_only)
        {
            // Check that all of the wavelet bands have been decoded
            assert(BANDS_ALL_VALID(wavelet));
            // Has this wavelet already been reconstructed?
            if ((lowpass->band_valid_flags & BAND_VALID_MASK(0)) == 0)
            {
                // Perform the inverse spatial transform before decoding the next wavelet
                STOP(tk_decoding);
                START(tk_inverse);
                //TransformInverseSpatialQuantLowpass(wavelet, lowpass, buffer, buffer_size, prescale, inverse_prescale);
                TransformInverseSpatialQuantLowpass(wavelet, lowpass, scratch, prescale, inverse_prescale);
                STOP(tk_inverse);
                START(tk_decoding);
                // Call thread safe routine to update the band valid flags
                UpdateWaveletBandValidFlags(decoder, lowpass, 0);
#if TIMING
                // Increment the count of spatial transforms performed during decoding
                spatial_decoding_count++;
#endif
            }
        }
    }
    // Is the current wavelet a spatial wavelet above the temporal lowpass band?
    else if (index > 3)
    {
        // Reconstruct the lowpass band in the lower wavelet
        const int temporal_wavelet_index = 2;
        // Wavelet 4 sits directly above the temporal wavelet at slot 2,
        // so step down by two; higher wavelets step down by one
        int lowpass_index = (index > 4) ? index - 1 : index - 2;
        IMAGE *lowpass = transform->wavelet[lowpass_index];
        int lowpass_width = 2 * width;
        int lowpass_height = 2 * height;
        int lowpass_level = level - 1;
        int lowpass_type = ((lowpass_index == temporal_wavelet_index) ? WAVELET_TYPE_TEMPORAL : WAVELET_TYPE_SPATIAL);
        //const int prescale = 2;
        const bool inverse_prescale = (precision >= CODEC_PRECISION_10BIT);
        int prescale = transform->prescale[index];
#if _THREADED_DECODER
        // Allocate (or reallocate) the wavelet with thread safety
        lowpass = GetWaveletThreadSafe(decoder, transform, lowpass_index,
                                       lowpass_width, lowpass_height,
                                       lowpass_level, lowpass_type);
#else
        // Allocate the wavelet if not already allocated
#if _ALLOCATOR
        lowpass = ReallocWaveletEx(decoder->allocator, lowpass, lowpass_width, lowpass_height, lowpass_level, lowpass_type);
#else
        lowpass = ReallocWaveletEx(lowpass, lowpass_width, lowpass_height, lowpass_level, lowpass_type);
#endif
        transform->wavelet[lowpass_index] = lowpass;
#endif
        if (!allocations_only)
        {
            // Check that the lowpass band has not already been reconstructed
            assert((lowpass->band_valid_flags & BAND_VALID_MASK(0)) == 0);
            // Check that all of the wavelet bands have been decoded
            assert(BANDS_ALL_VALID(wavelet));
            // Perform the inverse spatial transform before decoding the next wavelet
            STOP(tk_decoding);
            START(tk_inverse);
            //TransformInverseSpatialQuantLowpass(wavelet, lowpass, buffer, buffer_size, prescale, inverse_prescale);
            TransformInverseSpatialQuantLowpass(wavelet, lowpass, scratch, prescale, inverse_prescale);
            STOP(tk_inverse);
            START(tk_decoding);
            // Call thread safe routine to update the band valid flags
            UpdateWaveletBandValidFlags(decoder, lowpass, 0);
#if TIMING
            // Increment the count of spatial transforms performed during decoding
            spatial_decoding_count++;
#endif
        }
    }
    // Is the current wavelet the spatial wavelet above the temporal highpass band?
    else if (index == 3)
    {
        // Reconstruct the highpass band in the temporal wavelet
        const int temporal_wavelet_index = 2;
        int highpass_index = index - 1;
        IMAGE *highpass = transform->wavelet[highpass_index];
        int highpass_width = 2 * width;
        int highpass_height = 2 * height;
        int highpass_level = level - 1;
        int highpass_type = ((highpass_index == temporal_wavelet_index) ? WAVELET_TYPE_TEMPORAL : WAVELET_TYPE_SPATIAL);
        const bool inverse_prescale = (precision >= CODEC_PRECISION_10BIT);
        // Unlike the other branches, prescale is forced to zero here when
        // prescaling is not being inverted
        int prescale = inverse_prescale ? transform->prescale[index] : 0;
#if _THREADED_DECODER
        // Allocate (or reallocate) the wavelet with thread safety
        highpass = GetWaveletThreadSafe(decoder, transform, highpass_index,
                                       highpass_width, highpass_height,
                                       highpass_level, highpass_type);
#else
        // Allocate the wavelet if not already allocated
#if _ALLOCATOR
        highpass = ReallocWaveletEx(decoder->allocator, highpass, highpass_width, highpass_height, highpass_level, highpass_type);
#else
        highpass = ReallocWaveletEx(highpass, highpass_width, highpass_height, highpass_level, highpass_type);
#endif
        transform->wavelet[highpass_index] = highpass;
#endif
        if (!allocations_only)
        {
            // Check that the highpass band has not already been reconstructed
            assert((highpass->band_valid_flags & BAND_VALID_MASK(1)) == 0);
            // Check that all of the wavelet bands have been decoded
            assert(BANDS_ALL_VALID(wavelet));
            // Perform the inverse spatial transform before decoding the next wavelet
            STOP(tk_decoding);
            START(tk_inverse);
            TransformInverseSpatialQuantHighpass(wavelet, highpass, buffer, buffer_size, prescale);
            STOP(tk_inverse);
            START(tk_decoding);
            // Call thread safe routine to update the band valid flags
            UpdateWaveletBandValidFlags(decoder, highpass, 1);
#if TIMING
            // Increment the count of spatial transforms performed during decoding
            spatial_decoding_count++;
#endif
        }
    }
    // Is the current wavelet the temporal wavelet?
    else if (index == 2)
    {
        // Get the temporal wavelet
        IMAGE *temporal = wavelet;
        // Set the frame wavelet parameters
        int frame_level = 1;
        int frame_type = WAVELET_TYPE_FRAME;
        // Get the two frame wavelets
        IMAGE *frame[2];
        frame[0] = transform->wavelet[0];
        frame[1] = transform->wavelet[1];
        // Check that the temporal wavelet is valid
        assert(temporal->num_bands == 2 && temporal->wavelet_type == WAVELET_TYPE_TEMPORAL);
#if _THREADED_DECODER
        // Allocate (or reallocate) the frame wavelets with thread safety
        frame[0] = GetWaveletThreadSafe(decoder, transform, 0, width, height, frame_level, frame_type);
        frame[1] = GetWaveletThreadSafe(decoder, transform, 1, width, height, frame_level, frame_type);
#else
        // Allocate the frame wavelets if not already allocated
#if _ALLOCATOR
        frame[0] = ReallocWaveletEx(decoder->allocator, frame[0], width, height, frame_level, frame_type);
        frame[1] = ReallocWaveletEx(decoder->allocator, frame[1], width, height, frame_level, frame_type);
#else
        frame[0] = ReallocWaveletEx(frame[0], width, height, frame_level, frame_type);
        frame[1] = ReallocWaveletEx(frame[1], width, height, frame_level, frame_type);
#endif
        transform->wavelet[0] = frame[0];
        transform->wavelet[1] = frame[1];
#endif
        // NOTE: the disabled block below references 'logfile', which is not
        // declared in this function — dead debug code kept for reference
#if (0 && DEBUG)
        if (logfile)
        {
            fprintf(logfile, "Before inverse temporal transform");
            DumpArray16s("Temporal Lowpass", temporal->band[0], temporal->width, temporal->height, temporal->pitch, logfile);
            DumpArray16s("Temporal Highpass", temporal->band[1], temporal->width, temporal->height, temporal->pitch, logfile);
        }
#endif
        if (!allocations_only)
        {
            // Check that the lowpass bands have not already been reconstructed
            assert((frame[0]->band_valid_flags & BAND_VALID_MASK(0)) == 0);
            assert((frame[1]->band_valid_flags & BAND_VALID_MASK(0)) == 0);
            // Check that all of the wavelet bands have been decoded
            assert(BANDS_ALL_VALID(temporal));
            // Invert the temporal transform between the frame wavelets
            STOP(tk_decoding);
            START(tk_inverse);
            TransformInverseTemporalQuant(temporal, frame[0], frame[1], buffer, buffer_size, precision);
            STOP(tk_inverse);
            START(tk_decoding);
            // NOTE: the disabled block below references 'quad[0]', which is
            // not declared in this function — dead debug code kept for reference
#if (0 && DEBUG)
            if (logfile)
            {
                IMAGE *wavelet = quad[0];
                fprintf(logfile, "After inverse temporal transform\n");
                DumpArray16s("Temporal Lowpass", temporal->band[0], temporal->width, temporal->height, temporal->pitch, logfile);
                DumpArray16s("Temporal Highpass", temporal->band[1], temporal->width, temporal->height, temporal->pitch, logfile);
                DumpArray16s("First frame wavelet, band 0", wavelet->band[0], wavelet->width, wavelet->height, wavelet->pitch, logfile);
            }
#endif
            // Call thread safe routine to update the band valid flags
            UpdateWaveletBandValidFlags(decoder, frame[0], 0);
            UpdateWaveletBandValidFlags(decoder, frame[1], 0);
#if TIMING
            // Increment the number of temporal transforms performed outside of decoding
            temporal_decoding_count++;
#endif
        }
    }
}
// Compute the dimensions of the output buffer
//
// Selects the wavelet that will be used for decoding at the requested
// resolution and derives the decoded frame dimensions from its size.
// The results are stored through the output pointers; both are cleared
// to zero first so an early exit leaves well-defined values.
void ComputeOutputDimensions(DECODER *decoder, int frame,
                             int *decoded_width_out, int *decoded_height_out)
{
#if (DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    CODEC_STATE *codec = &decoder->codec;
    int num_channels = codec->num_channels;
    FRAME_INFO *info = &decoder->frame;
    TRANSFORM **transform_array = decoder->transform;
    IMAGE *wavelet = NULL;
    int resolution = info->resolution;
    int scale = 0;
    int output_width;
    int output_height;

    // Both output pointers are required
    if (decoded_width_out == NULL || decoded_height_out == NULL)
    {
        return;
    }

    // Clear the return values in case this routine terminates early
    *decoded_width_out = 0;
    *decoded_height_out = 0;

    // Choose the wavelet and the scale factor for the decoding resolution
    switch (resolution)
    {
        case DECODED_RESOLUTION_FULL:
        case DECODED_RESOLUTION_HALF_HORIZONTAL:
#if DEBUG
            assert(AllTransformBandsValid(transform_array, num_channels, frame));
#endif
            scale = 2;
            wavelet = transform_array[0]->wavelet[0];
            break;

        case DECODED_RESOLUTION_HALF:
#if DEBUG
            assert(AllLowpassBandsValid(transform_array, num_channels, frame));
#endif
            scale = 1;
            wavelet = transform_array[0]->wavelet[0];
            break;

        case DECODED_RESOLUTION_QUARTER:
            scale = 1;
            if (decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
            {
#if DEBUG
                assert(AllLowpassBandsValid(transform_array, num_channels, frame));
#endif
                wavelet = transform_array[0]->wavelet[0];
            }
            else
            {
                wavelet = transform_array[0]->wavelet[3];
            }
            break;

        case DECODED_RESOLUTION_LOWPASS_ONLY:
            scale = 1;
            wavelet = transform_array[0]->wavelet[5];
            if (wavelet == NULL) // Intra frame compressed sample
            {
                wavelet = transform_array[0]->wavelet[2];
            }
            break;

        default:
            assert(0);
            break;
    }

    // A wavelet must have been selected above
    assert(wavelet != NULL);

    // Half-horizontal keeps the wavelet width; other modes scale it
    if (resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
    {
        output_width = wavelet->width;
    }
    else
    {
        output_width = scale * wavelet->width;
    }
    output_height = scale * wavelet->height;

    // Return the decoded width and height
    *decoded_width_out = output_width;
    *decoded_height_out = output_height;
}
#define DEBUG_ROW16U 0
void ReconstructSampleFrameToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch)
{
FRAME_INFO local_info;
#if (DEBUG)
FILE *logfile = decoder->logfile;
#endif
FRAME_INFO *info = &local_info;
CODEC_STATE *codec = &decoder->codec;
int num_channels = codec->num_channels;
int progressive = codec->progressive;
TRANSFORM **transform_array = decoder->transform;
IMAGE *lowpass_images[TRANSFORM_MAX_CHANNELS];
IMAGE *wavelet;
int wavelet_width;
int wavelet_height;
int decoded_width;
int decoded_height;
int resolution = decoder->frame.resolution;
int chroma_offset = decoder->codec.chroma_offset;
int uncompressed = decoder->uncompressed_chunk && decoder->uncompressed_size && decoder->sample_uncompressed;
//TODO: Change this routine to return the codec error code
CODEC_ERROR error = CODEC_ERROR_OKAY;
//TODO: Change this routine to return an error code
if (decoder == NULL)
{
return;
}
decoder->gop_frame_num = frame;
#if _THREADED_DECODER
// Wait until the transform thread has finished all pending transforms
WaitForTransformThread(decoder);
#endif
//return;
// copy frame info in a changable local structure
memcpy(info, &decoder->frame, sizeof(FRAME_INFO));
// Use the old code for reconstructing the frame
#if (0 && DEBUG)
// Force quarter resolution decoding for debugging that feature
resolution = DECODED_RESOLUTION_QUARTER;
#endif
#if (0 && DEBUG)
if (logfile)
{
fprintf(logfile, "Inverting last wavelet, frame: %d\n", frame);
}
#endif
// The decoder can decode a video sample without returning a frame
if (output == NULL || pitch == 0) return;
#if (DEBUG_ROW16U)
// Force decoding to 16-bit pixels for debugging
info->format = DECODED_FORMAT_YR16;
#endif
#if 0
if (info->format == DECODED_FORMAT_YR16)
{
// Force interlaced or progressive decoding for debugging
//progressive = false;
progressive = true;
}
#endif
#if (0 && DEBUG)
if (logfile)
{
fprintf(logfile, "Decoder flags: 0x%p\n", decoder->flags);
}
#endif
// Does this frame have to be reconstructed?
if ((decoder->flags & DECODER_FLAGS_RENDER) == 0)
{
#if (0 && DEBUG)
if (logfile)
{
fprintf(logfile, "Decoder discarding frame: %d\n", frame);
}
#endif
return;
}
// Check that the requested frame is within the limits of the group of frames
assert(0 <= frame && frame < decoder->gop_length);
// Check that the frame resolution is valid
assert(IsValidFrameResolution(resolution));
if (!IsValidFrameResolution(resolution))
{
decoder->error = CODEC_ERROR_RESOLUTION;
return;
}
#if (0 && TIMING) //(0 && DEBUG)
// Override progressive flag read from the bitstream for debugging
//progressive = 0; // Use the inverse frame transform
progressive = 1; // Use the inverse spatial transform
#endif
// Build the 3D LUTs if needed
ComputeCube(decoder);
//HACK DAN20110131 -- some formats will not directly decode so need to use the AM route
{
if ( decoder->codec.encoded_format == ENCODED_FORMAT_YUV_422 &&
resolution == DECODED_RESOLUTION_HALF)
{
if ( decoder->frame.format == COLOR_FORMAT_R408 ||
decoder->frame.format == COLOR_FORMAT_V408)
{
decoder->use_active_metadata_decoder = true;
decoder->apply_color_active_metadata = true;
}
}
if ( decoder->frame.format == COLOR_FORMAT_NV12)
{
decoder->use_active_metadata_decoder = true;
decoder->apply_color_active_metadata = true; // TODO, make it work with this.
}
if (decoder->codec.progressive == false && decoder->frame.format == COLOR_FORMAT_RGB24)
{
decoder->use_active_metadata_decoder = true;
decoder->apply_color_active_metadata = true;
}
}
// Get the decoding scale
if (!uncompressed)
{
switch (resolution)
{
case DECODED_RESOLUTION_FULL:
case DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER:
#if DEBUG
assert(AllTransformBandsValid(transform_array, num_channels, frame));
#endif
wavelet = transform_array[0]->wavelet[0];
// Get the decoded frame dimensions
assert(wavelet != NULL);
wavelet_width = wavelet->width;
wavelet_height = wavelet->height;
decoded_width = 2 * wavelet_width;
decoded_height = 2 * wavelet_height;
break;
case DECODED_RESOLUTION_HALF:
#if DEBUG
assert(AllLowpassBandsValid(transform_array, num_channels, frame));
#endif
wavelet = transform_array[0]->wavelet[0];
// Get the decoded frame dimensions
assert(wavelet != NULL);
wavelet_width = wavelet->width;
wavelet_height = wavelet->height;
decoded_width = wavelet_width;
decoded_height = wavelet_height;
break;
case DECODED_RESOLUTION_HALF_HORIZONTAL:
#if DEBUG
assert(AllLowpassBandsValid(transform_array, num_channels, frame));
#endif
wavelet = transform_array[0]->wavelet[0];
// Get the decoded frame dimensions
assert(wavelet != NULL);
wavelet_width = wavelet->width;
wavelet_height = wavelet->height;
decoded_width = wavelet_width;
decoded_height = 2 * wavelet_height;
break;
case DECODED_RESOLUTION_QUARTER:
if (decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
{
#if DEBUG
assert(AllLowpassBandsValid(transform_array, num_channels, frame));
#endif
wavelet = transform_array[0]->wavelet[0];
}
else
{
wavelet = transform_array[0]->wavelet[3];
}
// Get the decoded frame dimensions
assert(wavelet != NULL);
wavelet_width = wavelet->width;
wavelet_height = wavelet->height;
decoded_width = wavelet_width;
decoded_height = wavelet_height;
break;
case DECODED_RESOLUTION_LOWPASS_ONLY:
wavelet = transform_array[0]->wavelet[5];
        if (wavelet == NULL) // NULL therefore Intra Frame compressed
wavelet = transform_array[0]->wavelet[2];
// Get the decoded frame dimensions
assert(wavelet != NULL);
wavelet_width = wavelet->width;
wavelet_height = wavelet->height;
decoded_width = wavelet_width;
decoded_height = wavelet_height;
break;
default:
assert(0);
break;
}
}
else
{
if (decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
{
decoded_width = info->width / 2;
decoded_height = info->height / 2;
}
else
{
decoded_width = info->width;
decoded_height = info->height;
}
}
if (decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
{
if (resolution == DECODED_RESOLUTION_FULL)
{
if (decoded_width * 2 == info->width)
{
info->width /= 2;
info->height /= 2;
info->resolution = resolution = DECODED_RESOLUTION_FULL_DEBAYER;
}
}
else if (resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER)
{
if (decoded_width * 2 == info->width)
{
info->width /= 2;
info->height /= 2;
}
}
else if (resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
{
if (decoded_width * 2 == info->width)
{
info->height /= 2;
info->resolution = resolution = DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER;
}
}
else if (decoder->frame.format == DECODED_FORMAT_BYR2 || decoder->frame.format == DECODED_FORMAT_BYR4)
{
if (decoded_width * 2 == info->width)
{
info->width /= 2;
info->height /= 2;
info->resolution = resolution = DECODED_RESOLUTION_HALF_NODEBAYER;
}
}
else
{
if (resolution == DECODED_RESOLUTION_HALF)
{
if (decoded_width * 2 == info->width)
{
decoded_width *= 2;
decoded_height *= 2;
info->resolution = resolution = DECODED_RESOLUTION_FULL;
}
}
else if (resolution == DECODED_RESOLUTION_QUARTER)
{
if (uncompressed)
{
decoded_width *= 2;
decoded_height *= 2;
info->resolution = resolution = DECODED_RESOLUTION_QUARTER_NODEBAYER_SCALED;
}
else
{
if (decoded_width == info->width)
{
info->resolution = resolution = DECODED_RESOLUTION_HALF;
}
}
}
}
}
if (uncompressed)
{
// Call the appropriate routine for the encoded format
switch (decoder->codec.encoded_format)
{
case ENCODED_FORMAT_YUVA_4444: // Four planes of YUVA 4:4:4:4
// Not implemented
assert(0);
error = CODEC_ERROR_UNSUPPORTED_FORMAT;
break;
case ENCODED_FORMAT_BAYER: // Bayer encoded data
// Add new code here for the final steps in decoding the Bayer format
error = UncompressedSampleFrameBayerToBuffer(decoder, info, frame, output, pitch);
break;
case ENCODED_FORMAT_YUV_422: // Original encoding scheme for YUV 4:2:2 (always v210)
error = UncompressedSampleFrameYUVToBuffer(decoder, info, frame, output, pitch);//CODEC_ERROR_UNSUPPORTED_FORMAT;
break;
case ENCODED_FORMAT_RGB_444: // Original encoding scheme for RGB 444 (always DPX0)
error = UncompressedSampleFrameRGBToBuffer(decoder, info, frame, output, pitch);//CODEC_ERROR_UNSUPPORTED_FORMAT;
break;
default:
// Fall through into the old code for reconstructing frames
error = CODEC_ERROR_UNSUPPORTED_FORMAT;
break;
}
}
else
{
// Call the appropriate routine for the encoded format
switch (decoder->codec.encoded_format)
{
case ENCODED_FORMAT_RGB_444: // channels = decoder->codec.num_channels; planes of RGB 4:4:4
case ENCODED_FORMAT_RGBA_4444: // Four planes of ARGB 4:4:4:4
error = ReconstructSampleFrameRGB444ToBuffer(decoder, frame, output, pitch);
break;
case ENCODED_FORMAT_YUVA_4444: // Four planes of YUVA 4:4:4:4
// Not implemented
assert(0);
//error = ReconstructSampleFrameYUVA4444ToBuffer(decoder, frame, output, pitch);
break;
case ENCODED_FORMAT_BAYER: // Bayer encoded data
// Add new code here for the final steps in decoding the Bayer format
error = ReconstructSampleFrameBayerToBuffer(decoder, info, frame, output, pitch);
break;
case ENCODED_FORMAT_YUV_422: // Original encoding scheme for YUV 4:2:2
// Add new code here for the final steps in decoding the original YUV 4:2:2 format
error = ReconstructSampleFrameYUV422ToBuffer(decoder, frame, output, pitch);
break;
default:
// Fall through into the old code for reconstructing frames
error = CODEC_ERROR_UNSUPPORTED_FORMAT;
break;
}
}
// Was the newer code able to successfully reconstruct the frame?
if (error != CODEC_ERROR_UNSUPPORTED_FORMAT)
{
// Save the codec error code in the decoder state and return
decoder->error = error;
return;
}
#if (0 && DEBUG)
if (logfile)
{
fprintf(logfile, "Decoded scale: %d, decoded width: %d, wavelet width: %d\n", decoded_scale, decoded_width, wavelet_width);
}
#endif
#if (0 && DEBUG)
if (logfile)
{
fprintf(logfile, "Decoded width: %d, height: %d, frame width: %d, height: %d, output pitch: %d\n",
decoded_width, decoded_height, info->width, info->height, pitch);
}
#endif
#if (0 && DEBUG)
if (logfile)
{
IMAGE *wavelet = transform[0]->wavelet[frame];
int band = 0;
fprintf(logfile, "Luminance wavelet, frame: %d, band: %d\n", frame, band);
DumpArray16s("Lowpass Band", wavelet->band[band], wavelet->width, wavelet->height, wavelet->pitch, logfile);
}
#endif
// Check that the requested frame is large enough to hold the decoded frame
#if (0 && DEBUG)
//if (! (info->width >= decoded_width))
{
if (logfile)
{
//fprintf(logfile, "Requested frame not large enough to hold decoded frame: %d < %d\n", info->width, decoded_width);
fprintf(logfile, "Output frame width: %d, decoded frame width: %d\n", info->width, decoded_width);
}
}
#endif
assert(info->width >= decoded_width);
assert((info->height + 7) / 8 >= (decoded_height + 7) / 8);
if (!(info->width >= decoded_width && (info->height + 7) / 8 >= (decoded_height + 7) / 8))
{
decoder->error = CODEC_ERROR_FRAMESIZE;
return;
}
#if (0 && DEBUG)
if (logfile)
{
//SUBIMAGE subimage = SUBIMAGE_UPPER_LEFT(16, 16);
SUBIMAGE subimage = SUBIMAGE_UPPER_RIGHT(16, 16);
// Adjust the subimage to be at the middle of the right border
//subimage.row += wavelet_height/2 - 8;
DumpBand("SIF Image", wavelet, 0, &subimage, logfile);
}
#endif
START(tk_inverse);
if (resolution == DECODED_RESOLUTION_QUARTER)
{
int precision = codec->precision;
// Reconstruct the frame to quarter resolution
ReconstructQuarterFrame(decoder, num_channels, frame, output, pitch,
info, &decoder->scratch, precision);
}
else
// Was the first transform a frame transform (used for interlaced frames)?
if (!progressive)
{
// Can the inverse frame transform and output byte packing be done in one pass?
if ((resolution == DECODED_RESOLUTION_FULL) &&
(info->format == DECODED_FORMAT_YUYV || info->format == DECODED_FORMAT_UYVY))
{
// Apply the inverse frame transform and pack the results into the output buffer
int precision = codec->precision;
#if (0 && DEBUG)
DumpWaveletBandsPGM(wavelet, frame, num_channels);
#endif
#if _INTERLACED_WORKER_THREADS
StartInterlaceWorkerThreads(decoder);
//TODO: support new threading
// Send the upper and lower rows of the transforms to the worker threads
TransformInverseFrameThreadedToYUV(decoder, frame, num_channels, output, pitch,
info, chroma_offset, precision);
#else
// Transform the wavelets for each channel to the output image (not threaded)
TransformInverseFrameToYUV(transform_array, frame, num_channels, output, pitch,
info, &decoder->scratch, chroma_offset, precision);
#endif
}
//#if BUILD_PROSPECT
else if (resolution == DECODED_RESOLUTION_FULL && info->format == DECODED_FORMAT_YR16)
{
// Apply the inverse frame transform and output rows of luma and chroma
//DWORD dwThreadID1;
//DWORD dwThreadID2;
//HANDLE thread1;
//HANDLE thread2;
int precision = codec->precision;
#if _INTERLACED_WORKER_THREADS
StartInterlaceWorkerThreads(decoder);
//TODO: support new threading
// Send the upper and lower rows of the transforms to the worker threads
TransformInverseFrameThreadedToRow16u(decoder, frame, num_channels,
(PIXEL16U *)output, pitch,
info, chroma_offset, precision);
#else
// Transform the wavelets for each channel to the output image (not threaded)
TransformInverseFrameToRow16u(decoder, transform_array, frame, num_channels,
(PIXEL16U *)output, pitch, info,
&decoder->scratch, chroma_offset, precision);
#endif
}
//#endif
else
{
// Reconstruct the frame as separate planes and combine the planes into a packed output image
int channel;
if (resolution == DECODED_RESOLUTION_LOWPASS_ONLY)
{
int scale = 13;
for (channel = 0; channel < num_channels; channel++)
{
lowpass_images[channel] = transform_array[channel]->wavelet[5];
                        if (lowpass_images[channel] == NULL) // therefore Intra Frame compressed.
{
scale = 12;
lowpass_images[channel] = transform_array[channel]->wavelet[2];
}
}
STOP(tk_inverse);
CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, info, chroma_offset,
scale, decoder->codec.encoded_format, decoder->frame.white_point);
START(tk_inverse);
}
else
// In SIF resolution, no need to reconstruct the bottom-level wavelet transforms
// Just copy the lowpass images directly into output frame
if (resolution == DECODED_RESOLUTION_HALF)
{
int precision = codec->precision;
for (channel = 0; channel < num_channels; channel++)
{
lowpass_images[channel] = transform_array[channel]->wavelet[frame];
}
STOP(tk_inverse);
CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, info, chroma_offset,
precision, decoder->codec.encoded_format, decoder->frame.white_point);
START(tk_inverse);
}
// In full resolution, reconstruct the frame wavelet and
// convert the YUYV output to the specified color format
else
{
int precision = codec->precision;
TransformInverseFrameToBuffer(transform_array, frame, num_channels, output, pitch,
info, &decoder->scratch, chroma_offset, precision);
}
}
}
else // The first transform was a spatial transform (used for progressive frames)
{
// Can the inverse frame transform and output byte packing be done in one pass?
if ((resolution == DECODED_RESOLUTION_FULL) &&
(info->format == DECODED_FORMAT_YUYV || info->format == DECODED_FORMAT_UYVY) && // Output YUV
decoder->thread_cntrl.capabilities & _CPU_FEATURE_SSE2)
{
int precision = codec->precision;
//DWORD dwThreadID1;
//DWORD dwThreadID2;
//HANDLE thread1;
//HANDLE thread2;
// Apply the inverse frame transform and pack the results into the output buffer
#if _THREADED
if (decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
{
uint8_t *pixoutput = output;
if (decoder->use_active_metadata_decoder) //WIP
{
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
pixoutput, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sBayerThruLUT);
}
else
{
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
pixoutput, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sToBayerYUV);
}
}
else if ((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sRGB2YUV);
}
else
{
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sToYUV);
}
#else
//TODO : Accelerated BAYER for single thread decoding.
assert(0);
// Transform the wavelets for each channel to the output image (not threaded)
//TransformInverseSpatialToYUV(decoder, transform_array, frame, num_channels, output, pitch, info,
// &decoder->scratch, chroma_offset, precision);
#endif
}
else if ((resolution == DECODED_RESOLUTION_FULL) && decoder->codec.encoded_format == ENCODED_FORMAT_BAYER &&
(info->format == DECODED_FORMAT_RGB24 || info->format == DECODED_FORMAT_RGB32) && // Output RGB
decoder->thread_cntrl.capabilities & _CPU_FEATURE_SSE2 && decoder->use_active_metadata_decoder)
{
int precision = codec->precision;
//DWORD dwThreadID1;
//DWORD dwThreadID2;
//HANDLE thread1;
//HANDLE thread2;
// Apply the inverse frame transform and pack the results into the output buffer
#if _THREADED
{
uint8_t *pixoutput = output;
if (info->format == DECODED_FORMAT_RGB24 || info->format == DECODED_FORMAT_RGB32)
{
pixoutput += (info->height - 1) * pitch;
pitch = -pitch;
}
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
pixoutput, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sBayerThruLUT);
}
#endif
}
//#if BUILD_PROSPECT
else if (resolution == DECODED_RESOLUTION_FULL && info->format == DECODED_FORMAT_YR16)
{
// Apply the inverse frame transform and output rows of luma and chroma
int precision = codec->precision;
#if _THREADED
TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame, num_channels,
(uint8_t *)output, pitch,
info, chroma_offset, precision);
#else
// Transform the wavelets for each channel to the output image (not threaded)
TransformInverseSpatialToRow16u(transform_array, frame, num_channels,
(PIXEL16U *)output, pitch, info,
&decoder->scratch, chroma_offset, precision);
#endif
}
//#endif
else
{
// Reconstruct the frame as separate planes and combine the planes into a packed output image
int channel;
if (resolution == DECODED_RESOLUTION_LOWPASS_ONLY)
{
//int precision = codec->precision;
int scale = 13;
//DAN20081203 -- fix for 444 decodes in AE32-bit float
decoder->frame.white_point = 16;
//decoder->frame.signed_pixels = 0;
for (channel = 0; channel < num_channels; channel++)
{
lowpass_images[channel] = transform_array[channel]->wavelet[5];
                        if (lowpass_images[channel] == NULL) // therefore Intra Frame compressed.
{
scale = 12;
lowpass_images[channel] = transform_array[channel]->wavelet[2];
}
}
STOP(tk_inverse);
CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, info, chroma_offset,
scale, decoder->codec.encoded_format, decoder->frame.white_point);
START(tk_inverse);
}
else
// In SIF resolution, no need to reconstruct the bottom-level wavelet transforms
// Just copy the lowpass images directly into output frame
if (resolution == DECODED_RESOLUTION_HALF || resolution == DECODED_RESOLUTION_HALF_NODEBAYER)// || resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER)
{
int precision = codec->precision;
for (channel = 0; channel < num_channels; channel++)
{
lowpass_images[channel] = transform_array[channel]->wavelet[frame];
#if (0 && DEBUG)
if (logfile)
{
char label[PATH_MAX];
char *format = decoded_format_string[info->format];
sprintf(label, "Output, channel: %d, format: %s", channel, format);
DumpImageStatistics(label, lowpass_images[channel], logfile);
}
#endif
}
STOP(tk_inverse);
#if BAYER_SUPPORT
if (decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
{
#if _THREADED
WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
if (decoder->worker_thread.pool.thread_count == 0)
{
CreateLock(&decoder->worker_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->worker_thread.pool,
decoder->thread_cntrl.capabilities >> 16/*cpus*/,
WorkerThreadProc,
decoder);
}
#endif
// Post a message to the mailbox
mailbox->output = output;
mailbox->pitch = pitch;
memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
mailbox->jobType = JOB_TYPE_OUTPUT;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
#else
//unsigned short scanline[4096*3],*sptr;
//unsigned short scanline2[4096*3],*sptr2;
unsigned short *scanline, *sptr;
unsigned short *scanline2, *sptr2;
char *buffer = decoder->scratch.free_ptr;
size_t buffer_size = decoder->scratch.free_size;
IMAGE *g_image = lowpass_images[0];
IMAGE *rg_image = lowpass_images[1];
IMAGE *bg_image = lowpass_images[2];
IMAGE *gd_image = lowpass_images[3];
uint8_t *outyuv, *line = output;
PIXEL *bayer_line, *bayerptr;
PIXEL *G, *RG, *BG, *GD;
int x, y;
int bayer_pitch = info->width * 4;
int format = info->format;
bool inverted = false;
int maxbound = 4095; //10-bit source
int midpoint = 32768 >> 3;
int shift = 4;
if (precision == 12)
{
maxbound = 16383;
midpoint = 32768 >> 1;
shift = 2;
}
if (buffer_size < info->width * 2 * 3 * 2)
assert(0); // not enough memory
if (format == DECODED_FORMAT_RGB24 || format == DECODED_FORMAT_RGB32)
{
inverted = true;
line += (info->height - 1) * pitch;
pitch = -pitch;
}
scanline = (unsigned short *)buffer;
buffer += info->width * 2 * 3;
scanline2 = (unsigned short *)buffer;
G = g_image->band[0];
RG = rg_image->band[0];
BG = bg_image->band[0];
for (y = 0; y < info->height; y++)
{
uint8_t *newline = line;
PIXEL *newG = G, *newRG = RG, *newBG = BG;
PIXEL *gptr, *rgptr, *bgptr, *gdptr;
int r, g, b, rg, bg, y1, y2, u, v;
int r1, g1, b1;
int i;
newline += pitch * y;
newG += y * (g_image->pitch / sizeof(PIXEL));
newRG += y * (rg_image->pitch / sizeof(PIXEL));
newBG += y * (bg_image->pitch / sizeof(PIXEL));
gptr = newG;
rgptr = newRG;
bgptr = newBG;
sptr = scanline;
for (x = 0; x < info->width; x++)
{
g = (*gptr++);
if (g > maxbound) g = maxbound;
rg = (*rgptr++);
bg = (*bgptr++);
r = (rg << 1) - midpoint + g;
b = (bg << 1) - midpoint + g;
if (r > maxbound) r = maxbound;
if (b > maxbound) b = maxbound;
if (r < 0) r = 0;
if (g < 0) g = 0;
if (b < 0) b = 0;
*sptr++ = r << shift;
*sptr++ = g << shift;
*sptr++ = b << shift;
}
{
int flags = 0;
int whitebitdepth = 16;
sptr = scanline;
if (decoder->apply_color_active_metadata)
sptr = ApplyActiveMetaData(decoder, info->width, 1, y, scanline, scanline2,
info->format, &whitebitdepth, &flags);
ConvertLinesToOutput(decoder, info->width, 1, sptr,
newline, y, pitch,
info->format, whitebitdepth, flags);
}
}
#endif
}
else if ((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
IMAGE *g_image = lowpass_images[0];
IMAGE *rg_image = lowpass_images[1];
IMAGE *bg_image = lowpass_images[2];
uint8_t *line = output;
unsigned char *rgb8;
PIXEL *G, *RG, *BG;
int x, y;
G = g_image->band[0];
RG = rg_image->band[0];
BG = bg_image->band[0];
if (info->format == DECODED_FORMAT_RGB32)
{
line = output;
line += (info->height - 1) * pitch;
for (y = 0; y < info->height; y++)
{
PIXEL *gptr, *rgptr, *bgptr;
int r, g, b;
int i, noisearray[32];
for (i = 0; i < 32; i++)
{
noisearray[i] = (rand() & 63);
}
gptr = G;
rgptr = RG;
bgptr = BG;
rgb8 = (unsigned char *)line;
for (x = 0; x < info->width; x++)
{
int rnd = noisearray[x & 31];
g = ((*gptr++) + rnd) >> 6;
r = ((*rgptr++) + rnd) >> 6;
b = ((*bgptr++) + rnd) >> 6;
if (r < 0) r = 0;
if (r > 255) r = 255;
if (g < 0) g = 0;
if (g > 255) g = 255;
if (b < 0) b = 0;
if (b > 255) b = 255;
*rgb8++ = b;
*rgb8++ = g;
*rgb8++ = r;
*rgb8++ = 255;
}
line -= pitch;
G += g_image->pitch / sizeof(PIXEL);
RG += rg_image->pitch / sizeof(PIXEL);
BG += bg_image->pitch / sizeof(PIXEL);
}
}
else if (info->format == DECODED_FORMAT_RGB24)
{
line = output;
line += (info->height - 1) * pitch;
for (y = 0; y < info->height; y++)
{
PIXEL *gptr, *rgptr, *bgptr;
int r, g, b;
int i, noisearray[32];
for (i = 0; i < 32; i++)
{
noisearray[i] = (rand() & 63);
}
gptr = G;
rgptr = RG;
bgptr = BG;
rgb8 = (unsigned char *)line;
for (x = 0; x < info->width; x++)
{
int rnd = noisearray[x & 31];
g = ((*gptr++) + rnd) >> 6;
r = ((*rgptr++) + rnd) >> 6;
b = ((*bgptr++) + rnd) >> 6;
if (r < 0) r = 0;
if (r > 255) r = 255;
if (g < 0) g = 0;
if (g > 255) g = 255;
if (b < 0) b = 0;
if (b > 255) b = 255;
*rgb8++ = b;
*rgb8++ = g;
*rgb8++ = r;
}
line -= pitch;
G += g_image->pitch / sizeof(PIXEL);
RG += rg_image->pitch / sizeof(PIXEL);
BG += bg_image->pitch / sizeof(PIXEL);
}
}
}
else
#endif
{
CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, info, chroma_offset,
precision, decoder->codec.encoded_format, decoder->frame.white_point);
}
START(tk_inverse);
#if (0 && DEBUG)
if (logfile)
{
char label[PATH_MAX];
int width = info->width;
int height = info->height;
sprintf(label, "Output");
DumpBufferStatistics(label, output, width, height, pitch, logfile);
}
#endif
}
// In full resolution, reconstruct the frame wavelet and
// convert the YUYV output to the specified color format
else
{
// Handle inversion of the output image in this routine
FRAME_INFO info2;
int format;
bool inverted = false;
int precision = codec->precision;
memcpy(&info2, info, sizeof(FRAME_INFO));
format = info2.format;
if (format == DECODED_FORMAT_RGB24)
{
format = DECODED_FORMAT_RGB24_INVERTED;
info2.format = format;
inverted = true;
}
else if (format == DECODED_FORMAT_RGB32)
{
format = DECODED_FORMAT_RGB32_INVERTED;
info2.format = format;
inverted = true;
}
// Have the output location and pitch been inverted?
if (inverted && pitch > 0)
{
int height = info->height;
if (resolution == DECODED_RESOLUTION_FULL_DEBAYER)
height *= 2;
output += (height - 1) * pitch; // Start at the bottom row
pitch = NEG(pitch); // Negate the pitch to go up
}
//#if BUILD_PROSPECT
        // Output the frame in V210 format?
if ( (format == DECODED_FORMAT_V210 ||
format == DECODED_FORMAT_YU64) &&
decoder->codec.encoded_format != ENCODED_FORMAT_BAYER )
{
//char *buffer = decoder->buffer;
//size_t buffer_size = decoder->buffer_size;
int precision = codec->precision;
// The output buffer is an array of 10-bit pixels packed into double words
#if 0
TransformInverseSpatialToV210(transform_array, frame, num_channels, output, pitch, &info2,
buffer, buffer_size, chroma_offset, decoder->codec.precision);
#else
TransformInverseSpatialToV210(transform_array, frame, num_channels, output, pitch,
&info2, &decoder->scratch, chroma_offset, precision);
#endif
}
else
//#endif
// Decoding a full resolution progressive frame to a Bayer output format?
if (decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
{
//char *buffer = decoder->buffer;
//size_t buffer_size = decoder->buffer_size;
int precision = codec->precision;
// PIXEL16U *RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(info->width*decoded_height*4*sizeof(PIXEL), 16);
if (decoder->RawBayer16 == NULL)
{
#if _ALLOCATOR
ALLOCATOR *allocator = decoder->allocator;
size_t size = info->width * decoded_height * 4 * sizeof(PIXEL);
decoder->RawBayer16 =
(PIXEL16U *)AllocAligned(allocator, size, 16);
#else
decoder->RawBayer16 =
(PIXEL16U *)MEMORY_ALIGNED_ALLOC(info->width * decoded_height * 4 * sizeof(PIXEL), 16);
#endif
decoder->RawBayerSize = info->width * decoded_height * 4 * sizeof(PIXEL);
}
//TODO: Replace this memory allocation with a scratch buffer allocation
//#ifdef SHARPENING
if (decoder->RGBFilterBuffer16 == NULL)
{
#if _ALLOCATOR
ALLOCATOR *allocator = decoder->allocator;
size_t size = info->width * decoded_height * 4 * 3 * sizeof(PIXEL);
decoder->RGBFilterBuffer16 =
(PIXEL16U *)AllocAligned(allocator, size, 16);
#else
decoder->RGBFilterBuffer16 =
(PIXEL16U *)MEMORY_ALIGNED_ALLOC(info->width * decoded_height * 4 * 3 * sizeof(PIXEL), 16);
#endif
decoder->RGBFilterBufferSize = info->width * decoded_height * 4 * 3 * sizeof(PIXEL);
}
//#endif
if (decoder->RawBayer16 == NULL || decoder->RGBFilterBuffer16 == NULL)
{
decoder->error = CODEC_ERROR_MEMORY_ALLOC;
return;
}
if (decoder->RawBayer16)
{
uint8_t *line;
PIXEL16U *bayer_line, *bayerptr, *outA16, *outB16;
PIXEL16U *G, *RG, *BG, *GD;
int x, y;
int bayer_pitch = info->width * 4;
//float scale = 256.0;
//int matrix_non_unity = 0;
//int wb_non_unity = 0;
//float curve2lin[2048];
//float lin2curve[2048+512+2];
#if 0
static float rgb2yuv[3][4] =
{
{0.183f, 0.614f, 0.062f, 16.0f / 256.0f},
{-0.101f, -0.338f, 0.439f, 0.5f},
{0.439f, -0.399f, -0.040f, 0.5f}
};
float mtrx[3][4] =
{
{1.0f, 0, 0, 0},
{0, 1.0f, 0, 0},
{0, 0, 1.0f, 0}
};
float whitebalance[3] = { 1.0f, 1.0f, 1.0f };
#endif
#if 0 // Matrix disabled as it can only be correct handled by the 3D LUT due to the required linear conversions
/* if(decoder->cfhddata.MagicNumber == CFHDDATA_MAGIC_NUMBER && decoder->cfhddata.version >= 2)
{
float fval = 0.0;
int i;
for(i=0; i<12; i++)
{
mtrx[i>>2][i&3] = fval = decoder->cfhddata.colormatrix[i>>2][i&3];
if((i>>2) == (i&3))
{
if(fval != 1.0)
{
matrix_non_unity = 1;
}
}
else
{
if(fval != 0.0)
{
matrix_non_unity = 1;
}
}
}
// not active as VFW isn't yet support the 3D LUTs
if(decoder->cfhddata.version >= 5)
{
int j;
float encode_curvebase = 90.0;
float decode_curvebase = 90.0;
int encode_curve_type = decoder->cfhddata.encode_curve >> 16;
int decode_curve_type = decoder->cfhddata.decode_curve >> 16;
if(decoder->cfhddata.user_white_balance[0] > 0.0)
{
wb_non_unity = 1;
whitebalance[0] = decoder->cfhddata.user_white_balance[0];
whitebalance[1] = (decoder->cfhddata.user_white_balance[1]+decoder->cfhddata.user_white_balance[2])/2.0;
whitebalance[2] = decoder->cfhddata.user_white_balance[3];
}
if(encode_curve_type) //1 or 2
encode_curvebase = (float)((decoder->cfhddata.encode_curve >> 8) & 0xff) / (float)(decoder->cfhddata.encode_curve & 0xff);
else
{
encode_curve_type = 1;
encode_curvebase = 90.0;
}
if(decode_curve_type) //1 or 2
decode_curvebase = (float)((decoder->cfhddata.decode_curve >> 8) & 0xff) / (float)(decoder->cfhddata.decode_curve & 0xff);
else
{
decode_curve_type = 1;
decode_curvebase = 90.0;
}
for(j=0; j<2048; j++)
{
if(encode_curve_type == 1)
curve2lin[j] = CURVE_LOG2LIN((float)j/2047.0,encode_curvebase);
else
curve2lin[j] = CURVE_GAM2LIN((float)j/2047.0,encode_curvebase);
}
for(j=-512; j<=2048; j++) // -1 to +4
{
if(encode_curve_type == CURVE_TYPE_LOG)
lin2curve[j+512] = CURVE_LIN2LOG((float)j/512.0,encode_curvebase);
else
lin2curve[j+512] = CURVE_LIN2GAM((float)j/512.0,encode_curvebase);
}
}
}*/
#endif
#if _THREADED
TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame, num_channels,
(uint8_t *)decoder->RawBayer16, bayer_pitch * sizeof(PIXEL),
info, chroma_offset, precision);
#else
// Decode that last transform to rows of Bayer data (one row per channel)
TransformInverseSpatialToRow16u(transform_array, frame, num_channels,
decoder->RawBayer16, bayer_pitch * sizeof(PIXEL), info,
&decoder->scratch, chroma_offset, precision);
#endif
if (resolution == DECODED_RESOLUTION_FULL_DEBAYER &&
(info->format < DECODED_FORMAT_BYR1 || info->format > DECODED_FORMAT_BYR4))
{
#if _THREADED //DemosaicRAW
WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
if (decoder->worker_thread.pool.thread_count == 0)
{
CreateLock(&decoder->worker_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->worker_thread.pool,
decoder->thread_cntrl.capabilities >> 16/*cpus*/,
WorkerThreadProc,
decoder);
}
#endif
// Post a message to the mailbox
mailbox->output = output;
mailbox->pitch = pitch;
memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
mailbox->jobType = JOB_TYPE_OUTPUT;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
#else
assert(0) // old code disabled
/* int bayer_format = decoder->cfhddata.bayer_format;
unsigned char *outA8, *outB8;
unsigned short *lineStartA16, *lineStartB16;
unsigned short *lineA16, *lineB16;
// int stats1=0, stats2=0, statsd=0;
// double dstats1=0, dstats2=0, dstatsd=0;
line = output;
bayer_line = decoder->RawBayer16;
for(y=0; y<info->height+DEMOSAIC_DELAYLINES; y++)
{
bayer_line = decoder->RawBayer16;
bayer_line += bayer_pitch * y;
if(y<info->height)
{
ColorDifference2Bayer(info->width,
bayer_line, bayer_pitch, bayer_format);
}
if(y>=3+DEMOSAIC_DELAYLINES && y<info->height-3+DEMOSAIC_DELAYLINES) //middle scanline
{
unsigned short *delayptr = decoder->RawBayer16;
delayptr += bayer_pitch * (y-DEMOSAIC_DELAYLINES);
BayerRippleFilter(info->width,
delayptr, bayer_pitch, bayer_format, decoder->RawBayer16);
}
if(y>=DEMOSAIC_DELAYLINES)
{
int delay_y = y - DEMOSAIC_DELAYLINES;
unsigned short *sptr, scanline[8192*3];
outA8 = line;
line += pitch;
outB8 = line;
line += pitch;
sptr = scanline;
DebayerLine(info->width*2, info->height*2, delay_y*2,
decoder->RawBayer16, bayer_format, sptr, sharpening);
for(x=0; x<info->width*2; x++)
{
outA8[2] = *sptr++>>8;
outA8[1] = *sptr++>>8;
outA8[0] = *sptr++>>8;
outA8+=3;
}
for(x=0; x<info->width*2; x++)
{
outB8[2] = *sptr++>>8;
outB8[1] = *sptr++>>8;
outB8[0] = *sptr++>>8;
outB8+=3;
}
}
}*/
#endif // _THREADED
}
else
if (format == DECODED_FORMAT_BYR2 || format == DECODED_FORMAT_BYR4)
{
#if _THREADED
WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
if (decoder->worker_thread.pool.thread_count == 0)
{
CreateLock(&decoder->worker_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->worker_thread.pool,
decoder->thread_cntrl.capabilities >> 16/*cpus*/,
WorkerThreadProc,
decoder);
}
#endif
// Post a message to the mailbox
mailbox->output = output;
mailbox->pitch = pitch;
memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
mailbox->jobType = JOB_TYPE_OUTPUT;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
#else
assert(0) // old code disabled
/* {
int bayer_format = decoder->cfhddata.bayer_format;
// int stats1=0, stats2=0, statsd=0;
// double dstats1=0, dstats2=0, dstatsd=0;
line = output;
bayer_line = decoder->RawBayer16;
for(y=0; y<info->height; y++)
{
outA16 = (PIXEL16U *)line;
line += pitch;
outB16 = (PIXEL16U *)line;
line += pitch;
bayerptr = bayer_line;
G = bayerptr;
RG = G + bayer_pitch/4;
BG = RG + bayer_pitch/4;
GD = BG + bayer_pitch/4;
for(x=0; x<info->width; x++)
{
int r,g,b,rg,bg,gd,g1,g2,y1,y2,u,v,dither;
g = (*G++);
rg = (*RG++);
bg = (*BG++);
gd = (*GD++) - 32768;
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
g1 = g + gd;
g2 = g - gd; //TODO: Is there a DC offset to gd (causes a check in output )
// stats1+=g1;
// stats2+=g2;
// statsd+=gd;
if(r < 0) r = 0;
if(g1 < 0) g1 = 0;
if(g2 < 0) g2 = 0;
if(b < 0) b = 0;
if(r > 0xffff) r = 0xffff;
if(g1 > 0xffff) g1 = 0xffff;
if(g2 > 0xffff) g2 = 0xffff;
if(b > 0xffff) b = 0xffff;
switch(bayer_format)
{
case BAYER_FORMAT_RED_GRN: //Red-grn phase
*outA16++ = r;
*outA16++ = g1;
*outB16++ = g2;
*outB16++ = b;
break;
case BAYER_FORMAT_GRN_RED:// grn-red
*outA16++ = g1;
*outA16++ = r;
*outB16++ = b;
*outB16++ = g2;
break;
case BAYER_FORMAT_GRN_BLU:
*outA16++ = g1;
*outA16++ = b;
*outB16++ = r;
*outB16++ = g2;
break;
case BAYER_FORMAT_BLU_GRN:
*outA16++ = b;
*outA16++ = g1;
*outB16++ = g2;
*outB16++ = r;
break;
}
}
bayer_line += bayer_pitch;
}
if(decoder->flags & DECODER_FLAGS_HIGH_QUALITY)
{
int bayer_format = decoder->cfhddata.bayer_format;
for(y=2; y<info->height-3; y++)
{
int offset = pitch>>1;
line = output; //0
line += pitch * y * 2;
// If on a red line, move to a blue line
if(bayer_format == BAYER_FORMAT_GRN_RED || bayer_format == BAYER_FORMAT_RED_GRN)
line -= pitch;
{
int offset = pitch>>1;
outA16 = (PIXEL16U *)line;
outA16++; //g //for BAYER_FORMAT_RED_GRN input
outA16++; //b
outA16++; //g
outA16++; //b
//point to green pixel with *outA16
if(bayer_format == BAYER_FORMAT_GRN_RED || bayer_format == BAYER_FORMAT_GRN_BLU)
outA16++;
for(x=2; x<info->width-2; x++)
{
int mn,mx,g;
int range = 8*256; //1<<11
int shift = 11;
int delta;
int alpha;
g = *outA16;
// lines below do not need to be tested for a corrected value
mn = mx = outA16[offset+1];
if(mn > outA16[offset-1]) mn = outA16[offset-1];
if(mx < outA16[offset-1]) mx = outA16[offset-1];
if((outA16[-offset-1] & 1)==0)
{
if(mn > outA16[-offset-1]) mn = outA16[-offset-1];
if(mx < outA16[-offset-1]) mx = outA16[-offset-1];
}
if((outA16[-offset+1] & 1)==0)
{
if(mn > outA16[-offset+1]) mn = outA16[-offset+1];
if(mx < outA16[-offset+1]) mx = outA16[-offset+1];
}
delta = mx - mn;
if(delta < range && ((mn-range < g && g < mn) || (mx+range > g && g > mx)))
{
int gmn,gmx;
gmn = gmx = g;
if((outA16[-2*offset-2] & 1)==0)
{
if(gmn > outA16[-2*offset-2]) gmn = outA16[-2*offset-2];
if(gmx < outA16[-2*offset-2]) gmx = outA16[-2*offset-2];
}
if((outA16[-2*offset] & 1)==0)
{
if(gmn > outA16[-2*offset]) gmn = outA16[-2*offset];
if(gmx < outA16[-2*offset]) gmx = outA16[-2*offset];
}
if((outA16[-2*offset+2] & 1)==0)
{
if(gmn > outA16[-2*offset+2]) gmn = outA16[-2*offset+2];
if(gmx < outA16[-2*offset+2]) gmx = outA16[-2*offset+2];
}
if((outA16[-2] & 1)==0)
{
if(gmn > outA16[-2]) gmn = outA16[-2];
if(gmx < outA16[-2]) gmx = outA16[-2];
}
// lines below do not need to be tested for a corrected value
if(gmn > outA16[2*offset-2]) gmn = outA16[2*offset-2];
if(gmx < outA16[2*offset-2]) gmx = outA16[2*offset-2];
if(gmn > outA16[2*offset]) gmn = outA16[2*offset];
if(gmx < outA16[2*offset]) gmx = outA16[2*offset];
if(gmn > outA16[2*offset+2]) gmn = outA16[2*offset+2];
if(gmx < outA16[2*offset+2]) gmx = outA16[2*offset+2];
if(gmn > outA16[2]) gmn = outA16[2];
if(gmx < outA16[2]) gmx = outA16[2];
if((gmx - gmn) < range)
{
alpha = range;//delta;
if(g > mx)
{
alpha *= (g-mx); //max range
alpha >>= shift;
}
else // g < mn
{
alpha *= (mn-g); //max range
alpha >>= shift;
}
alpha *= alpha;
alpha >>= shift;
// avg = (outA16[-offset-1] + outA16[offset-1] + outA16[-offset+1] + outA16[offset+1] + 2) >> 2;
// *outA16 = avg; //good
// *outA16 = mn; //spotty
if( (abs(outA16[offset] - outA16[-offset]) < range)
&& ((abs(outA16[1] - outA16[-1]) < range)))
{
int val = (alpha*g + (range - alpha)*((mn+mx)>>1))>>shift;
if(val > 0xffff) val = 0xffff;
if(val < 0) val = 0;
val |= 1;
*outA16 = val;
// *outA16 = ((mn+mx)>>1) | 1; // like avg but less compute
}
}
}
outA16++; //g
outA16++; //b
}
}
}
}
}*/
#endif
}
// Pack the rows of Bayer data (full resolution progressive) into BYR3 format?
// Pack the reconstructed Bayer rows into BYR3: four planar runs per output
// row (R, G1, G2, B), each sample scaled down to 10 bits (>> 6 below).
// The decoder stores the channels as G, R-G, B-G and G1-G2 difference
// planes; R/B/G1/G2 are reconstructed from those differences.
else if (format == DECODED_FORMAT_BYR3)
{
PIXEL16U *outR, *outG1, *outG2, *outB;
// int stats1=0, stats2=0, statsd=0;
// double dstats1=0, dstats2=0, dstatsd=0;
// #pragma omp parallel for
for (y = 0; y < info->height; y++)
{
uint8_t *line = output;
PIXEL *bayerptr = (PIXEL *)decoder->RawBayer16;
line += pitch * 2 * y;
bayerptr += bayer_pitch * y;
// Four planar output runs within each output line (R, G1, G2, B)
outR = (PIXEL16U *)line;
outG1 = outR + (pitch / 4);
outG2 = outR + (pitch / 4) * 2;
outB = outR + (pitch / 4) * 3;
// Source planes: G, R-G, B-G, G1-G2 (each bayer_pitch/4 PIXEL16U apart)
G = (PIXEL16U *)bayerptr;
RG = G + bayer_pitch / 4;
BG = RG + bayer_pitch / 4;
GD = BG + bayer_pitch / 4;
// Pack the rows of Bayer components into the BYR3 pattern
#if (XMMOPT)
// SSE2 fast path: 8 pixels per iteration.
// NOTE(review): _mm_load_si128/_mm_store_si128 require 16-byte aligned
// pointers -- assumes the planes and output rows are 16-byte aligned.
{
__m128i *G_128 = (__m128i *)G;
__m128i *RG_128 = (__m128i *)RG;
__m128i *BG_128 = (__m128i *)BG;
__m128i *GD_128 = (__m128i *)GD;
__m128i *outR_128 = (__m128i *)outR;
__m128i *outG1_128 = (__m128i *)outG1;
__m128i *outG2_128 = (__m128i *)outG2;
__m128i *outB_128 = (__m128i *)outB;
// Saturating add/sub pair with this constant clamps results to [0, 0x3ff]
__m128i limiter = _mm_set1_epi16(0x7fff - 0x3ff);
// Midpoints pre-scaled to match the shifted difference planes below
__m128i midpoint1 = _mm_set1_epi16(32768 >> 6);
__m128i midpoint2 = _mm_set1_epi16(32768 >> 5);
int column_step = 8;
int post_column = (info->width) - ((info->width) % column_step);
for (x = 0; x < post_column; x += column_step)
{
__m128i r_128;
__m128i g1_128;
__m128i g2_128;
__m128i b_128;
__m128i g_128;
__m128i rg_128;
__m128i bg_128;
__m128i gd_128;
g_128 = _mm_load_si128(G_128++);
rg_128 = _mm_load_si128(RG_128++);
bg_128 = _mm_load_si128(BG_128++);
gd_128 = _mm_load_si128(GD_128++);
// Scale 16-bit samples down to 10 bits (differences keep one extra bit)
g_128 = _mm_srli_epi16(g_128, 6);
rg_128 = _mm_srli_epi16(rg_128, 5);
bg_128 = _mm_srli_epi16(bg_128, 5);
gd_128 = _mm_srli_epi16(gd_128, 6);
// Re-center the difference channels around zero
gd_128 = _mm_subs_epi16(gd_128, midpoint1);
rg_128 = _mm_subs_epi16(rg_128, midpoint2);
bg_128 = _mm_subs_epi16(bg_128, midpoint2);
// Reconstruct R, B, G1, G2 with saturating arithmetic
r_128 = _mm_adds_epi16(rg_128, g_128);
b_128 = _mm_adds_epi16(bg_128, g_128);
g1_128 = _mm_adds_epi16(g_128, gd_128);
g2_128 = _mm_subs_epi16(g_128, gd_128);
// Clamp each channel to the 10-bit range [0, 0x3ff]
r_128 = _mm_adds_epi16(r_128, limiter);
r_128 = _mm_subs_epu16(r_128, limiter);
g1_128 = _mm_adds_epi16(g1_128, limiter);
g1_128 = _mm_subs_epu16(g1_128, limiter);
g2_128 = _mm_adds_epi16(g2_128, limiter);
g2_128 = _mm_subs_epu16(g2_128, limiter);
b_128 = _mm_adds_epi16(b_128, limiter);
b_128 = _mm_subs_epu16(b_128, limiter);
_mm_store_si128(outR_128++, r_128);
_mm_store_si128(outG1_128++, g1_128);
_mm_store_si128(outG2_128++, g2_128);
_mm_store_si128(outB_128++, b_128);
}
// Resynchronize the scalar pointers for the residual columns
G = (PIXEL16U *)G_128;
RG = (PIXEL16U *)RG_128;
BG = (PIXEL16U *)BG_128;
GD = (PIXEL16U *)GD_128;
outR = (PIXEL16U *)outR_128;
outG1 = (PIXEL16U *)outG1_128;
outG2 = (PIXEL16U *)outG2_128;
outB = (PIXEL16U *)outB_128;
}
#endif
// Scalar tail: same reconstruction at full 16-bit precision, then the
// final >> 6 scales to 10 bits to match the SIMD path.
for (; x < info->width; x++)
{
int r, g, b, rg, bg, gd, g1, g2;
g = (*G++);
rg = (*RG++);
bg = (*BG++);
gd = (*GD++) - 32768;
r = ((rg - 32768) << 1) + g;
b = ((bg - 32768) << 1) + g;
g1 = g + gd;
g2 = g - gd; //TODO: Is there a DC offset to gd (causes a check in output )
if (r < 0) r = 0;
if (g1 < 0) g1 = 0;
if (g2 < 0) g2 = 0;
if (b < 0) b = 0;
if (r > 0xffff) r = 0xffff;
if (g1 > 0xffff) g1 = 0xffff;
if (g2 > 0xffff) g2 = 0xffff;
if (b > 0xffff) b = 0xffff;
//Red-grn phase
*outR++ = r >> 6;
*outG1++ = g1 >> 6;
*outG2++ = g2 >> 6;
*outB++ = b >> 6;
}
}
}
// Pack the rows of Bayer data (full resolution progressive) into BYR4 format?
else if (format == DECODED_FORMAT_BYR4)
{
int bayer_format = decoder->cfhddata.bayer_format;
line = output;
bayer_line = decoder->RawBayer16;
// BYR4 packing: each Bayer row becomes two interleaved 16-bit output lines
// (outA16 = top phase, outB16 = bottom phase), laid out per bayer_format.
for (y = 0; y < info->height; y++)
{
outA16 = (PIXEL16U *)line;
line += pitch;
outB16 = (PIXEL16U *)line;
line += pitch;
// Source planes: G, R-G, B-G, G1-G2 (each bayer_pitch/4 PIXEL16U apart)
bayerptr = bayer_line;
G = bayerptr;
RG = G + bayer_pitch / 4;
BG = RG + bayer_pitch / 4;
GD = BG + bayer_pitch / 4;
for (x = 0; x < info->width; x++)
{
//int r,g,b,rg,bg,gd,g1,g2,y1,y2,u,v,dither;
int32_t r, g, b, rg, bg, gd, g1, g2;
// The output of the inverse transform is unsigned 16-bit integers
const int midpoint = 32768;
g = (*G++);
rg = (*RG++);
bg = (*BG++);
gd = (*GD++) - midpoint;
// Reconstruct the four Bayer components from the difference planes
r = ((rg - midpoint) << 1) + g;
b = ((bg - midpoint) << 1) + g;
g1 = g + gd;
g2 = g - gd;
r = SATURATE_16U(r);
g1 = SATURATE_16U(g1);
g2 = SATURATE_16U(g2);
b = SATURATE_16U(b);
// stats1+=g1;
// stats2+=g2;
// statsd+=gd;
// Write the 2x2 quad in the order dictated by the sensor phase
switch (bayer_format)
{
case BAYER_FORMAT_RED_GRN: //Red-grn phase
*outA16++ = r;
*outA16++ = g1;
*outB16++ = g2;
*outB16++ = b;
break;
case BAYER_FORMAT_GRN_RED:// grn-red
*outA16++ = g1;
*outA16++ = r;
*outB16++ = b;
*outB16++ = g2;
break;
case BAYER_FORMAT_GRN_BLU:
*outA16++ = g1;
*outA16++ = b;
*outB16++ = r;
*outB16++ = g2;
break;
case BAYER_FORMAT_BLU_GRN:
*outA16++ = b;
*outA16++ = g1;
*outB16++ = g2;
*outB16++ = r;
break;
default:
// Unsupported Bayer format
assert(0);
// Emit zeros so the output pointers stay in step in release builds
*outA16++ = 0;
*outA16++ = 0;
*outB16++ = 0;
*outB16++ = 0;
break;
}
}
bayer_line += bayer_pitch;
}
// High-quality post-filter: suppress isolated green-channel speckles
// ("green split" between the two green phases).  A green sample that sits
// just outside the min/max of its green neighbors is blended toward the
// neighborhood midpoint.  Corrected samples are tagged by forcing their
// LSB to 1 (`val |= 1` below); the `(x & 1) == 0` tests skip neighbors
// that were already corrected so fixes do not cascade.
if (decoder->flags & DECODER_FLAGS_HIGH_QUALITY)
{
for (y = 2; y < info->height - 3; y++)
{
//int offset = pitch>>1;
line = output; //0
line += pitch * y * 2;
// If on a red line, move to a blue line
if (bayer_format == BAYER_FORMAT_GRN_RED || bayer_format == BAYER_FORMAT_RED_GRN)
line -= pitch;
{
// offset = one output line, measured in PIXEL16U samples
int offset = pitch >> 1;
outA16 = (PIXEL16U *)line;
outA16++; //g //for BAYER_FORMAT_RED_GRN input
outA16++; //b
outA16++; //g
outA16++; //b
//point to green pixel with *outA16
if (bayer_format == BAYER_FORMAT_GRN_RED || bayer_format == BAYER_FORMAT_GRN_BLU)
outA16++;
for (x = 2; x < info->width - 2; x++)
{
int mn, mx, g;
int range = 8 * 256; //1<<11
int shift = 11;
int delta;
int alpha;
g = *outA16;
// min/max of the four diagonal green neighbors
// lines below do not need to be tested for a corrected value
mn = mx = outA16[offset + 1];
if (mn > outA16[offset - 1]) mn = outA16[offset - 1];
if (mx < outA16[offset - 1]) mx = outA16[offset - 1];
if ((outA16[-offset - 1] & 1) == 0)
{
if (mn > outA16[-offset - 1]) mn = outA16[-offset - 1];
if (mx < outA16[-offset - 1]) mx = outA16[-offset - 1];
}
if ((outA16[-offset + 1] & 1) == 0)
{
if (mn > outA16[-offset + 1]) mn = outA16[-offset + 1];
if (mx < outA16[-offset + 1]) mx = outA16[-offset + 1];
}
delta = mx - mn;
// Only treat g as a speckle if the neighborhood is flat (delta small)
// and g lies within `range` just outside [mn, mx]
if (delta < range && ((mn - range < g && g < mn) || (mx + range > g && g > mx)))
{
// Second test over the same-phase green ring two samples away
int gmn, gmx;
gmn = gmx = g;
if ((outA16[-2 * offset - 2] & 1) == 0)
{
if (gmn > outA16[-2 * offset - 2]) gmn = outA16[-2 * offset - 2];
if (gmx < outA16[-2 * offset - 2]) gmx = outA16[-2 * offset - 2];
}
if ((outA16[-2 * offset] & 1) == 0)
{
if (gmn > outA16[-2 * offset]) gmn = outA16[-2 * offset];
if (gmx < outA16[-2 * offset]) gmx = outA16[-2 * offset];
}
if ((outA16[-2 * offset + 2] & 1) == 0)
{
if (gmn > outA16[-2 * offset + 2]) gmn = outA16[-2 * offset + 2];
if (gmx < outA16[-2 * offset + 2]) gmx = outA16[-2 * offset + 2];
}
if ((outA16[-2] & 1) == 0)
{
if (gmn > outA16[-2]) gmn = outA16[-2];
if (gmx < outA16[-2]) gmx = outA16[-2];
}
// lines below do not need to be tested for a corrected value
if (gmn > outA16[2 * offset - 2]) gmn = outA16[2 * offset - 2];
if (gmx < outA16[2 * offset - 2]) gmx = outA16[2 * offset - 2];
if (gmn > outA16[2 * offset]) gmn = outA16[2 * offset];
if (gmx < outA16[2 * offset]) gmx = outA16[2 * offset];
if (gmn > outA16[2 * offset + 2]) gmn = outA16[2 * offset + 2];
if (gmx < outA16[2 * offset + 2]) gmx = outA16[2 * offset + 2];
if (gmn > outA16[2]) gmn = outA16[2];
if (gmx < outA16[2]) gmx = outA16[2];
if ((gmx - gmn) < range)
{
// Blend weight: grows with g's distance outside [mn, mx],
// squared to soften small deviations (11-bit fixed point)
alpha = range;//delta;
if (g > mx)
{
alpha *= (g - mx); //max range
alpha >>= shift;
}
else // g < mn
{
alpha *= (mn - g); //max range
alpha >>= shift;
}
alpha *= alpha;
alpha >>= shift;
// avg = (outA16[-offset-1] + outA16[offset-1] + outA16[-offset+1] + outA16[offset+1] + 2) >> 2;
// *outA16 = avg; //good
// *outA16 = mn; //spotty
// Skip genuine edges: require the vertical and horizontal
// non-green neighbors to be locally flat too
if ( (abs(outA16[offset] - outA16[-offset]) < range)
&& ((abs(outA16[1] - outA16[-1]) < range)))
{
int val = (alpha * g + (range - alpha) * ((mn + mx) >> 1)) >> shift;
if (val > 0xffff) val = 0xffff;
if (val < 0) val = 0;
// Tag the sample as corrected (LSB = 1) so later tests skip it
val |= 1;
*outA16 = val;
// *outA16 = ((mn+mx)>>1) | 1; // like avg but less compute
}
}
}
// Advance to the next green sample on this line (stride 2)
outA16++; //g
outA16++; //b
}
}
}
}
// Linear restore
{
// Linear restore: undo the encode curve sample-by-sample, converting each
// 16-bit value to linear light and rescaling to 12-bit (0..4095).
// NOTE(review): row indexing uses info->width*2 as the stride, i.e. it
// assumes pitch is exactly width*2 samples per line -- confirm for padded
// pitches.  The curve parameters below are loop-invariant and could be
// hoisted out of both loops; left in place to keep this change doc-only.
unsigned short *buff = (unsigned short *)output;
//static int pos = 0;
for (y = 0; y < info->height * 2; y++)
{
for (x = 0; x < info->width * 2; x++)
{
float val = (float)buff[y * info->width * 2 + x] / 65535.0f;
float encode_curvebase = 90.0;
int encode_curve_type = CURVE_TYPE_LOG;
int encode_curve_neg;
// Decode the curve type/base packed into cfhddata.encode_curve:
// high 16 bits = type, low 16 bits = base (or num/den byte pair)
if ((decoder->cfhddata.encode_curve) >> 16) //1 or 2
{
encode_curve_type = (decoder->cfhddata.encode_curve) >> 16;
if (encode_curve_type & CURVE_TYPE_EXTENDED)
encode_curvebase = (float)(decoder->cfhddata.encode_curve & 0xffff); // use all 16-bits for larger log bases
else
encode_curvebase = (float)((decoder->cfhddata.encode_curve >> 8) & 0xff) / (float)(decoder->cfhddata.encode_curve & 0xff);
}
if (encode_curvebase == 1.0 && encode_curve_type <= CURVE_TYPE_LINEAR)
encode_curve_type = CURVE_TYPE_LINEAR;
// NOTE(review): encode_curve_neg is computed but not used in this pass
encode_curve_neg = encode_curve_type & CURVE_TYPE_NEGATIVE;
switch (encode_curve_type & CURVE_TYPE_MASK)
{
case CURVE_TYPE_LOG:
val = CURVE_LOG2LIN(val, encode_curvebase);
break;
case CURVE_TYPE_GAMMA:
val = CURVE_GAM2LIN(val, encode_curvebase);
break;
case CURVE_TYPE_CINEON:
val = CURVE_CINEON2LIN(val, encode_curvebase);
break;
case CURVE_TYPE_CINE985:
val = CURVE_CINE9852LIN(val, encode_curvebase);
break;
case CURVE_TYPE_PARA:
val = CURVE_PARA2LIN(val, (int)((decoder->cfhddata.encode_curve >> 8) & 0xff), (int)(decoder->cfhddata.encode_curve & 0xff));
break;
case CURVE_TYPE_CSTYLE:
val = CURVE_CSTYLE2LIN((float)val, (int)((decoder->cfhddata.encode_curve >> 8) & 0xff));
break;
case CURVE_TYPE_SLOG:
val = CURVE_SLOG2LIN((float)val);
break;
case CURVE_TYPE_LOGC:
val = CURVE_LOGC2LIN((float)val);
break;
case CURVE_TYPE_LINEAR:
default:
break;
}
// Store the linearized sample at 12-bit scale
buff[y * info->width * 2 + x] = (int)(val * 4095.0);
}
}
}
}
else
{
#if _THREADED
// Threaded path for the remaining output formats: lazily create the worker
// pool (when _DELAY_THREAD_START), then post an output-conversion job and
// block until every row has been processed.
WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
if (decoder->worker_thread.pool.thread_count == 0)
{
CreateLock(&decoder->worker_thread.lock);
// Initialize the pool of transform worker threads
// (thread count comes from the high 16 bits of the capability word)
ThreadPoolCreate(&decoder->worker_thread.pool,
decoder->thread_cntrl.capabilities >> 16/*cpus*/,
WorkerThreadProc,
decoder);
}
#endif
// Post a message to the mailbox
mailbox->output = output;
mailbox->pitch = pitch;
memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
mailbox->jobType = JOB_TYPE_OUTPUT;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
#else
// Non-threaded fallback: convert one scanline at a time.  Each Bayer row is
// expanded to a 16-bit RGB scanline in scratch memory, optionally run
// through the active-metadata color pipeline, then converted to the
// requested output format.
//unsigned short scanline[8192*3],*sptr;
//unsigned short scanline2[8192*3],*sptr2;
unsigned short *scanline, *sptr;
unsigned short *scanline2, *sptr2;
char *buffer = decoder->scratch.free_ptr;
size_t buffer_size = decoder->scratch.free_size;
uint8_t *outyuv, *line = output;
PIXEL *bayerptr;
int x, y;
// Need two RGB scanlines (width * 3 components * 2 bytes each)
if (buffer_size < info->width * 2 * 3 * 2)
assert(0); // not enough memory
scanline = (unsigned short *)buffer;
buffer += info->width * 2 * 3;
scanline2 = (unsigned short *)buffer;
line = output;
bayer_line = decoder->RawBayer16;
for (y = 0; y < info->height; y++)
{
int r, g, b, rg, bg, y1, y2, u, v;
int r1, g1, b1;
int i;
__m128i gggggggg, ggggggg2, rgrgrgrg, bgbgbgbg;
__m128i rrrrrrrr, bbbbbbbb;
// NOTE(review): mid16384 and mid32768 are set but unused in this loop
__m128i mid8192 = _mm_set1_epi16(8192);
__m128i mid16384 = _mm_set1_epi16(16384);
__m128i mid32768 = _mm_set1_epi16(32768);
// Saturating add/sub with this constant clamps to [0, 0x3fff] (14-bit)
__m128i overflowprotectRGB_epi16 = _mm_set1_epi16(0x7fff - 0x3fff);
int sse2width = info->width & 0xfff8;
// Source planes: G, R-G, B-G (GD is unused in this conversion)
bayerptr = bayer_line;
G = bayerptr;
RG = G + bayer_pitch / 4;
BG = RG + bayer_pitch / 4;
GD = BG + bayer_pitch / 4;
sptr = scanline;
x = 0;
// SSE2 fast path: reconstruct R and B at 14-bit precision, clamp,
// restore to 16-bit, then interleave R,G,B triplets into the scanline
for (; x < sse2width; x += 8)
{
gggggggg = _mm_loadu_si128((__m128i *)G);
G += 8;
rgrgrgrg = _mm_loadu_si128((__m128i *)RG);
RG += 8;
bgbgbgbg = _mm_loadu_si128((__m128i *)BG);
BG += 8;
ggggggg2 = _mm_srli_epi16(gggggggg, 2);// 0-16383 14bit unsigned
rgrgrgrg = _mm_srli_epi16(rgrgrgrg, 2);// 14bit unsigned
bgbgbgbg = _mm_srli_epi16(bgbgbgbg, 2);// 14bit unsigned
rrrrrrrr = _mm_subs_epi16(rgrgrgrg, mid8192);// -8191 to 8191 14bit signed
rrrrrrrr = _mm_slli_epi16(rrrrrrrr, 1); // -16382 to 16382 15bit signed
rrrrrrrr = _mm_adds_epi16(rrrrrrrr, ggggggg2); // -16382 to 32767
bbbbbbbb = _mm_subs_epi16(bgbgbgbg, mid8192);// -8191 to 8191 14bit signed
bbbbbbbb = _mm_slli_epi16(bbbbbbbb, 1); // -16382 to 16382 15bit signed
bbbbbbbb = _mm_adds_epi16(bbbbbbbb, ggggggg2); // -16382 to 32767
//limit to 0 to 16383
rrrrrrrr = _mm_adds_epi16(rrrrrrrr, overflowprotectRGB_epi16);
rrrrrrrr = _mm_subs_epu16(rrrrrrrr, overflowprotectRGB_epi16);
//limit to 0 to 16383
bbbbbbbb = _mm_adds_epi16(bbbbbbbb, overflowprotectRGB_epi16);
bbbbbbbb = _mm_subs_epu16(bbbbbbbb, overflowprotectRGB_epi16);
rrrrrrrr = _mm_slli_epi16(rrrrrrrr, 2); // restore to 0 to 65535
bbbbbbbb = _mm_slli_epi16(bbbbbbbb, 2); // restore to 0 to 65535
// Interleave the eight pixels as R,G,B triplets
*sptr++ = _mm_extract_epi16(rrrrrrrr, 0);
*sptr++ = _mm_extract_epi16(gggggggg, 0);
*sptr++ = _mm_extract_epi16(bbbbbbbb, 0);
*sptr++ = _mm_extract_epi16(rrrrrrrr, 1);
*sptr++ = _mm_extract_epi16(gggggggg, 1);
*sptr++ = _mm_extract_epi16(bbbbbbbb, 1);
*sptr++ = _mm_extract_epi16(rrrrrrrr, 2);
*sptr++ = _mm_extract_epi16(gggggggg, 2);
*sptr++ = _mm_extract_epi16(bbbbbbbb, 2);
*sptr++ = _mm_extract_epi16(rrrrrrrr, 3);
*sptr++ = _mm_extract_epi16(gggggggg, 3);
*sptr++ = _mm_extract_epi16(bbbbbbbb, 3);
*sptr++ = _mm_extract_epi16(rrrrrrrr, 4);
*sptr++ = _mm_extract_epi16(gggggggg, 4);
*sptr++ = _mm_extract_epi16(bbbbbbbb, 4);
*sptr++ = _mm_extract_epi16(rrrrrrrr, 5);
*sptr++ = _mm_extract_epi16(gggggggg, 5);
*sptr++ = _mm_extract_epi16(bbbbbbbb, 5);
*sptr++ = _mm_extract_epi16(rrrrrrrr, 6);
*sptr++ = _mm_extract_epi16(gggggggg, 6);
*sptr++ = _mm_extract_epi16(bbbbbbbb, 6);
*sptr++ = _mm_extract_epi16(rrrrrrrr, 7);
*sptr++ = _mm_extract_epi16(gggggggg, 7);
*sptr++ = _mm_extract_epi16(bbbbbbbb, 7);
}
// Scalar tail: same reconstruction at full 16-bit precision
for (; x < info->width; x++)
{
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768) << 1) + g;
b = ((bg - 32768) << 1) + g;
if (r < 0) r = 0;
if (r > 0xffff) r = 0xffff;
if (g < 0) g = 0;
if (g > 0xffff) g = 0xffff;
if (b < 0) b = 0;
if (b > 0xffff) b = 0xffff;
*sptr++ = r;
*sptr++ = g;
*sptr++ = b;
}
{
// Optional active-metadata color pipeline, then format conversion
int flags = 0;
int whitebitdepth = 16;
sptr = scanline;
if (decoder->apply_color_active_metadata)
sptr = ApplyActiveMetaData(decoder, info->width, 1, y, scanline, scanline2,
info->format, &whitebitdepth, &flags);
ConvertLinesToOutput(decoder, info->width, 1, sptr, line, pitch,
info->format, whitebitdepth, flags);
}
line += pitch;
bayer_line += bayer_pitch;
}
#endif
}
/* // switch to using the ApplyActiveMetaData() and ConvertLinesToOutput() calls - DAN20071201
// Pack the rows of Bayer data (full resolution progressive) into BYR2 format?
else if (format == DECODED_FORMAT_YUYV)
{
line = output;
bayer_line = decoder->RawBayer16;
scale = 256.0;
y_rmult = ((rgb2yuv[0][0]) * scale);
y_gmult = ((rgb2yuv[0][1]) * scale);
y_bmult = ((rgb2yuv[0][2]) * scale);
y_offset= ((rgb2yuv[0][3]) * scale);
u_rmult = ((rgb2yuv[1][0]) * scale);
u_gmult = ((rgb2yuv[1][1]) * scale);
u_bmult = ((rgb2yuv[1][2]) * scale);
u_offset= ((rgb2yuv[1][3]) * scale);
v_rmult = ((rgb2yuv[2][0]) * scale);
v_gmult = ((rgb2yuv[2][1]) * scale);
v_bmult = ((rgb2yuv[2][2]) * scale);
v_offset= ((rgb2yuv[2][3]) * scale);
r_rmult= (mtrx[0][0] * scale * whitebalance[0]);
r_gmult= (mtrx[0][1] * scale * whitebalance[1]);
r_bmult= (mtrx[0][2] * scale * whitebalance[2]);
r_offset= (mtrx[0][3] * scale);
g_rmult= (mtrx[1][0] * scale * whitebalance[0]);
g_gmult= (mtrx[1][1] * scale * whitebalance[1]);
g_bmult= (mtrx[1][2] * scale * whitebalance[2]);
g_offset= (mtrx[1][3] * scale);
b_rmult= (mtrx[2][0] * scale * whitebalance[0]);
b_gmult= (mtrx[2][1] * scale * whitebalance[1]);
b_bmult= (mtrx[2][2] * scale * whitebalance[2]);
b_offset= (mtrx[2][3] * scale);
for(y=0; y<info->height; y++)
{
outyuv = line;
bayerptr = bayer_line;
G = bayerptr;
RG = G + bayer_pitch/4;
BG = RG + bayer_pitch/4;
for(x=0; x<info->width; x+=2)
{
int r,g,b,r1,g1,b1,rg,bg,y1,y2,u,v,dither;
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
// dither = (rand() & 65535)<<1;
if(matrix_non_unity)
{
//TODO : need on convert to linear first.
r1= (( r_rmult * r + r_gmult * g + r_bmult * b + r_offset)>>8);
g1= (( g_rmult * r + g_gmult * g + g_bmult * b + g_offset)>>8);
b1= (( b_rmult * r + b_gmult * g + b_bmult * b + b_offset)>>8);
//TODO : need on convert back to log/display curve.
if(r1 < 0) r1 = 0;
if(r1 > 65535) r1 = 65535;
if(g1 < 0) g1 = 0;
if(g1 > 65535) g1 = 65535;
if(b1 < 0) b1 = 0;
if(b1 > 65535) b1 = 65535;
}
else
{
r1 = r;
g1 = g;
b1 = b;
}
y1= ( y_rmult * r1 + y_gmult * g1 + y_bmult * b1 + 32768)>>16;
u = (-u_rmult * r1 - u_gmult * g1 + u_bmult * b1 + 32768)>>16;
v = ( v_rmult * r1 - v_gmult * g1 - v_bmult * b1 + 32768)>>16;
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
// dither = (rand() & 65535)<<1;
if(matrix_non_unity)
{
//TODO : need on convert to linear first.
r1= (( r_rmult * r + r_gmult * g + r_bmult * b + r_offset)>>8);
g1= (( g_rmult * r + g_gmult * g + g_bmult * b + g_offset)>>8);
b1= (( b_rmult * r + b_gmult * g + b_bmult * b + b_offset)>>8);
//TODO : need on convert back to log/display curve.
if(r1 < 0) r1 = 0;
if(r1 > 65535) r1 = 65535;
if(g1 < 0) g1 = 0;
if(g1 > 65535) g1 = 65535;
if(b1 < 0) b1 = 0;
if(b1 > 65535) b1 = 65535;
}
else
{
r1 = r;
g1 = g;
b1 = b;
}
y2 = ( y_rmult * r1 + y_gmult * g1 + y_bmult * b1 + 32768)>>16;
u += (-u_rmult * r1 - u_gmult * g1 + u_bmult * b1 + 32768)>>16;
v += ( v_rmult * r1 - v_gmult * g1 - v_bmult * b1 + 32768)>>16;
u >>= 1;
v >>= 1;
y1 += y_offset;
y2 += y_offset;
u += u_offset;
v += v_offset;
if(y1 < 0) y1 = 0;
if(y1 > 255) y1 = 255;
if(y2 < 0) y2 = 0;
if(y2 > 255) y2 = 255;
if(u < 0) u = 0;
if(u > 255) u = 255;
if(v < 0) v = 0;
if(v > 255) v = 255;
*outyuv++ = y1;
*outyuv++ = u;
*outyuv++ = y2;
*outyuv++ = v;
}
line += pitch;
bayer_line += bayer_pitch;
}
}
else if (format == DECODED_FORMAT_YU64)
{
int shift = 14;
PIXEL16U *outyuv64;
line = output;
bayer_line = decoder->RawBayer16;
scale = 16384.0;
//_mm_empty(); // Clear the mmx register state
y_rmult = ((rgb2yuv[0][0]) * scale);
y_gmult = ((rgb2yuv[0][1]) * scale);
y_bmult = ((rgb2yuv[0][2]) * scale);
y_offset= ((rgb2yuv[0][3]) * scale * 4.0);
u_rmult = ((rgb2yuv[1][0]) * scale);
u_gmult = ((rgb2yuv[1][1]) * scale);
u_bmult = ((rgb2yuv[1][2]) * scale);
u_offset= ((rgb2yuv[1][3]) * scale * 4.0);
v_rmult = ((rgb2yuv[2][0]) * scale);
v_gmult = ((rgb2yuv[2][1]) * scale);
v_bmult = ((rgb2yuv[2][2]) * scale);
v_offset= ((rgb2yuv[2][3]) * scale * 4.0);
scale = 4096.0;
r_rmult= (mtrx[0][0] * scale * whitebalance[0]);
r_gmult= (mtrx[0][1] * scale * whitebalance[1]);
r_bmult= (mtrx[0][2] * scale * whitebalance[2]);
r_offset= (mtrx[0][3] * scale);
g_rmult= (mtrx[1][0] * scale * whitebalance[0]);
g_gmult= (mtrx[1][1] * scale * whitebalance[1]);
g_bmult= (mtrx[1][2] * scale * whitebalance[2]);
g_offset= (mtrx[1][3] * scale);
b_rmult= (mtrx[2][0] * scale * whitebalance[0]);
b_gmult= (mtrx[2][1] * scale * whitebalance[1]);
b_bmult= (mtrx[2][2] * scale * whitebalance[2]);
b_offset= (mtrx[2][3] * scale);
y_offset += 26;
u_offset += 26;
v_offset += 26;
for(y=0; y<info->height; y++)
{
outyuv64 = (PIXEL16U *)line;
bayerptr = bayer_line;
G = bayerptr;
RG = G + bayer_pitch/4;
BG = RG + bayer_pitch/4;
for(x=0; x<info->width; x+=2)
{
int r,g,b,r1,g1,b1,rg,bg,y1,y2,u,v,dither;
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
// dither = (rand() & 65535)<<1;
if(matrix_non_unity)
{
//TODO : need on convert to linear first.
r1= (( r_rmult * r + r_gmult * g + r_bmult * b + r_offset)>>12);
g1= (( g_rmult * r + g_gmult * g + g_bmult * b + g_offset)>>12);
b1= (( b_rmult * r + b_gmult * g + b_bmult * b + b_offset)>>12);
//TODO : need on convert back to log/display curve.
if(r1 < 0) r1 = 0;
if(r1 > 65535) r1 = 65535;
if(g1 < 0) g1 = 0;
if(g1 > 65535) g1 = 65535;
if(b1 < 0) b1 = 0;
if(b1 > 65535) b1 = 65535;
}
else
{
r1 = r;
g1 = g;
b1 = b;
}
y1= (( y_rmult * r1 + y_gmult * g1 + y_bmult * b1)>>shift) + y_offset;
u = (( u_rmult * r1 + u_gmult * g1 + u_bmult * b1)>>shift);
v = (( v_rmult * r1 + v_gmult * g1 + v_bmult * b1)>>shift);
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768)<<1) + g;
b = ((bg - 32768)<<1) + g;
// dither = (rand() & 65535)<<1;
if(matrix_non_unity)
{
//TODO : need on convert to linear first.
r1= (( r_rmult * r + r_gmult * g + r_bmult * b + r_offset)>>12);
g1= (( g_rmult * r + g_gmult * g + g_bmult * b + g_offset)>>12);
b1= (( b_rmult * r + b_gmult * g + b_bmult * b + b_offset)>>12);
//TODO : need on convert back to log/display curve.
if(r1 < 0) r1 = 0;
if(r1 > 65535) r1 = 65535;
if(g1 < 0) g1 = 0;
if(g1 > 65535) g1 = 65535;
if(b1 < 0) b1 = 0;
if(b1 > 65535) b1 = 65535;
}
else
{
r1 = r;
g1 = g;
b1 = b;
}
y2= (( y_rmult * r1 + y_gmult * g1 + y_bmult * b1)>>shift) + y_offset;
u+= (( u_rmult * r1 + u_gmult * g1 + u_bmult * b1)>>shift);
v+= (( v_rmult * r1 + v_gmult * g1 + v_bmult * b1)>>shift);
u >>= 1;
v >>= 1;
u += u_offset;
v += v_offset;
if(y1 < 0) y1 = 0;
if(y1 > 65535) y1 = 65535;
if(y2 < 0) y2 = 0;
if(y2 > 65535) y2 = 65535;
if(u < 0) u = 0;
if(u > 65535) u = 65535;
if(v < 0) v = 0;
if(v > 65535) v = 65535;
*outyuv64++ = y1;
*outyuv64++ = v;
*outyuv64++ = y2;
*outyuv64++ = u;
}
line += pitch;
bayer_line += bayer_pitch;
}
}
else //RGBs
{
line = output;
bayer_line = decoder->RawBayer16;
scale = 256.0;
r_rmult = (mtrx[0][0]) * scale * whitebalance[0];
r_gmult = (mtrx[0][1]) * scale * whitebalance[1];
r_bmult = (mtrx[0][2]) * scale * whitebalance[2];
r_offset= (mtrx[0][3]) * scale;
g_rmult = (mtrx[1][0]) * scale * whitebalance[0];
g_gmult = (mtrx[1][1]) * scale * whitebalance[1];
g_bmult = (mtrx[1][2]) * scale * whitebalance[2];
g_offset= (mtrx[1][3]) * scale;
b_rmult = (mtrx[2][0]) * scale * whitebalance[0];
b_gmult = (mtrx[2][1]) * scale * whitebalance[1];
b_bmult = (mtrx[2][2]) * scale * whitebalance[2];
b_offset= (mtrx[2][3]) * scale;
for(y=0; y<info->height; y++)
{
int i,noisearray[32];
outyuv = line;
bayerptr = bayer_line;
G = bayerptr;
RG = G + bayer_pitch/4;
BG = RG + bayer_pitch/4;
GD = RG + bayer_pitch/4;
for(i=0; i<32; i++)
{
noisearray[i] = (rand() & 127);
}
if(info->format == DECODED_FORMAT_RGB32)
{
for(x=0; x<info->width; x++)
{
int R1,G1,B1;
int rnd = noisearray[x&31];
// *ptr++ = *bayerptr++ >> 8;
// *ptr++ = 0x80;
// *ptr++ = *bayerptr++ >> 8;
// *ptr++ = 0x80;
int r,g,b,g1,g2,gdiff,y1,y2,u,v;
// g = (g1+g2)>>1;
// *g_row_ptr++ = g;
// *rg_row_ptr++ = (r-g+256)>>1;
// *bg_row_ptr++ = (b-g+256)>>1;
// *gdiff_row_ptr++ = (g1-g2+256)>>1;
g = ((*G++)>>1);
r = ((*RG++ + 64)>>0)-(256<<7)+g;
b = ((*BG++ + 64)>>0)-(256<<7)+g;
// gdiff = ((*GD++ + 64)>>7)-256+g;
if(matrix_non_unity)
{
//TODO : need on convert to linear first.
R1 = ((r*r_rmult + g*r_gmult + b*r_bmult + r_offset)>>8) + rnd;
G1 = ((r*g_rmult + g*g_gmult + b*g_bmult + g_offset)>>8) + rnd;
B1 = ((r*b_rmult + g*b_gmult + b*b_bmult + b_offset)>>8) + rnd;
//TODO : need on convert back to log/display curve.
}
else
{
R1 = r + rnd;
G1 = g + rnd;
B1 = b + rnd;
}
R1 >>= 7;
G1 >>= 7;
B1 >>= 7;
if(R1 < 0) R1 = 0;
if(R1 > 255) R1 = 255;
if(G1 < 0) G1 = 0;
if(G1 > 255) G1 = 255;
if(B1 < 0) B1 = 0;
if(B1 > 255) B1 = 255;
*outyuv++ = B1;
*outyuv++ = G1;
*outyuv++ = R1;
*outyuv++ = 255;
}
}
else
{
for(x=0; x<info->width; x++)
{
int R1,G1,B1;
int rnd = noisearray[x&31];
// *ptr++ = *bayerptr++ >> 8;
// *ptr++ = 0x80;
// *ptr++ = *bayerptr++ >> 8;
// *ptr++ = 0x80;
int r,g,b,g1,g2,gdiff,y1,y2,u,v;
//g = (g1+g2)>>1;
// *g_row_ptr++ = g;
// *rg_row_ptr++ = (r-g+256)>>1;
// *bg_row_ptr++ = (b-g+256)>>1;
// *gdiff_row_ptr++ = (g1-g2+256)>>1;
g = ((*G++)>>1);
r = ((*RG++ + 64)>>0)-(256<<7)+g;
b = ((*BG++ + 64)>>0)-(256<<7)+g;
// gdiff = ((*GD++ + 64)>>7)-256+g;
if(matrix_non_unity)
{
//TODO: Need to convert to linear first.
R1 = ((r*r_rmult + g*r_gmult + b*r_bmult + r_offset)>>8) + rnd;
G1 = ((r*g_rmult + g*g_gmult + b*g_bmult + g_offset)>>8) + rnd;
B1 = ((r*b_rmult + g*b_gmult + b*b_bmult + b_offset)>>8) + rnd;
//TODO: Need to convert back to log/display curve.
}
else
{
R1 = r + rnd;
G1 = g + rnd;
B1 = b + rnd;
}
R1 >>= 7;
G1 >>= 7;
B1 >>= 7;
if(R1 < 0) R1 = 0;
if(R1 > 255) R1 = 255;
if(G1 < 0) G1 = 0;
if(G1 > 255) G1 = 255;
if(B1 < 0) B1 = 0;
if(B1 > 255) B1 = 255;
*outyuv++ = B1;
*outyuv++ = G1;
*outyuv++ = R1;
}
}
line += pitch;
bayer_line += bayer_pitch;
}
}
*/
//MEMORY_ALIGNED_FREE(RawBayer16);
}
}
else if ((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
int precision = codec->precision;
// Lazily allocate the intermediate decode buffers for the RGB 4:4:4 /
// RGBA 4:4:4:4 paths.  Both allocations are checked together below; on
// failure the decoder records CODEC_ERROR_MEMORY_ALLOC and returns.
if (decoder->RawBayer16 == NULL)
{
#if _ALLOCATOR
ALLOCATOR *allocator = decoder->allocator;
size_t size = info->width * info->height * num_channels * sizeof(PIXEL);
decoder->RawBayer16 =
(PIXEL16U *)AllocAligned(allocator, size, 16);
#else
decoder->RawBayer16 =
(PIXEL16U *)MEMORY_ALIGNED_ALLOC(info->width * info->height * num_channels * sizeof(PIXEL), 16);
#endif
decoder->RawBayerSize = info->width * info->height * num_channels * sizeof(PIXEL);
}
//#ifdef SHARPENING
if (decoder->RGBFilterBuffer16 == NULL)
{
// 3 components per pixel, or 4 when the source has alpha and the
// requested output format carries alpha
int frame_size = info->width * decoded_height * 4 * 3 * sizeof(PIXEL);
if (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
frame_size = info->width * decoded_height * 4 * 4 * sizeof(PIXEL);
#if _ALLOCATOR
{
ALLOCATOR *allocator = decoder->allocator;
decoder->RGBFilterBuffer16 =
(PIXEL16U *)AllocAligned(allocator, frame_size, 16);
}
#else
decoder->RGBFilterBuffer16 =
(PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, 16);
#endif
decoder->RGBFilterBufferSize = frame_size;
}
//#endif
if (decoder->RawBayer16 == NULL || decoder->RGBFilterBuffer16 == NULL)
{
decoder->error = CODEC_ERROR_MEMORY_ALLOC;
return;
}
//TODO: Replace this memory allocation with a scratch buffer allocation
if (decoder->RawBayer16)
{
uint8_t *outyuv, *line, *source_line;
PIXEL16U *bayerptr;
PIXEL16U *G, *RG, *BG;
int x, y;
int src_pitch = info->width * num_channels * sizeof(PIXEL);
int y_rmult, y_gmult, y_bmult, y_offset; //shift=8;
int u_rmult, u_gmult, u_bmult, u_offset;
int v_rmult, v_gmult, v_bmult, v_offset;
float scale = 256.0;
//int matrix_non_unity = 0;
//int wb_non_unity = 0;
//float curve2lin[2048];
//float lin2curve[2048+512+2];
static float rgb2yuv[3][4] =
{
{0.183f, 0.614f, 0.062f, 16.0f / 256.0f},
{-0.101f, -0.338f, 0.439f, 0.5f},
{0.439f, -0.399f, -0.040f, 0.5}
};
#if _THREADED
TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame, num_channels,
(uint8_t *)decoder->RawBayer16, src_pitch,
info, chroma_offset, precision);
#else
TransformInverseSpatialToRow16u(transform_array, frame, num_channels,
decoder->RawBayer16, src_pitch, info,
&decoder->scratch, chroma_offset, precision);
#endif
if (format == DECODED_FORMAT_YUYV)
{
// Convert planar 16-bit G / R-G / B-G rows to packed 8-bit YUYV 4:2:2.
//
// Fix (review): the fixed-point coefficients were initialized as
// (int)(rgb2yuv[i][j]) WITHOUT the `scale` factor, so every multiplier
// and offset truncated to zero (e.g. (int)0.183f == 0, (int)(16.0f/256.0f)
// == 0) and this branch produced an all-black frame; `scale` was assigned
// but never used.  The coefficients are now scaled by 256 (8.8 fixed
// point), and the chroma dot products use the table's own signs (the local
// rgb2yuv table already stores negative entries), matching the
// DECODED_FORMAT_YU64 branch below and the original pre-rewrite code.
line = output;
source_line = (unsigned char *)decoder->RawBayer16;
scale = 256.0;
y_rmult = (int)((rgb2yuv[0][0]) * scale);
y_gmult = (int)((rgb2yuv[0][1]) * scale);
y_bmult = (int)((rgb2yuv[0][2]) * scale);
y_offset = (int)((rgb2yuv[0][3]) * scale);
u_rmult = (int)((rgb2yuv[1][0]) * scale);
u_gmult = (int)((rgb2yuv[1][1]) * scale);
u_bmult = (int)((rgb2yuv[1][2]) * scale);
u_offset = (int)((rgb2yuv[1][3]) * scale);
v_rmult = (int)((rgb2yuv[2][0]) * scale);
v_gmult = (int)((rgb2yuv[2][1]) * scale);
v_bmult = (int)((rgb2yuv[2][2]) * scale);
v_offset = (int)((rgb2yuv[2][3]) * scale);
for (y = 0; y < info->height; y++)
{
outyuv = line;
// Source planes: G, R-G, B-G within the row-interleaved buffer
bayerptr = (PIXEL16U *)source_line;
G = bayerptr;
RG = G + src_pitch / (2 * num_channels);
BG = RG + src_pitch / (2 * num_channels);
for (x = 0; x < info->width; x += 2)
{
int r, g, b, r1, g1, b1, rg, bg, y1, y2, u, v;
// First pixel of the pair: reconstruct R and B from the differences
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768) << 1) + g;
b = ((bg - 32768) << 1) + g;
r1 = r;
g1 = g;
b1 = b;
// 16-bit RGB -> 8-bit YUV: 8.8 coefficients * 16-bit samples >> 16
y1 = (y_rmult * r1 + y_gmult * g1 + y_bmult * b1 + 32768) >> 16;
u = (u_rmult * r1 + u_gmult * g1 + u_bmult * b1 + 32768) >> 16;
v = (v_rmult * r1 + v_gmult * g1 + v_bmult * b1 + 32768) >> 16;
// Second pixel of the pair
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768) << 1) + g;
b = ((bg - 32768) << 1) + g;
r1 = r;
g1 = g;
b1 = b;
y2 = (y_rmult * r1 + y_gmult * g1 + y_bmult * b1 + 32768) >> 16;
u += (u_rmult * r1 + u_gmult * g1 + u_bmult * b1 + 32768) >> 16;
v += (v_rmult * r1 + v_gmult * g1 + v_bmult * b1 + 32768) >> 16;
// Average the chroma of the pixel pair (4:2:2 subsampling)
u >>= 1;
v >>= 1;
y1 += y_offset;
y2 += y_offset;
u += u_offset;
v += v_offset;
if (y1 < 0) y1 = 0;
if (y1 > 255) y1 = 255;
if (y2 < 0) y2 = 0;
if (y2 > 255) y2 = 255;
if (u < 0) u = 0;
if (u > 255) u = 255;
if (v < 0) v = 0;
if (v > 255) v = 255;
*outyuv++ = y1;
*outyuv++ = u;
*outyuv++ = y2;
*outyuv++ = v;
}
line += pitch;
source_line += src_pitch;
}
}
// Convert planar 16-bit G / R-G / B-G rows to packed 16-bit YU64
// (sample order Y, V, Y, U).  Coefficients are 2.14 fixed point
// (scale = 16384, shift = 14); offsets are scaled x4 to 16-bit range.
else if (format == DECODED_FORMAT_YU64)
{
int shift = 14;
PIXEL16U *outyuv64;
line = output;
source_line = (unsigned char *)decoder->RawBayer16;
scale = 16384.0;
y_rmult = (int)((rgb2yuv[0][0]) * scale);
y_gmult = (int)((rgb2yuv[0][1]) * scale);
y_bmult = (int)((rgb2yuv[0][2]) * scale);
y_offset = (int)((rgb2yuv[0][3]) * scale * 4.0f);
u_rmult = (int)((rgb2yuv[1][0]) * scale);
u_gmult = (int)((rgb2yuv[1][1]) * scale);
u_bmult = (int)((rgb2yuv[1][2]) * scale);
u_offset = (int)((rgb2yuv[1][3]) * scale * 4.0f);
v_rmult = (int)((rgb2yuv[2][0]) * scale);
v_gmult = (int)((rgb2yuv[2][1]) * scale);
v_bmult = (int)((rgb2yuv[2][2]) * scale);
v_offset = (int)((rgb2yuv[2][3]) * scale * 4.0f);
scale = 4096.0;
// NOTE(review): the purpose of the extra +26 on each offset is not
// evident from this code -- presumably a small rounding/black-level
// adjustment carried over from the earlier implementation; confirm.
y_offset += 26;
u_offset += 26;
v_offset += 26;
for (y = 0; y < info->height; y++)
{
outyuv64 = (PIXEL16U *)line;
// Source planes: G, R-G, B-G within the row-interleaved buffer
bayerptr = (PIXEL16U *)source_line;
G = bayerptr;
RG = G + src_pitch / (2 * num_channels);
BG = RG + src_pitch / (2 * num_channels);
for (x = 0; x < info->width; x += 2)
{
int r, g, b, r1, g1, b1, rg, bg, y1, y2, u, v;
// First pixel of the pair: reconstruct R and B from the differences
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768) << 1) + g;
b = ((bg - 32768) << 1) + g;
r1 = r;
g1 = g;
b1 = b;
y1 = (( y_rmult * r1 + y_gmult * g1 + y_bmult * b1) >> shift) + y_offset;
u = (( u_rmult * r1 + u_gmult * g1 + u_bmult * b1) >> shift);
v = (( v_rmult * r1 + v_gmult * g1 + v_bmult * b1) >> shift);
// Second pixel of the pair
g = (*G++);
rg = (*RG++);
bg = (*BG++);
r = ((rg - 32768) << 1) + g;
b = ((bg - 32768) << 1) + g;
r1 = r;
g1 = g;
b1 = b;
y2 = (( y_rmult * r1 + y_gmult * g1 + y_bmult * b1) >> shift) + y_offset;
u += (( u_rmult * r1 + u_gmult * g1 + u_bmult * b1) >> shift);
v += (( v_rmult * r1 + v_gmult * g1 + v_bmult * b1) >> shift);
// Average the chroma of the pixel pair (4:2:2 subsampling)
u >>= 1;
v >>= 1;
u += u_offset;
v += v_offset;
if (y1 < 0) y1 = 0;
if (y1 > 65535) y1 = 65535;
if (y2 < 0) y2 = 0;
if (y2 > 65535) y2 = 65535;
if (u < 0) u = 0;
if (u > 65535) u = 65535;
if (v < 0) v = 0;
if (v > 65535) v = 65535;
// YU64 sample order: Y, V, Y, U
*outyuv64++ = y1;
*outyuv64++ = v;
*outyuv64++ = y2;
*outyuv64++ = u;
}
line += pitch;
source_line += src_pitch;
}
}
else //RGBs
{
line = output;
source_line = (unsigned char *)decoder->RawBayer16;
for (y = 0; y < info->height; y++)
{
int i, noisearray[32];
unsigned short *rgb16 = (unsigned short *)line;
outyuv = line;
bayerptr = (PIXEL16U *)source_line;
G = bayerptr;
RG = G + src_pitch / (2 * num_channels);
BG = RG + src_pitch / (2 * num_channels);
for (i = 0; i < 32; i++)
{
noisearray[i] = (rand() & 255);
}
if (info->format == DECODED_FORMAT_RGB32)
{
for (x = 0; x < info->width; x++)
{
int R1, G1, B1;
int rnd = noisearray[x & 31];
#if 0
G1 = (*G++) + rnd;
R1 = ((*RG++ << 1) - (128 << 9)) + G1;
B1 = ((*BG++ << 1) - (128 << 9)) + G1;
#else
G1 = (*G++) + rnd;
R1 = (*RG++) + rnd;
B1 = (*BG++) + rnd;
#endif
R1 >>= 8;
G1 >>= 8;
B1 >>= 8;
if (R1 < 0) R1 = 0;
if (R1 > 255) R1 = 255;
if (G1 < 0) G1 = 0;
if (G1 > 255) G1 = 255;
if (B1 < 0) B1 = 0;
if (B1 > 255) B1 = 255;
*outyuv++ = B1;
*outyuv++ = G1;
*outyuv++ = R1;
*outyuv++ = 255;
}
}
else if (info->format == DECODED_FORMAT_RGB24)
{
for (x = 0; x < info->width; x++)
{
int R1, G1, B1;
int rnd = noisearray[x & 31];
#if 0
G1 = (*G++) + rnd;
R1 = ((*RG++ << 1) - (128 << 9)) + G1;
B1 = ((*BG++ << 1) - (128 << 9)) + G1;
#else
G1 = (*G++) + rnd;
R1 = (*RG++) + rnd;
B1 = (*BG++) + rnd;
#endif
R1 >>= 8;
G1 >>= 8;
B1 >>= 8;
if (R1 < 0) R1 = 0;
if (R1 > 255) R1 = 255;
if (G1 < 0) G1 = 0;
if (G1 > 255) G1 = 255;
if (B1 < 0) B1 = 0;
if (B1 > 255) B1 = 255;
*outyuv++ = B1;
*outyuv++ = G1;
*outyuv++ = R1;
}
}
else if (info->format == DECODED_FORMAT_RG48)
{
for (x = 0; x < info->width; x++)
{
int R1, G1, B1;
G1 = (*G++);
R1 = (*RG++);
B1 = (*BG++);
*rgb16++ = R1;
*rgb16++ = G1;
*rgb16++ = B1;
}
}
line += pitch;
source_line += src_pitch;
}
}
//MEMORY_ALIGNED_FREE(RawBayer16);
}
}
else // Output the frame in one of the RGB 8-bit formats
{
//char *buffer = decoder->buffer;
//size_t buffer_size = decoder->buffer_size;
// Invert the bottom wavelet and convert the output to the requested color format
#if _THREADED
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
output, pitch,
info, chroma_offset, precision,
InvertHorizontalStrip16sYUVtoRGB);
#else
TransformInverseSpatialToBuffer(decoder, transform_array, frame, num_channels, output, pitch,
&info2, &decoder->scratch, chroma_offset, precision);
#endif
}
}
}
#if TIMING
// Count the number of progressive frames that were decoded
progressive_decode_count++;
#endif
}
STOP(tk_inverse);
#ifdef ADOBE_MEMORY_FUNCTIONS
if ((decoder->RawBayer16 && decoder->RawBayerSize > 2048 * 1152 * 2) ||
(decoder->RGBFilterBuffer16 && decoder->RGBFilterBufferSize > 2048 * 1152 * 2))
{
#if _ALLOCATOR
if (decoder->RawBayer16)
{
FreeAligned(decoder->allocator, decoder->RawBayer16);
decoder->RawBayer16 = NULL;
decoder->RawBayerSize = NULL;
}
if (decoder->RGBFilterBuffer16)
{
FreeAligned(decoder->allocator, decoder->RGBFilterBuffer16);
decoder->RGBFilterBuffer16 = NULL;
decoder->RGBFilterBufferSize = NULL;
}
#else
if (decoder->RawBayer16)
{
MEMORY_ALIGNED_FREE(decoder->RawBayer16);
decoder->RawBayer16 = NULL;
decoder->RawBayerSize = NULL;
}
if (decoder->RGBFilterBuffer16)
{
MEMORY_ALIGNED_FREE(decoder->RGBFilterBuffer16);
decoder->RGBFilterBuffer16 = NULL;
decoder->RGBFilterBufferSize = NULL;
}
#endif
}
#endif
#if (0 && DEBUG)
if (logfile)
{
//uint8_t *subimage = output;
uint8_t *subimage = output + (2 * info->width) - 16;
DumpArray8u("YUV Image", subimage, 16, 16, pitch, logfile);
}
#endif
#if (0 && DEBUG)
if (logfile)
{
fprintf(logfile, "Exit ReconstructFrameToBuffer\n");
}
#endif
#if (0 && DEBUG && _WIN32)
_CrtCheckMemory();
#endif
}
// Reconstruct the frame to quarter resolution at full frame rate.
// Inverts the temporal transform one output row at a time (even rows for
// frame_index 0, odd rows for frame_index 1) into scratch space, then
// converts each row of intermediate results into the requested output format.
void ReconstructQuarterFrame(DECODER *decoder, int num_channels,
int frame_index, uint8_t *output, int output_pitch,
FRAME_INFO *info, const SCRATCH *scratch, int precision)
{
#if (DEBUG)
FILE *logfile = decoder->logfile;
#endif
TRANSFORM **transform_array = decoder->transform;
int output_width = info->width;
int output_height = info->height;
// Per-channel row pointers into the lowpass bands of the two temporal wavelets
PIXEL *low_row_ptr[CODEC_MAX_CHANNELS];
PIXEL *high_row_ptr[CODEC_MAX_CHANNELS];
uint8_t *output_row_ptr = output;
// Per-channel pitches in units of pixels (converted from bytes below)
int low_pitch[CODEC_MAX_CHANNELS];
int high_pitch[CODEC_MAX_CHANNELS];
int channel;
int row;
// Value used for filling the fourth channel in ARGB output
int alpha = 255;
int format = COLORFORMAT(info);
int color_space = COLORSPACE(info);
int decoded_format = DECODEDFORMAT(info);
//bool inverted = false;
// The pixels are descaled in the inverse temporal transform
//const int descale = 0;
// Shift the intermediate results to 16-bit pixels
const int shift_yu64 = 8;
// Push the scratch space state to allocate a new section
char *buffer = scratch->free_ptr;
#if DEBUG
size_t buffer_size = scratch->free_size;
#endif
// Initialize a pointer for allocating space in the buffer
PIXEL *bufptr = (PIXEL *)buffer;
// Array of pointers to the start of each channel in the intermediate results
PIXEL *channel_row_ptr[CODEC_MAX_CHANNELS];
// Check that there is enough space for the intermediate results from each channel
#if DEBUG
assert(output_width * sizeof(PIXEL) < buffer_size);
#endif
ComputeCube(decoder);
// Get pointers into the wavelets for each channel
for (channel = 0; channel < num_channels; channel++)
{
// Get the lowpass bands from the two wavelets for the two halves of the temporal wavelet
IMAGE *low_wavelet = transform_array[channel]->wavelet[4];
IMAGE *high_wavelet = transform_array[channel]->wavelet[3];
// Get the pointers to the first row in each lowpass band
low_row_ptr[channel] = low_wavelet->band[0];
high_row_ptr[channel] = high_wavelet->band[0];
low_pitch[channel] = low_wavelet->pitch / sizeof(PIXEL);
high_pitch[channel] = high_wavelet->pitch / sizeof(PIXEL);
// Force the row of intermediate results to be properly aligned
bufptr = (PIXEL *)ALIGN16(bufptr);
// Allocate space for one row of results for this channel
channel_row_ptr[channel] = bufptr;
bufptr += low_wavelet->width;
// Check that the row of intermediate results is properly aligned
assert(ISALIGNED16(channel_row_ptr[channel]));
}
// Invert the image if required
switch (decoded_format)
{
case DECODED_FORMAT_RGB24:
case DECODED_FORMAT_RGB32:
// These formats are written bottom-up: start at the last row and negate the pitch
output_row_ptr += (output_height - 1) * output_pitch;
output_pitch = NEG(output_pitch);
}
//HACK: Seems to work, I don't know why. //DAN20070304
if (precision == 12) precision = 8;
// Apply the inverse temporal transform to the lowpass and highpass rows
for (row = 0; row < output_height; row++)
{
// Most of the color conversion routines use zero descaling
int descale = 0;
//char *bufptr = buffer;
for (channel = 0; channel < num_channels; channel++)
{
if (frame_index == 0)
{
// Invert the temporal transform at quarter resolution to get the even row
InvertTemporalQuarterEvenRow16s(low_row_ptr[channel], high_row_ptr[channel],
channel_row_ptr[channel], output_width, precision);
}
else
{
assert(frame_index == 1);
// Invert the temporal transform at quarter resolution to get the odd row
InvertTemporalQuarterOddRow16s(low_row_ptr[channel], high_row_ptr[channel],
channel_row_ptr[channel], output_width, precision);
}
// Advance to the next row in each band for the temporal transform
low_row_ptr[channel] += low_pitch[channel];
high_row_ptr[channel] += high_pitch[channel];
}
if (decoder->use_active_metadata_decoder)
{
uint8_t *channeldata[TRANSFORM_MAX_CHANNELS]; // used in quarter res decodes
int channelpitch[TRANSFORM_MAX_CHANNELS]; // used in quarter res decodes
int i;
// Process a single row at a time through the active metadata path
FRAME_INFO info2;
memcpy(&info2, info, sizeof(FRAME_INFO));
info2.height = 1;
for (i = 0; i < num_channels; i++)
{
channeldata[i] = (uint8_t *)channel_row_ptr[i];
channelpitch[i] = 0;
}
#if 1
// Clamp each signed 16-bit coefficient into the 12-bit range [0, 4095] and
// shift left by 4 to scale to 16 bits, 8 values per SSE2 iteration.
// The saturating add of (0x7fff - 0x0fff) followed by the unsigned
// saturating subtract performs the clamp (see scalar fallback below).
{
__m128i *Y = (__m128i *)channeldata[0];
__m128i *U = (__m128i *)channeldata[1];
__m128i *V = (__m128i *)channeldata[2];
__m128i v;
int x;
__m128i rgb_limit_epi16 = _mm_set1_epi16(0x7fff - 0x0fff);
for (x = 0; x < info->width; x += 8)
{
v = _mm_load_si128(Y);
v = _mm_adds_epi16(v, rgb_limit_epi16);
v = _mm_subs_epu16(v, rgb_limit_epi16);
v = _mm_slli_epi16(v, 4);
_mm_store_si128(Y++, v);
}
// The chroma channels are processed at half width
for (x = 0; x < info->width / 2; x += 8)
{
v = _mm_load_si128(U);
v = _mm_adds_epi16(v, rgb_limit_epi16);
v = _mm_subs_epu16(v, rgb_limit_epi16);
v = _mm_slli_epi16(v, 4);
_mm_store_si128(U++, v);
}
for (x = 0; x < info->width / 2; x += 8)
{
v = _mm_load_si128(V);
v = _mm_adds_epi16(v, rgb_limit_epi16);
v = _mm_subs_epu16(v, rgb_limit_epi16);
v = _mm_slli_epi16(v, 4);
_mm_store_si128(V++, v);
}
}
#else
//non SSE2
for (x = 0; x < info->width * 2; x++)
{
int val = *gptr++;
if (val < 0) val = 0;
if (val > 4095) val = 4095;
val <<= 4;
*src++ = val;
}
src = scanline2;
#endif
Row16uQuarter2OutputFormat(decoder, &info2, 0, output_row_ptr, output_pitch,
decoder->gop_frame_num/*0 frame*/, scratch->free_ptr, scratch->free_size, false, channeldata, channelpitch);
}
else
{
//DAN20081203 -- fix for 444 decodes in AE32-bit float
decoder->frame.white_point = 16;
//decoder->frame.signed_pixels = 0;
// Convert the rows of luma and chroma into the output format
switch (format)
{
case COLOR_FORMAT_YUYV:
case COLOR_FORMAT_UYVY:
// Pack the intermediate results into the output row
if (decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
{
assert(0);//need quarter res BAYER To YUV decoder
}
else if ((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
// assert(0);//need quarter res RGB To YUV decoder
ConvertRGB2YUV( channel_row_ptr[1], channel_row_ptr[0], channel_row_ptr[2],
output_width, output_width, output_width,
output_row_ptr, output_pitch,
info->width, 1, 10, info->colorspace, format);
}
else
{
ConvertUnpacked16sRowToPacked8u(channel_row_ptr, num_channels, output_row_ptr, output_width, format);
}
break;
case COLOR_FORMAT_RGB24:
if ((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
ConvertRGB48toRGB24( channel_row_ptr[1], channel_row_ptr[0], channel_row_ptr[2],
output_width, output_width, output_width,
output_row_ptr, output_pitch,
info->width, 1, 10, 0);
}
else
{
// Convert the intermediate results into a row of RGB24
ConvertUnpacked16sRowToRGB24(channel_row_ptr, num_channels, output_row_ptr, output_width,
descale, format, color_space);
}
break;
case COLOR_FORMAT_RGB32:
if ((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
ConvertRGBA48toRGB32(channel_row_ptr[1], channel_row_ptr[0], channel_row_ptr[2], NULL,
output_width,
output_row_ptr, output_pitch,
info->width, 1, 10, 0, 3/*only 3 chhanel not 4 for alpha*/);
}
else
{
// Convert the intermediate results into a row of RGBA32
ConvertUnpacked16sRowToRGB32(channel_row_ptr, num_channels, output_row_ptr, output_width,
descale, format, color_space, alpha);
}
break;
case COLOR_FORMAT_YU64:
case COLOR_FORMAT_V210:
// Convert the intermediate results into a row of YU64
ConvertUnpacked16sRowToYU64(channel_row_ptr, num_channels, output_row_ptr, output_width,
shift_yu64, precision, format);
break;
case COLOR_FORMAT_B64A:
if ((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
// Convert the intermediate results into a row of RGBA with 16 bits per component
descale = 2;
ConvertUnpacked16sRowToB64A(channel_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision);
}
else
{
ConvertUnpackedYUV16sRowToRGB48(channel_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision, COLOR_FORMAT_B64A, color_space);
}
break;
case COLOR_FORMAT_R210:
case COLOR_FORMAT_DPX0:
case COLOR_FORMAT_RG30:
case COLOR_FORMAT_AR10:
case COLOR_FORMAT_AB10:
if ((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
// Convert the intermediate results into a row of RGBA with 16 bits per component
descale = 2;
ConvertUnpacked16sRowToRGB30(channel_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision, format, color_space);
}
else
{
ConvertUnpackedYUV16sRowToRGB48(channel_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision, format, color_space);
}
break;
case COLOR_FORMAT_RG48:
// Convert the intermediate results into a row of RGBA with 16 bits per component
descale = 2;
ConvertUnpacked16sRowToRGB48(channel_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision);
break;
case COLOR_FORMAT_RG64:
// Convert the intermediate results into a row of RGBA with 16 bits per component
descale = 2;
ConvertUnpacked16sRowToRGBA64(channel_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision);
break;
default:
#if (DEBUG)
if (logfile)
{
fprintf(logfile, "ReconstructQuarterFrame bad color format: %d\n", format);
}
#endif
assert(0);
break;
}
}
// Advance the output row pointer
output_row_ptr += output_pitch;
}
}
// Convert the quarter resolution lowpass channels to the specified output format.
// Reads the lowpass band of wavelet[1] for each channel and converts it row by
// row into the caller's output buffer, either through the threaded active
// metadata path or through the per-format conversion routines below.
void ConvertQuarterFrameToBuffer(DECODER *decoder, TRANSFORM **transform_array, int num_channels,
uint8_t *output, int output_pitch,
FRAME_INFO *info, int precision)
{
int output_width = info->width;
int output_height = info->height;
// Per-channel row pointers into the lowpass bands (pitch in pixels)
PIXEL *input_row_ptr[CODEC_MAX_CHANNELS];
uint8_t *output_row_ptr = output;
int input_pitch[CODEC_MAX_CHANNELS];
int channel;
int row;
// Value used for filling the fourth channel in ARGB output
int alpha = 255;
int format = COLORFORMAT(info);
int color_space = COLORSPACE(info);
int decoded_format = DECODEDFORMAT(info);
//bool inverted = false;
// Get pointers into the wavelets for each channel
for (channel = 0; channel < num_channels; channel++)
{
// Get the lowpass bands from the wavelets with quarter resolution
const int wavelet_index = 1;
IMAGE *wavelet = transform_array[channel]->wavelet[wavelet_index];
// The wavelet should have been reconstructed
assert(wavelet != NULL);
// The lowpass band should be valid
assert((wavelet->band_valid_flags & BAND_VALID_MASK(0)) != 0);
// Get the pointers to the first row in each lowpass band
input_row_ptr[channel] = wavelet->band[0];
input_pitch[channel] = wavelet->pitch / sizeof(PIXEL);
}
// Invert the image if required
switch (decoded_format)
{
case DECODED_FORMAT_RGB24:
case DECODED_FORMAT_RGB32:
// These formats are written bottom-up: start at the last row and negate the pitch
output_row_ptr += (output_height - 1) * output_pitch;
output_pitch = NEG(output_pitch);
}
ComputeCube(decoder);
//HACK DAN20110122 -- some formats will not directly decode so need to use the AM route
{
if ( format == COLOR_FORMAT_YU64 ||
format == COLOR_FORMAT_V210 ||
format == COLOR_FORMAT_R408 ||
format == COLOR_FORMAT_V408)
{
if ( (decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
// RGB-encoded sources cannot be converted directly to these YUV
// formats here, so route them through the active metadata decoder
decoder->use_active_metadata_decoder = true;
decoder->apply_color_active_metadata = true;
}
}
}
if (decoder->use_active_metadata_decoder)
{
#if _THREADED
{
// Post the whole-frame conversion as a job for the worker thread pool
WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
if (decoder->worker_thread.pool.thread_count == 0)
{
CreateLock(&decoder->worker_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->worker_thread.pool,
decoder->thread_cntrl.capabilities >> 16/*cpus*/,
WorkerThreadProc,
decoder);
}
#endif
// Post a message to the mailbox
mailbox->output = output_row_ptr;
mailbox->pitch = output_pitch;
mailbox->framenum = 0;
for (channel = 0; channel < num_channels; channel++)
{
mailbox->channeldata[channel] = (uint8_t *)input_row_ptr[channel];
mailbox->channelpitch[channel] = input_pitch[channel] * sizeof(PIXEL);
}
memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
mailbox->jobType = JOB_TYPE_OUTPUT;
decoder->RGBFilterBufferPhase = 1;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
decoder->RGBFilterBufferPhase = 0;
}
#endif
}
else
{
//DAN20081203 -- fix for 444 decodes in AE32-bit float
decoder->frame.white_point = 16;
//decoder->frame.signed_pixels = 0;
// Convert each row to the specified output format
for (row = 0; row < output_height; row++)
{
// Right shift for converting lowpass coefficients to pixels
int descale = 4;
// Mask off the high bit of the format code before dispatching
switch (format & 0x7fffffff)
{
case COLOR_FORMAT_YUYV:
case COLOR_FORMAT_UYVY:
if ( (decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
// assert(0);//need quarter res RGB To YUV decoder
ConvertRGB2YUV( input_row_ptr[1], input_row_ptr[0], input_row_ptr[2],
output_width, output_width, output_width,
output_row_ptr, output_pitch,
info->width, 1, 14, info->colorspace, format);
}
else
{
// Descale and pack the pixels in each output row
CopyQuarterRowToBuffer(input_row_ptr, num_channels, output_row_ptr, output_width,
precision, format);
}
break;
case COLOR_FORMAT_RGB24:
if ( (decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
ConvertRGB48toRGB24(input_row_ptr[1], input_row_ptr[0], input_row_ptr[2],
output_width, output_width, output_width,
output_row_ptr, output_pitch,
info->width, 1, 14, 0);
}
else
{
// Convert the intermediate results into a row of RGB24
ConvertUnpacked16sRowToRGB24(input_row_ptr, num_channels, output_row_ptr, output_width, descale, format, color_space);
}
break;
case COLOR_FORMAT_RGB32:
case COLOR_FORMAT_RGB32_INVERTED:
if ( (decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
// NOTE(review): input_row_ptr[3] is only initialized when
// num_channels > 3; presumably ignored by the callee for
// three-channel input -- verify against ConvertRGBA48toRGB32
ConvertRGBA48toRGB32( input_row_ptr[1], input_row_ptr[0], input_row_ptr[2], input_row_ptr[3],
output_width,
output_row_ptr, output_pitch,
info->width, 1, 14, 0, num_channels);
}
else
{
// Convert the intermediate results into a row of RGBA32
ConvertUnpacked16sRowToRGB32(input_row_ptr, num_channels, output_row_ptr, output_width,
descale, format, color_space, alpha);
}
break;
case COLOR_FORMAT_YU64:
case COLOR_FORMAT_V210:
if ( (decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
{
//TODO RGB to YUV Quarter RES DAN20110120 - handle above with HACK DAN20110122
//
}
else
{
// Convert the intermediate results into a row of YU64
ConvertUnpacked16sRowToYU64(input_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision, format);
}
break;
case COLOR_FORMAT_B64A:
// Convert the intermediate results to a row of ARGB with 16 bits per pixel
descale = 2;
ConvertUnpacked16sRowToB64A(input_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision);
break;
case COLOR_FORMAT_R210:
case COLOR_FORMAT_DPX0:
case COLOR_FORMAT_RG30:
case COLOR_FORMAT_AR10:
case COLOR_FORMAT_AB10:
// Convert the intermediate results to a row of ARGB with 16 bits per pixel
descale = 2;
ConvertUnpacked16sRowToRGB30(input_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision, format, color_space);
break;
case COLOR_FORMAT_RG48:
// Convert the intermediate results into a row of RGBA with 16 bits per component
descale = 2;
ConvertUnpacked16sRowToRGB48(input_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision);
break;
case COLOR_FORMAT_RG64:
// Convert the intermediate results into a row of RGBA with 16 bits per component
descale = 2;
ConvertUnpacked16sRowToRGBA64(input_row_ptr, num_channels, output_row_ptr, output_width,
descale, precision);
break;
default:
assert(0);
break;
}
// Advance the input row pointers
for (channel = 0; channel < num_channels; channel++)
{
input_row_ptr[channel] += input_pitch[channel];
}
// Advance the output row pointer
output_row_ptr += output_pitch;
}
}
}
// Release all resources allocated by the decoder.
// The transform array and count are accepted for interface compatibility;
// this implementation frees everything through ClearDecoder.
void DecodeRelease(DECODER *decoder, TRANSFORM *transform[], int num_transforms)
{
#if _TIMING && 0
    FILE *logfile = decoder->logfile;
    uint32_t frame_count = decoder->frame_count;

    // Fixed: the original had a stray '\' line continuation at the end of
    // this condition, accidentally splicing it with the following line
    if (logfile != NULL && frame_count > 0)
    {
#ifdef _WIN32
        PrintStatistics(logfile, frame_count, NULL, TIMING_CSV_FILENAME);
#else
        PrintStatistics(logfile, frame_count, NULL, NULL);
#endif
    }
#endif

    // Free the data structures allocated for decoding
    ClearDecoder(decoder);
}
// Mark the decoder's metadata as stale so it is rebuilt on the next decode.
// The request is mirrored to the parallel decoder when one is attached.
void DecodeForceMetadataRefresh(DECODER *decoder)
{
    decoder->cfhddata.force_metadata_refresh = true;

    // Propagate the refresh request to the parallel decoder (if any)
    if (decoder->parallelDecoder)
    {
        decoder->parallelDecoder->cfhddata.force_metadata_refresh = true;
    }
}
// Store the decoder option flags on the decoder instance.
void SetDecoderFlags(DECODER *decoder, uint32_t flags)
{
#if (DEBUG)
    FILE *logfile = decoder->logfile;
#endif

    // Set the decoder flags
    decoder->flags = flags;

#if (0 && DEBUG)
    if (logfile)
    {
        // Fixed: the original used "%p" with a uint32_t argument, which is a
        // format-specifier mismatch (undefined behavior); print as 32-bit hex
        fprintf(logfile, "Decoder flags: 0x%08x\n", decoder->flags);
    }
#endif
}
// Configure the decoder's output frame: dimensions, pixel format, resolution.
// The 13-bit white point formats (WP13 and W13A) carry pixel data scaled to a
// 13-bit white point; all other formats use a 16-bit white point.
void SetDecoderFormat(DECODER *decoder, int width, int height, int format, int resolution)
{
    // Need to modify the codec to use the decoding format
    decoder->frame.width = width;
    decoder->frame.height = height;

    // Both the output format and the working format track the requested format
    //decoder->frame.signed_pixels = 0/1;
    decoder->frame.output_format = format;
    decoder->frame.format = format;

    if (format == DECODED_FORMAT_WP13 || format == DECODED_FORMAT_W13A)
    {
        decoder->frame.white_point = 13;
    }
    else
    {
        decoder->frame.white_point = 16;
    }

    decoder->frame.resolution = resolution;
    decoder->frame.pixel_size = PixelSize(decoder->frame.format);
}
// Initialize the decoder's capabilities word: CPU feature bits in the low
// half and the number of usable processors packed into the upper 16 bits.
void SetDecoderCapabilities(DECODER *decoder)
{
    int processor_count;
#ifdef _WIN32
    int limit_cpus = 32;
#else
    int limit_cpus = 32; // AJA spins off too many
#endif

    // Set the capabilities that are most likely supported by the Intel Mac
    decoder->thread_cntrl.capabilities = (_CPU_FEATURE_MMX | _CPU_FEATURE_SSE | _CPU_FEATURE_SSE2);

    if (decoder->thread_cntrl.limit)
    {
        // An explicit thread limit overrides the default cap
        limit_cpus = decoder->thread_cntrl.limit;
    }
    else if (decoder->thread_cntrl.affinity)
    {
        // Count the processors enabled in the affinity mask (32 bits)
        int bit;
        limit_cpus = 0;
        for (bit = 0; bit < 32; bit++)
        {
            limit_cpus += (int)((decoder->thread_cntrl.affinity >> bit) & 1);
        }
    }

    // Clamp the detected processor count to the computed limit
    processor_count = GetProcessorCount();
    if (processor_count > limit_cpus)
        processor_count = limit_cpus;

#if (0 && DEBUG)
    // Set the number of processors (for debugging)
    //processor_count = 8;
    processor_count = 1;
    fprintf(stderr, "Limit processors to %d\n", processor_count);
#endif

    // Pack the processor count into the upper 16 bits of the capabilities
    decoder->thread_cntrl.capabilities |= (processor_count << 16);
}
// Return the capabilities word set by SetDecoderCapabilities
// (CPU feature bits in the low half, processor count in the upper 16 bits).
int GetDecoderCapabilities(DECODER *decoder)
{
    return (decoder->thread_cntrl.capabilities);
}
// Set the decoder's color space flags; returns true if the flags were valid.
// The lower bound check against MIN_DECODED_COLOR_SPACE was already disabled
// in the original, so only the upper bound is enforced.
bool SetDecoderColorFlags(DECODER *decoder, uint32_t color_flags)
{
    // Reject color flags above the maximum supported color space
    if (color_flags > MAX_DECODED_COLOR_SPACE)
    {
        // The specified color flags were not valid
        return false;
    }

    decoder->frame.colorspace = color_flags;

    // Indicate that the color flags were set as specified
    return true;
}
// Compute the resolution corresponding to the specified combination of input
// and output dimensions. Returns DECODED_RESOLUTION_UNSUPPORTED when the
// output size is not the full, half, or quarter size of the input.
int DecodedResolution(int input_width, int input_height, int output_width, int output_height)
{
    // Output height can be negative for inverted RGB
    output_height = abs(output_height);

    // Full resolution: output matches the input exactly
    if (output_width == input_width && output_height == input_height)
    {
        return DECODED_RESOLUTION_FULL;
    }

    // Half resolution: both dimensions halved (truncating division)
    if (output_width == input_width / 2 && output_height == input_height / 2)
    {
        return DECODED_RESOLUTION_HALF;
    }

    // Quarter resolution: both dimensions halved twice
    if (output_width == (input_width / 2) / 2 && output_height == (input_height / 2) / 2)
    {
        return DECODED_RESOLUTION_QUARTER;
    }

    return DECODED_RESOLUTION_UNSUPPORTED;
}
// Compute the decoded frame dimensions for the specified decoding resolution
// by dividing the encoded dimensions by the resolution's scale factor.
void ComputeDecodedDimensions(int encoded_width, int encoded_height, int decoded_resolution,
int *decoded_width_out, int *decoded_height_out)
{
    // Divisor applied to both encoded dimensions for this resolution
    int divisor;

    switch (decoded_resolution)
    {
        case DECODED_RESOLUTION_HALF:
            divisor = 2;
            break;

        case DECODED_RESOLUTION_QUARTER:
            divisor = 4;
            break;

        case DECODED_RESOLUTION_LOWPASS_ONLY:
            //TODO: Check that the lowpass dimensions are correct
            divisor = 8;
            break;

        default:
            assert(0);
            // Fall through to full resolution (matches the original behavior,
            // where the default case fell into DECODED_RESOLUTION_FULL)
        case DECODED_RESOLUTION_FULL:
            divisor = 1;
            break;
    }

    *decoded_width_out = encoded_width / divisor;
    *decoded_height_out = encoded_height / divisor;
}
// Return true if the specified resolution is supported by the decoder
// (full, half, or quarter resolution).
bool IsDecodedResolution(int resolution)
{
    return (resolution == DECODED_RESOLUTION_FULL ||
            resolution == DECODED_RESOLUTION_HALF ||
            resolution == DECODED_RESOLUTION_QUARTER);
}
// Return true if the encoded sample is a key frame.
// Scans the first twenty tag-value pairs of the sample for the sample type
// tag and classifies the sample type; returns false if no type tag is found.
bool IsSampleKeyFrame(uint8_t *sample, size_t size)
{
    // Search the first twenty tags for the sample type
    const int num_tags = 20;
    BITSTREAM bitstream;
    int i;

    InitBitstreamBuffer(&bitstream, sample, size, BITSTREAM_ACCESS_READ);

    for (i = 0; i < num_tags && size > 0; i++, size -= sizeof(TAGVALUE))
    {
        TAGVALUE segment = GetSegment(&bitstream);

        if (segment.tuple.tag != CODEC_TAG_SAMPLE)
        {
            continue;
        }

        // Found the sample type: classify it
        switch (segment.tuple.value)
        {
            case SAMPLE_TYPE_GROUP:
            case SAMPLE_TYPE_FIRST:
            case SAMPLE_TYPE_IFRAME:
                // Intra-coded samples are key frames
                return true;

            case SAMPLE_TYPE_GROUP_TRAILER:
            case SAMPLE_TYPE_NONE:
            case SAMPLE_TYPE_ERROR:
            case SAMPLE_TYPE_CHANNEL:
                assert(0); // Unexpected situation
                // Report the sample as a non-key frame
                return false;

            default:
                // Sequence headers, frames, second/P frames, and anything else
                return false;
        }
    }

    // No sample type tag was found within the first twenty tags
    return false;
}
// Return the number of the most recently decoded frame, or zero if the
// decoder pointer is null.
uint32_t DecodedFrameNumber(DECODER *decoder)
{
    // Check for a null decoder BEFORE touching any of its members.
    // The original computed &decoder->codec before this check; member access
    // through a null pointer is undefined behavior and allows the compiler
    // to delete the null test entirely.
    if (decoder == NULL) return 0;

    return decoder->codec.frame_number;
}
/***** Start of the new code for the finite state machine (FSM) decoder *****/
#if _PROCESSOR_DISPATCH
// Dispatch stub: with the Intel compiler's cpu_dispatch mechanism, the
// Pentium_4 or Generic implementation of ZeroHighPassRow is selected at
// runtime; this body is never executed.
__declspec(cpu_dispatch(Pentium_4, Generic))
static inline void ZeroHighPassRow(PIXEL *rowptr, int length)
{
// Stub routine for processor specific dispatch
}
#endif
#if _PROCESSOR_GENERIC
#if _PROCESSOR_DISPATCH
__declspec(cpu_specific(Generic))
#endif
// Zero a row of highpass coefficients using MMX 8-byte stores.
// This version assumes that the row is a multiple of 8 bytes
static inline void ZeroHighPassRow(PIXEL *rowptr, int length)
{
int count;
// Check that the row starts on a 16-byte boundary
//assert(ISALIGNED(rowptr, 16));
// Check that the row length (in bytes) is a multiple of 8 byte blocks
assert(ISALIGNED(length, 8));
// Convert the length from pixels to 8-byte blocks
count = (length >> 3);
// This code assumes that at least one 8-byte block will be zeroed
assert(count > 0);
// NOTE(review): 32-bit x86 MSVC-style inline assembly (MMX); not portable
// to 64-bit builds. mm0 is zeroed and stored 8 bytes at a time.
__asm
{
pxor mm0, mm0 // Zero an 8-byte MMX register
mov eax, rowptr // Load the pointer to the memory block
mov ebx, count // Load the count of 8-byte blocks
loop: movq [eax], mm0 // Write 8 bytes of zeros
add eax, 8 // Advance to the next 8 byte block
sub ebx, 1 // Decrement the number of blocks
jg loop
}
//_mm_empty();
}
#endif
#if _PROCESSOR_PENTIUM_4
#if _PROCESSOR_DISPATCH
__declspec(cpu_specific(Pentium_4))
#endif
#ifndef _WIN64
// Zero a row of highpass coefficients (32-bit build).
// This version assumes that the row is a multiple of 16 bytes
static inline void ZeroHighPassRow(PIXEL *rowptr, int length)
{
int count;
// Check that the row starts on a 16-byte boundary
assert(ISALIGNED(rowptr, 16));
// Check that the row length (in bytes) is a multiple of 16 byte blocks
assert(ISALIGNED(length, 16));
// Convert the length from pixels to 16-byte blocks
count = (length >> 4);
// This code assumes that at least one 16-byte block will be zeroed
// (count is only used for this sanity check; memset does the work)
assert(count > 0);
memset(rowptr, 0, length);
}
#else
// Zero a row of highpass coefficients (64-bit build).
// This version assumes that the row is a multiple of 16 bytes
static inline void ZeroHighPassRow(PIXEL *rowptr, int length)
{
// Check that the row starts on a 16-byte boundary
assert(ISALIGNED(rowptr, 16));
// Check that the row length (in bytes) is a multiple of 16 byte blocks
assert(ISALIGNED(length, 16));
memset(rowptr, 0, length);
}
#endif
#endif
#if (0 && _DEBUG)
// Functions for the finite state machine decoder (debug version)
static FSMENTRY *GetFSMTableEntry(FSM *fsm, int index)
{
// Return the address of the next table entry in the finite state machine
return &fsm->next_state[index];
}
static void ResetFSM(FSM *fsm)
{
// Reset the state to the beginning of the finite state machine entries
fsm->next_state = fsm->entries;
}
static void UpdateFSM(FSM *fsm, int next)
{
// Change the state pointer to the next block of table entries
fsm->next_state = fsm->entries + (next << FSM_INDEX_SIZE);
}
#else
// Macros for the finite state machine decoder (release version).
// NOTE(review): the macro expansions are not fully parenthesized, e.g.
// (FSMENTRY *)fsm->next_state+index binds the cast before the addition;
// callers must pass simple arguments.
#if _INDIVIDUAL_LUT
#define GetFSMTableEntry(fsm, index) (FSMENTRY *)fsm->next_state+index
#define ResetFSM(fsm) fsm->next_state = fsm->table.entries[0]
#define UpdateFSM(fsm, next) fsm->next_state = fsm->table.entries[next]
// Individual lookup variants index a flat entry array by (state, index)
#define GetFSMTableEntryIndividual(fsm, index) (FSMENTRY *)fsm->table.entries_ind[(fsm->next_state_index << FSM_INDEX_SIZE) | index]
#define ResetFSMIndividual(fsm) fsm->next_state_index = 0
#define UpdateFSMIndividual(fsm, next) fsm->next_state_index = next
#else
#define GetFSMTableEntry(fsm, index) (FSMENTRY *)fsm->next_state+index
#define ResetFSM(fsm) fsm->next_state = fsm->table.entries
#define UpdateFSM(fsm, next) fsm->next_state = fsm->table.entries+((int)next << FSM_INDEX_SIZE)
#endif
#endif
#if _DEBUG
// Decompose one FSM table entry into its component fields.
// The locals are never printed or returned; presumably this exists so the
// decoded fields are visible in a debugger watch window -- confirm before
// removing (compilers may warn about the unused values).
static void DebugOutputFSMEntry(FSM *fsm, int index, FSMENTRY *entry)
{
    // Low 12 bits: zeros to skip before the values; high bits: after
    int pre_skip = (entry->pre_post_skip & 0xFFF);
    int post_skip = (entry->pre_post_skip >> 12);

    // Remove companding
    int value0 = entry->value0 / 32;
    int value1 = entry->value1 / 32;

    // Convert the index to start at the beginning of the table
    index += (int)(fsm->next_state - fsm->table.entries[0]);
}
// Same as DebugOutputFSMEntry but for the packed FSMENTRYFAST layout,
// where both magnitudes are packed into one 32-bit "values" field
// (value0 in the high 16 bits, value1 in the low 16 bits).
// Locals are intentionally unused; see DebugOutputFSMEntry.
static void DebugOutputFSMEntryFast(FSM *fsm, int index, FSMENTRYFAST *entry)
{
    int pre_skip = (entry->pre_post_skip & 0xFFF);
    int post_skip = (entry->pre_post_skip >> 12);

    // Remove companding
    int value0 = (entry->values >> 16) / 32;
    int value1 = (entry->values & 0xFFFF) / 32;

    // Convert the index to start at the beginning of the table
    index += (int)(fsm->next_state - fsm->table.entries[0]);
}
// Walk the first block of FSM table entries (the initial state) so the
// skip fields can be inspected in a debugger.  Nothing is output; the
// locals exist only for watch-window inspection.
static void DebugOutputFSM(FSM *fsm)
{
    int num_entries = FSM_INDEX_ENTRIES;
    int i;

    for (i = 0; i < num_entries; i++)
    {
        FSMENTRY *entry = &fsm->table.entries[0][i];
        int pre_skip = (entry->pre_post_skip & 0xFFF);
        int post_skip = (entry->pre_post_skip >> 12);
    }
}
// Log one decoded FSM entry as a CSV line: absolute table index, the two
// decompanded magnitudes, and the pre/post zero-run skips.  A NULL logfile
// is tolerated (the fields are still computed, nothing is written).
static void PrintFSMEntry(FSM *fsm, int index, FSMENTRY *entry, FILE *logfile)
{
    int pre_skip = (entry->pre_post_skip & 0xFFF);
    int post_skip = (entry->pre_post_skip >> 12);

    // Remove companding
    int value0 = entry->value0 / 32;
    int value1 = entry->value1 / 32;

    // Convert the index to start at the beginning of the table
    index += (int)(fsm->next_state - fsm->table.entries[0]);

    if (logfile)
    {
        fprintf(logfile, "%d, %d, %d, %d, %d\n", index, value0, value1, pre_skip, post_skip);
    }
}
// Same as PrintFSMEntry but for the packed FSMENTRYFAST layout
// (value0 in the high 16 bits of "values", value1 in the low 16 bits).
static void PrintFSMEntryFast(FSM *fsm, int index, FSMENTRYFAST *entry, FILE *logfile)
{
    int pre_skip = (entry->pre_post_skip & 0xFFF);
    int post_skip = (entry->pre_post_skip >> 12);

    // Remove companding
    int value0 = (entry->values >> 16) / 32;
    int value1 = (entry->values & 0xFFFF) / 32;

    // Convert the index to start at the beginning of the table
    index += (int)(fsm->next_state - fsm->table.entries[0]);

    if (logfile)
    {
        fprintf(logfile, "%d, %d, %d, %d, %d\n", index, value0, value1, pre_skip, post_skip);
    }
}
#endif
// Fetch the next byte from the bitstream without any bit-level buffering.
// (This is an inline of the third case of GetByte.)  In error tolerant
// builds the remaining-byte count is decremented as well.
static inline int GetFastByte(BITSTREAM *stream)
{
    // Read the byte at the current position, then advance the position
    uint8_t *position = stream->lpCurrentWord;
    int result = (uint32_t)(*position);
    stream->lpCurrentWord = position + 1;

#if ERROR_TOLERANT
    // One fewer byte remains available in the stream
    stream->nWordsUsed--;
#endif

    // The returned value must fit in eight bits
    assert((result & ~BITMASK(8)) == 0);

    return result;
}
// same as DecodeBandFSM8sNoGap but output to 16bit data
//
// Two-pass FSM decode of one subband into 16-bit coefficients:
//   pass 1 writes the decoded magnitudes directly;
//   pass 2 re-reads a second coded stream and ORs each value shifted left
//   by 8 into the same locations (high-byte refinement of pass 1).
// The quant parameter is currently unused.
// NOTE(review): rowptr is declared PIXEL* but later assigned/compared
// against PIXEL16S* -- this assumes PIXEL and PIXEL16S are the same
// underlying 16-bit type; confirm against the codec's type definitions.
bool DecodeBandFSM16sNoGap2Pass(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, int quant)
{
    int index, byte;
    FSMENTRY *entry;
    PIXEL *rowptr = (PIXEL *)image;
    PIXEL16S *bandendptr;
    int value;

#if ERROR_TOLERANT
    // Remember the stream position so it can be restored on a decode error
    uint8_t *startCurrentWord = stream->lpCurrentWord;
    int32_t startWordsUsed = stream->nWordsUsed;
#endif

#if _FSMBUFFER
    // Aligned local copy of the current table entry (cache behavior)
    __declspec(align(32)) FSMENTRY buffer;
#endif

    if (image == NULL)
    {
        return false;
    }

    // Reset the decoder
    ResetFSM(fsm);

    // Convert the pitch from bytes to pixels
    pitch /= sizeof(PIXEL16S);

    // Zero out the entire subband
    ZeroHighPassRow((PIXEL *)rowptr, pitch * height * sizeof(PIXEL16S));

    // This Huffman decoder assumes each byte is processed as two 4-bit chunks
    assert(BITSTREAM_WORD_SIZE == 2 * FSM_INDEX_SIZE);
    assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);

    // One past the last coefficient; all rows are decoded as one long run
    bandendptr = rowptr + height * pitch;

#if 0 // test for errors.
    {
        if ((rand() % 10) == 1)
            stream->lpCurrentWord[rand() % 50] ^= 1;
    }
#endif

    // Decode runs and magnitude values until the entire band is decoded
#if ERROR_TOLERANT
    while ((intptr_t)bandendptr - (intptr_t)rowptr >= 0)
#else
    for (;;)
#endif
    {
        // Read a byte from the bitstream
#if ERROR_TOLERANT
        if (stream->nWordsUsed)
        {
            byte = GetFastByte(stream);
        }
        else
        {
            // Ran out of data before the band end trailer: recover below
            break;
        }
#else
        byte = GetFastByte(stream);
#endif

        // Decode the first 4-bit chunk
        index = byte >> FSM_INDEX_SIZE;

        // Index into the lookup table at that state
        entry = GetFSMTableEntry(fsm, index);

#if _FSMBUFFER
        memcpy(&buffer, entry, sizeof(FSMENTRY));
        entry = &buffer;
#endif

        // Return if the subband is decoded completely
        if (entry->value0 == BAND_END_TRAILER)
        {
            assert(rowptr <= bandendptr);
            ResetFSM(fsm);
            goto SecondPass;
        }

        // Set the pointer to the next state
        UpdateFSM(fsm, (int)entry->next_state);

        // Skip the decoded zero runs
        rowptr = &rowptr[entry->pre_post_skip & 0xfff];

        // Write down the first decoded magnitude
        value = entry->value0;
        rowptr[0] = value;//SATURATE(value);

        // Write down the second decoded magnitude
        value = entry->value1;
        rowptr[1] = value;//SATURATE(value);

        // Skip the appropriate distance
        rowptr = &rowptr[entry->pre_post_skip >> 12];

        // decode the second 4-bit chunk
        index = byte & ((1 << FSM_INDEX_SIZE) - 1);

        // Index into the lookup table at that state
        entry = GetFSMTableEntry(fsm, index);

#if _FSMBUFFER
        memcpy(&buffer, entry, sizeof(FSMENTRY));
        entry = &buffer;
#endif

        // Return if the subband is decoded completely
        if (entry->value0 == BAND_END_TRAILER)
        {
            assert(rowptr <= bandendptr);
            ResetFSM(fsm);
            goto SecondPass;
        }

        // set the pointer to the next state
        UpdateFSM(fsm, (int)entry->next_state);

        // Skip the decoded zero runs
        rowptr = &rowptr[entry->pre_post_skip & 0xfff];

        // Write down the first decoded magnitude
        value = entry->value0;
        rowptr[0] = value;//SATURATE(value);

        // Write down the second decoded magnitude
        value = entry->value1;
        rowptr[1] = value;//SATURATE(value);

        // Skip the decoded zero runs
        rowptr = &rowptr[entry->pre_post_skip >> 12];
    }

SecondPass:
    // Restart at the top of the band and align the stream on the next
    // coded chunk (skipping a 4-byte tag) for the refinement pass
    rowptr = (PIXEL16S *)image;
    AlignBits(stream);
    AlignBitsTag(stream);
    stream->lpCurrentWord += 4;
    stream->nWordsUsed -= 4;

    // Decode runs and magnitude values until the entire band is decoded
#if ERROR_TOLERANT
    while ((intptr_t)bandendptr - (intptr_t)rowptr >= 0)
#else
    for (;;)
#endif
    {
        // Read a byte from the bitstream
#if ERROR_TOLERANT
        if (stream->nWordsUsed)
        {
            byte = GetFastByte(stream);
        }
        else
        {
            break;
        }
#else
        byte = GetFastByte(stream);
#endif

        // Decode the first 4-bit chunk
        index = byte >> FSM_INDEX_SIZE;

        // Index into the lookup table at that state
        entry = GetFSMTableEntry(fsm, index);

#if _FSMBUFFER
        memcpy(&buffer, entry, sizeof(FSMENTRY));
        entry = &buffer;
#endif

        // Return if the subband is decoded completely
        if (entry->value0 == BAND_END_TRAILER)
        {
            assert(rowptr <= bandendptr);
            ResetFSM(fsm);
            return true;
        }

        // Set the pointer to the next state
        UpdateFSM(fsm, (int)entry->next_state);

        // Skip the decoded zero runs
        rowptr = &rowptr[entry->pre_post_skip & 0xfff];

        // Merge the first decoded magnitude into the high byte
        value = entry->value0;
        rowptr[0] |= value << 8;

        // Merge the second decoded magnitude into the high byte
        value = entry->value1;
        rowptr[1] |= value << 8;

        // Skip the appropriate distance
        rowptr = &rowptr[entry->pre_post_skip >> 12];

        // decode the second 4-bit chunk
        index = byte & ((1 << FSM_INDEX_SIZE) - 1);

        // Index into the lookup table at that state
        entry = GetFSMTableEntry(fsm, index);

#if _FSMBUFFER
        memcpy(&buffer, entry, sizeof(FSMENTRY));
        entry = &buffer;
#endif

        // Return if the subband is decoded completely
        if (entry->value0 == BAND_END_TRAILER)
        {
            assert(rowptr <= bandendptr);
            ResetFSM(fsm);
            return true;
        }

        // set the pointer to the next state
        UpdateFSM(fsm, (int)entry->next_state);

        // Skip the decoded zero runs
        rowptr = &rowptr[entry->pre_post_skip & 0xfff];

        // Merge the first decoded magnitude into the high byte
        value = entry->value0;
        rowptr[0] |= value << 8;

        // Merge the second decoded magnitude into the high byte
        value = entry->value1;
        rowptr[1] |= value << 8;

        // Skip the decoded zero runs
        rowptr = &rowptr[entry->pre_post_skip >> 12];
    }

#if ERROR_TOLERANT
    // The band end trailer was never seen: reset, rewind the stream to the
    // start of the band, and skip past the damaged subband
    // Reset the decoder
    ResetFSM(fsm);

    // Backup the bitstream to the beginning of the band
    stream->lpCurrentWord = startCurrentWord;
    stream->nWordsUsed = startWordsUsed;

#if 0
    AlignBitsTag(stream);
    // Read the debugging marker
    {
        TAGVALUE segment;
        do
        {
            segment = GetTagValue(stream);
        } while (segment.tuple.tag != CODEC_TAG_BAND_TRAILER);
        stream->lpCurrentWord -= 4;
        stream->nWordsUsed += 4;
    }
#else
    SkipSubband(stream);
#endif
#endif

    return true;
}
// Same as DecodeBandFSM8sNoGap but output to 16bit data
//
// FSM decode of one subband into 16-bit coefficients in two phases:
// a fast phase that reads the stream directly (no per-byte bookkeeping,
// no band-end check) while at least 500 output pixels remain, then a
// careful phase that checks for the band end trailer on every entry.
// NOTE(review): the fast loop trusts the 500-pixel safety margin to stop
// before overrunning the band, and reads *CurrentWord++ without checking
// the stream length -- a corrupt stream could overread here; confirm the
// margin against the maximum run length a single entry can encode.
#if _DEBUG
bool DecodeBandFSM16sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, FILE *logfile)
#else
bool DecodeBandFSM16sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch)
#endif
{
    int index, byte;
    FSMENTRY *entry;
    FSMENTRYFAST *entryfast;
    PIXEL16S *rowptr = image;
    PIXEL16S *bandendptr;
    PIXEL16S *fastendptr;
    int32_t value;
    uint8_t *startCurrentWord = stream->lpCurrentWord;
    uint8_t *CurrentWord = stream->lpCurrentWord;
    int32_t startWordsUsed = stream->nWordsUsed;
    ptrdiff_t offset;

#if _FSMBUFFER
    // Aligned local copy of the current table entry (cache behavior)
    __declspec(align(32)) FSMENTRY buffer;
#endif

#if (0 && DEBUG)
    DebugOutputBitstreamPosition(stream);
    DebugOutputBitstreamBytes(stream, 16);
#endif

    // Reset the decoder
    ResetFSM(fsm);

#if (0 && DEBUG)
    DebugOutputFSM(fsm);
#endif

    // Convert the pitch from bytes to pixels
    pitch /= sizeof(PIXEL16S);

    // Zero out the entire subband
    ZeroHighPassRow((PIXEL *)rowptr, pitch * height * sizeof(PIXEL16S));
    //memset(rowptr, 0, pitch*height*sizeof(PIXEL16S));

    // This Huffman decoder assumes each byte is processed as two 4-bit chunks
    assert(BITSTREAM_WORD_SIZE == 2 * FSM_INDEX_SIZE);
    assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);

    // One past the last coefficient; all rows are decoded as one long run
    bandendptr = rowptr + height * pitch;

#if 0 // test for errors.
    {
        if ((rand() % 10) == 1)
            stream->lpCurrentWord[rand() % 50] ^= 1;
    }
#endif

    // Stop the fast loop 500 pixels early so its unchecked writes and
    // reads cannot run past the end of the band
    fastendptr = bandendptr;
    fastendptr -= 500;

    // Decode runs and magnitude values until the entire band is decoded
    while (rowptr < fastendptr)
    {
        // Read a byte from the bitstream
        byte = *CurrentWord++;

        // Decode the first 4-bit chunk
        index = byte >> FSM_INDEX_SIZE;

        // Index into the lookup table at that state
        entryfast = (FSMENTRYFAST *)GetFSMTableEntry(fsm, index);

#if (0 && DEBUG)
        //DebugOutputFSMEntryFast(fsm, index, entryfast);
        PrintFSMEntryFast(fsm, index, entryfast, logfile);
#endif

        // Set the pointer to the next state
        UpdateFSM(fsm, (int)entryfast->next_state);

        // Skip the decoded zero runs
        rowptr = &rowptr[entryfast->pre_post_skip & 0xfff];

        // Store both decoded magnitudes with one 32-bit write
        // (assumes the packed "values" layout matches adjacent pixels)
        *((uint32_t *)rowptr) = entryfast->values;

        // Skip the appropriate distance
        rowptr = &rowptr[entryfast->pre_post_skip >> 12];

        // decode the second 4-bit chunk
        index = byte & ((1 << FSM_INDEX_SIZE) - 1);

        // Index into the lookup table at that state
        entryfast = (FSMENTRYFAST *)GetFSMTableEntry(fsm, index);

#if (0 && DEBUG)
        //DebugOutputFSMEntryFast(fsm, index, entryfast);
        PrintFSMEntryFast(fsm, index, entryfast, logfile);
#endif

        // set the pointer to the next state
        UpdateFSM(fsm, (int)entryfast->next_state);

        // Skip the decoded zero runs
        rowptr = &rowptr[entryfast->pre_post_skip & 0xfff];

        // Store both decoded magnitudes with one 32-bit write
        *((uint32_t *)rowptr) = entryfast->values;

        // Skip the decoded zero runs
        rowptr = &rowptr[entryfast->pre_post_skip >> 12];
    }

    // Fold the bytes consumed by the fast loop back into the stream state
    offset = CurrentWord - startCurrentWord;
    stream->lpCurrentWord += offset;
    stream->nWordsUsed -= (int)offset;

    // Decode runs and magnitude values until the entire band is decoded
#if ERROR_TOLERANT
    while (bandendptr >= rowptr)
#else
    for (;;)
#endif
    {
#if (0 && DEBUG)
        if (!(rowptr < bandendptr))
        {
            return true;
        }
#endif

#if (0 && DEBUG)
        PrintBitstreamPosition(stream, logfile);
#endif

        // Read a byte from the bitstream
#if ERROR_TOLERANT
        if (stream->nWordsUsed)
        {
            byte = GetFastByte(stream);
        }
        else
        {
            // Ran out of data before the band end trailer: recover below
            break;
        }
#else
        byte = GetFastByte(stream);
#endif

        // Decode the first 4-bit chunk
        index = byte >> FSM_INDEX_SIZE;

        // Index into the lookup table at that state
        entry = GetFSMTableEntry(fsm, index);

#if (0 && DEBUG)
        //DebugOutputFSMEntry(fsm, index, entry);
        PrintFSMEntry(fsm, index, entry, logfile);
#endif

#if _FSMBUFFER
        memcpy(&buffer, entry, sizeof(FSMENTRY));
        entry = &buffer;
#endif

        // Return if the subband is decoded completely
        if (entry->value0 == BAND_END_TRAILER)
        {
            assert(rowptr <= bandendptr);
            ResetFSM(fsm);
            return true;
        }

        // Set the pointer to the next state
        UpdateFSM(fsm, (int)entry->next_state);

        // Skip the decoded zero runs
        rowptr = &rowptr[entry->pre_post_skip & 0xfff];

        // Write down the first decoded magnitude (skip zero writes)
        if ((value = entry->value0))
        {
            rowptr[0] = value;//SATURATE(value);
        }

        // Write down the second decoded magnitude (skip zero writes)
        if ((value = entry->value1))
        {
            rowptr[1] = value;//SATURATE(value);
        }

        // Skip the appropriate distance
        rowptr = &rowptr[entry->pre_post_skip >> 12];

        // decode the second 4-bit chunk
        index = byte & ((1 << FSM_INDEX_SIZE) - 1);

        // Index into the lookup table at that state
        entry = GetFSMTableEntry(fsm, index);

#if (0 && DEBUG)
        //DebugOutputFSMEntry(fsm, index, entry);
        PrintFSMEntry(fsm, index, entry, logfile);
#endif

#if _FSMBUFFER
        memcpy(&buffer, entry, sizeof(FSMENTRY));
        entry = &buffer;
#endif

        // Return if the subband is decoded completely
        if (entry->value0 == BAND_END_TRAILER)
        {
            assert(rowptr <= bandendptr);
            ResetFSM(fsm);
            return true;
        }

        // set the pointer to the next state
        UpdateFSM(fsm, (int)entry->next_state);

        // Skip the decoded zero runs
        rowptr = &rowptr[entry->pre_post_skip & 0xfff];

        // Write down the first decoded magnitude (skip zero writes)
        if ((value = entry->value0))
        {
            rowptr[0] = value;//SATURATE(value);
        }

        // Write down the second decoded magnitude (skip zero writes)
        if ((value = entry->value1))
        {
            rowptr[1] = value;//SATURATE(value);
        }

        // Skip the decoded zero runs
        rowptr = &rowptr[entry->pre_post_skip >> 12];
    }

#if ERROR_TOLERANT
    // The band end trailer was never seen: reset, rewind the stream to the
    // start of the band, and skip past the damaged subband
    // Reset the decoder
    ResetFSM(fsm);

    // Backup the bitstream to the beginning of the band
    stream->lpCurrentWord = startCurrentWord;
    stream->nWordsUsed = startWordsUsed;

#if 0
    AlignBitsTag(stream);
    // Read the debugging marker
    {
        TAGVALUE segment;
        do
        {
            segment = GetTagValue(stream);
        } while (segment.tuple.tag != CODEC_TAG_BAND_TRAILER);
        stream->lpCurrentWord -= 4;
        stream->nWordsUsed += 4;
    }
#else
    SkipSubband(stream);
#endif
#endif

    return true;
}
// FSM decode of one subband into 16-bit coefficients, substituting values
// from the peaks table: whenever |value0| exceeds the companded level, the
// coefficient is replaced by the next peak divided by quant.
// NOTE(review): only value0 is peak-checked; value1 is written unchanged.
// Presumably the encoder guarantees peaks occur only in the first slot --
// confirm against the encoder's peak-table emission.
// NOTE(review): the fast loop below has no band-end-trailer check and
// relies on the 1000-pixel safety margin; it also reads *CurrentWord++
// without a stream-length check, so a corrupt stream could overread.
bool DecodeBandFSM16sNoGapWithPeaks(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, PIXEL *peaks, int level, int quant)
{
    int index, byte;
    FSMENTRY *entry;
    PIXEL16S *rowptr = image;
    PIXEL16S *bandendptr;
    PIXEL16S *fastendptr;
    int32_t value;
    uint8_t *startCurrentWord = stream->lpCurrentWord;
    uint8_t *CurrentWord = stream->lpCurrentWord;
    int32_t startWordsUsed = stream->nWordsUsed;

#if _FSMBUFFER
    // Aligned local copy of the current table entry (cache behavior)
    __declspec(align(32)) FSMENTRY buffer;
#endif

    // Reset the decoder
    ResetFSM(fsm);

    //This is been called with non-prequantized FSM
    // Scale the peak threshold to match the quantized coefficient range
    if (quant > 1) level /= quant;

    // Convert the pitch from bytes to pixels
    pitch /= sizeof(PIXEL16S);

    // Zero out the entire subband
    ZeroHighPassRow((PIXEL *)rowptr, pitch * height * sizeof(PIXEL16S));

    // This Huffman decoder assumes each byte is processed as two 4-bit chunks
    assert(BITSTREAM_WORD_SIZE == 2 * FSM_INDEX_SIZE);
    assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);

    // One past the last coefficient; all rows are decoded as one long run
    bandendptr = rowptr + height * pitch;

#if 0 // test for errors.
    {
        if ((rand() % 10) == 1)
            stream->lpCurrentWord[rand() % 50] ^= 1;
    }
#endif

    // Stop the fast loop 1000 pixels early so its unchecked writes and
    // reads cannot run past the end of the band
    fastendptr = bandendptr;
    fastendptr -= 1000;

    // Decode runs and magnitude values until the entire band is decoded
    while (rowptr < fastendptr)
    {
        // Read a byte from the bitstream
        byte = *CurrentWord++;

        // Decode the first 4-bit chunk
        index = byte >> FSM_INDEX_SIZE;

        // Index into the lookup table at that state
        entry = GetFSMTableEntry(fsm, index);

        // Set the pointer to the next state
        UpdateFSM(fsm, (int)entry->next_state);

        // Skip the decoded zero runs
        rowptr = &rowptr[entry->pre_post_skip & 0xfff];

        // Write down the first decoded magnitude, replacing peaks
        value = entry->value0;
        if (abs(value) > level)
            rowptr[0] = *peaks++ / quant;
        else
            rowptr[0] = value;//SATURATE(value);

        value = entry->value1;
        rowptr[1] = value;//SATURATE(value);

        // Skip the appropriate distance
        rowptr = &rowptr[entry->pre_post_skip >> 12];

        // decode the second 4-bit chunk
        index = byte & ((1 << FSM_INDEX_SIZE) - 1);

        // Index into the lookup table at that state
        entry = GetFSMTableEntry(fsm, index);

        // set the pointer to the next state
        UpdateFSM(fsm, (int)entry->next_state);

        // Skip the decoded zero runs
        rowptr = &rowptr[entry->pre_post_skip & 0xfff];

        // Write down the first decoded magnitude, replacing peaks
        value = entry->value0;
        if (abs(value) > level)
            rowptr[0] = *peaks++ / quant;
        else
            rowptr[0] = value;//SATURATE(value);

        value = entry->value1;
        rowptr[1] = value;//SATURATE(value);

        // Skip the decoded zero runs
        rowptr = &rowptr[entry->pre_post_skip >> 12];
    }

    // Fold the bytes consumed by the fast loop back into the stream state
    stream->lpCurrentWord += ((intptr_t)CurrentWord - (intptr_t)startCurrentWord);
    stream->nWordsUsed -= (int)(((intptr_t)CurrentWord - (intptr_t)startCurrentWord));

    // Decode runs and magnitude values until the entire band is decoded
#if ERROR_TOLERANT
    while (((intptr_t)bandendptr - (intptr_t)rowptr) >= 0)
#else
    for (;;)
#endif
    {
#if (0 && DEBUG)
        if (!(rowptr < bandendptr))
        {
            return true;
        }
#endif

        // Read a byte from the bitstream
#if ERROR_TOLERANT
        if (stream->nWordsUsed)
        {
            byte = GetFastByte(stream);
        }
        else
        {
            // Ran out of data before the band end trailer: recover below
            break;
        }
#else
        byte = GetFastByte(stream);
#endif

        // Decode the first 4-bit chunk
        index = byte >> FSM_INDEX_SIZE;

        // Index into the lookup table at that state
        entry = GetFSMTableEntry(fsm, index);

#if _FSMBUFFER
        memcpy(&buffer, entry, sizeof(FSMENTRY));
        entry = &buffer;
#endif

        // Return if the subband is decoded completely
        if (entry->value0 == BAND_END_TRAILER)
        {
            assert(rowptr <= bandendptr);
            ResetFSM(fsm);
            return true;
        }

        // Set the pointer to the next state
        UpdateFSM(fsm, (int)entry->next_state);

        // Skip the decoded zero runs
        rowptr = &rowptr[entry->pre_post_skip & 0xfff];

        // Write down the first decoded magnitude, replacing peaks
        value = entry->value0;
        if (abs(value) > level)
            rowptr[0] = *peaks++ / quant;
        else
            rowptr[0] = value;//SATURATE(value);

        // Write down the second decoded magnitude
        value = entry->value1;
        rowptr[1] = value;//SATURATE(value);

        // Skip the appropriate distance
        rowptr = &rowptr[entry->pre_post_skip >> 12];

        // decode the second 4-bit chunk
        index = byte & ((1 << FSM_INDEX_SIZE) - 1);

        // Index into the lookup table at that state
        entry = GetFSMTableEntry(fsm, index);

#if _FSMBUFFER
        memcpy(&buffer, entry, sizeof(FSMENTRY));
        entry = &buffer;
#endif

        // Return if the subband is decoded completely
        if (entry->value0 == BAND_END_TRAILER)
        {
            assert(rowptr <= bandendptr);
            ResetFSM(fsm);
            return true;
        }

        // set the pointer to the next state
        UpdateFSM(fsm, (int)entry->next_state);

        // Skip the decoded zero runs
        rowptr = &rowptr[entry->pre_post_skip & 0xfff];

        // Write down the first decoded magnitude, replacing peaks
        value = entry->value0;
        if (abs(value) > level)
            rowptr[0] = *peaks++ / quant;
        else
            rowptr[0] = value;//SATURATE(value);

        // Write down the second decoded magnitude
        value = entry->value1;
        rowptr[1] = value;//SATURATE(value);

        // Skip the decoded zero runs
        rowptr = &rowptr[entry->pre_post_skip >> 12];
    }

#if ERROR_TOLERANT
    // The band end trailer was never seen: reset, rewind the stream to the
    // start of the band, and skip past the damaged subband
    // Reset the decoder
    ResetFSM(fsm);

    // Backup the bitstream to the beginning of the band
    stream->lpCurrentWord = startCurrentWord;
    stream->nWordsUsed = startWordsUsed;

#if 0
    AlignBitsTag(stream);
    // Read the debugging marker
    {
        TAGVALUE segment;
        do
        {
            segment = GetTagValue(stream);
        } while (segment.tuple.tag != CODEC_TAG_BAND_TRAILER);
        stream->lpCurrentWord -= 4;
        stream->nWordsUsed += 4;
    }
#else
    SkipSubband(stream);
#endif
#endif

    return true;
}
// Decode the highpass band coefficients but do not write them out - used in SIF mode
//
// Runs the finite state machine over the coded subband, discarding the
// decoded runs and magnitudes, until the band end trailer is reached.
// The image/width/height parameters are unused; pitch is normalized for
// interface symmetry with the writing decoders.
bool SkipBandFSM(FSM *fsm, BITSTREAM *stream, PIXEL8S *image, int width, int height, int pitch)
{
    pitch /= sizeof(PIXEL8S);

    // The Huffman decoder assumes each byte is processed as two 4-bit chunks
    assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE * 2);
    assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);

    // Consume bytes until the band end trailer is decoded
    for (;;)
    {
        int chunk;
        int word = GetFastByte(stream);

        // Process the high 4-bit chunk first, then the low 4-bit chunk
        for (chunk = 0; chunk < 2; chunk++)
        {
            int nibble = (chunk == 0)
                ? (word >> FSM_INDEX_SIZE)
                : (word & ((1 << FSM_INDEX_SIZE) - 1));

            // Index into the lookup table at the current state
            FSMENTRY *state_entry = GetFSMTableEntry(fsm, nibble);

            // Finished when the band end trailer is decoded
            if (state_entry->value0 == BAND_END_TRAILER)
            {
                ResetFSM(fsm);
                return true;
            }

            // Advance the machine to the next state
            UpdateFSM(fsm, (int)state_entry->next_state);
        }
    }
}
#if _TIMING
extern TIMER tk_fastruns;
#endif
#if _DEQUANTIZE_IN_FSM
// Restore the FSM entry values that DeQuantFSM scaled, copying back the
// shadow values saved in fsm->restoreFSM.  The quant argument is accepted
// for symmetry with DeQuantFSM but is not needed to undo the scaling
// (restoration is an exact copy, not a division).
void ReQuantFSM(FSM *fsm, int quant)
{
    int count = 0;
    int i, j;
    short *restore = &fsm->restoreFSM[0];

#if !_INDIVIDUAL_ENTRY
    // Table layout: one contiguous block of entries per state
    for (i = 0; i < fsm->table.num_states; i++)
    {
        FSMENTRY *entry = fsm->table.entries[i];
        for (j = 0; j < (1 << FSM_INDEX_SIZE); j++)
        {
            entry[j].value0 = restore[count++];
            entry[j].value1 = restore[count++];
        }
    }
#else
    // Table layout: individually allocated entries (may be sparse)
    for (i = 0; i < (fsm->table.num_states << FSM_INDEX_SIZE); i++)
    {
        // Fix: was "fsm_table.entries_ind[i]", an undeclared identifier
        // that cannot compile; DeQuantFSM accesses the same table as
        // fsm->table.entries_ind[i]
        FSMENTRY *entry = fsm->table.entries_ind[i];
        if (entry)
        {
            entry->value0 = restore[count++];
            entry->value1 = restore[count++];
        }
    }
#endif
}
// Pre-scale every FSM table value by the quantization factor so the
// decoders can write dequantized coefficients directly.  The original
// (unscaled) values are saved once into fsm->restoreFSM; if the table is
// already scaled by a different factor it is first restored via
// ReQuantFSM.  A no-op when the table is already scaled by this quant.
void DeQuantFSM(FSM *fsm, int quant)
{
    int i, j;

    // Undo a previous scaling by a different factor before rescaling
    if (fsm->LastQuant > 1 && fsm->LastQuant != quant)
    {
        ReQuantFSM(fsm, fsm->LastQuant);
    }
    else if (fsm->LastQuant == quant)
    {
        // Table already carries this quantization factor
        return;
    }

    // Save the pristine table values the first time through
    if (fsm->InitizedRestore == 0)
    {
        short *restore = &fsm->restoreFSM[0];
        int count = 0;

#if !_INDIVIDUAL_ENTRY
        for (i = 0; i < fsm->table.num_states; i++)
        {
            FSMENTRY *entry = fsm->table.entries[i];
            for (j = 0; j < (1 << FSM_INDEX_SIZE); j++)
            {
                restore[count++] = entry[j].value0;
                restore[count++] = entry[j].value1;
            }
        }
#else
        for (i = 0; i < (fsm->table.num_states << FSM_INDEX_SIZE); i++)
        {
            FSMENTRY *entry = fsm->table.entries_ind[i];
            if (entry)
            {
                restore[count++] = entry->value0;
                restore[count++] = entry->value1;
            }
        }
#endif
        fsm->InitizedRestore = 1;
    }

    // Scale every entry, leaving marker codes (>= 0x7ff0) in value0 alone
    // NOTE(review): value1 is scaled unconditionally, even for marker
    // entries -- presumably value1 is unused in those entries; confirm.
#if !_INDIVIDUAL_ENTRY
    for (i = 0; i < fsm->table.num_states; i++)
    {
        FSMENTRY *entry = fsm->table.entries[i];
        for (j = 0; j < (1 << FSM_INDEX_SIZE); j++)
        {
            if (entry[j].value0 < 0x7ff0) // band end trailer
                entry[j].value0 *= quant;
            entry[j].value1 *= quant;
        }
    }
#else
    for (i = 0; i < (fsm->table.num_states << FSM_INDEX_SIZE); i++)
    {
        FSMENTRY *entry = fsm->table.entries_ind[i];
        if (entry)
        {
            if (entry->value0 < 0x7ff0) // band end trailer etc
                entry->value0 *= quant;
            entry->value1 *= quant;
        }
    }
#endif

    // Record the factor now baked into the table
    fsm->LastQuant = quant;
}
#endif // _DEQUANTIZE_IN_FSM
// New version of coefficient runs decoder that uses a finite state machine with a scaling factor
//dan 7-11-03
//
// Decode one wavelet band's run/magnitude coded coefficients.  In the
// threaded build the work is queued to the entropy worker pool and the
// bitstream is advanced past the subband (using the optional subband-size
// tag when present, otherwise scanning for the band-end marker); in the
// non-threaded path the band is decoded inline, with optional peak-table
// substitution and difference (horizontal DPCM) decoding.
// Returns false (setting decoder->error) on invalid arguments or a decode
// failure, true otherwise.
bool DecodeFastRunsFSM16s(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet,
                          int band_index, int width, int height, int threading)
{
    //CODEC_ERROR error = CODEC_ERROR_OKAY;

    // Fix: logfile was declared under "#if (DEBUG)" but is passed to
    // DecodeBandFSM16sNoGap under "#if _DEBUG" below, so a build with
    // _DEBUG defined and DEBUG undefined failed to compile.  Declare it
    // whenever either debug macro is set.
#if (DEBUG) || _DEBUG
    FILE *logfile = decoder->logfile;
#endif

    int result = true;
    int quant = wavelet->quantization[band_index];
    int active_codebook = decoder->codec.active_codebook;

    // Get the pointer to the finite state machine
    FSM *fsm = &decoder->fsm[active_codebook];

    int size;
    PIXEL *rowptr;
    //int row = 0;
    int pitch;
    CODEC_STATE *codec = &decoder->codec;
    //int channel = codec->channel;
    //int subband = codec->band.subband;
    //int num_subbands = codec->num_subbands;
    //int pixel_type = wavelet->pixel_type[band_index];
    int difference_coding = decoder->codec.difference_coding;
    //int localquant = 1;
    int peaklevel = 0;
    //int peaksize = 0;
    PIXEL *peakbase = NULL;

#if (0 && DEBUG)
    if (logfile)
    {
        fprintf(logfile, "Subband: %d, active_codebook: %d, difference_coding: %d\n",
                subband, decoder->codec.active_codebook, difference_coding);
    }
#endif

    // These are one-shot settings consumed by this subband
    decoder->codec.active_codebook = 0; // reset CODEC state
    decoder->codec.difference_coding = 0; //reset state for next subband

    // Must have a valid wavelet
    assert(wavelet != NULL);
    if (wavelet == NULL) return false;

    //Must have a valid FSM
    assert(fsm != NULL);
    if (fsm == NULL) return false;

    // All rows are treated as one long row that covers the entire band
    size = fsm->table.num_states;
    assert(size > 0);
    if (size == 0)
    {
        decoder->error = CODEC_ERROR_RUN_DECODE;
        return false;
    }

    // Check if the band is intended for 8-bit pixels
    assert(wavelet->pixel_type[band_index] == PIXEL_TYPE_16S);

    START(tk_fastruns);

    rowptr = (PIXEL *)wavelet->band[band_index];
    //pitch = wavelet->pitch8s; // Use the 8-bit pitch
    pitch = wavelet->pitch;

    peaklevel = codec->peak_table.level;
    peakbase = codec->peak_table.base;

#if _THREADED
    // Fall back to inline decoding when the pool has no extra threads
    threading = decoder->entropy_worker_new.pool.thread_count > 1 ? threading : 0;
    if (threading)
    {
        decoder->entropy_worker_new.threads_used = 1;
        {
            //int start = stream->nWordsUsed;
            int end;
            struct entropy_data_new *data;
            int next_queue_num = decoder->entropy_worker_new.next_queue_num++;

            // Queue a snapshot of the stream and band parameters for the worker
            data = &decoder->entropy_worker_new.entropy_data[next_queue_num];
            memcpy(&data->stream, stream, sizeof(BITSTREAM));
            data->rowptr = rowptr;
            data->width = width;
            data->height = height;
            data->pitch = pitch;
            data->peaks = peakbase;
            data->level = peaklevel;
            data->quant = quant;
            data->wavelet = wavelet;
            data->band_index = band_index;
            data->active_codebook = active_codebook;
            data->difference_coding = difference_coding;

            // Start only a particular threadid
            if (next_queue_num == 0)
            {
                ThreadPoolSetWorkCount(&decoder->entropy_worker_new.pool, 1);
#if _DELAYED_THREAD_START==0
                ThreadPoolSendMessage(&decoder->entropy_worker_new.pool, THREAD_MESSAGE_START);
#endif
            }
            else
            {
                // Set the work count to the number of rows to process
                ThreadPoolAddWorkCount(&decoder->entropy_worker_new.pool, 1);
            }

            // Advance this (parsing) thread past the subband payload
            {
                unsigned short tag = *(stream->lpCurrentWord - 8) << 8;
                if (tag == (unsigned short)OPTIONALTAG(CODEC_TAG_SUBBAND_SIZE))
                {
                    // The optional subband-size tag gives the chunk length
                    // directly: reconstruct the 24-bit size in 32-bit words
                    int chunksize;
                    int value = *(stream->lpCurrentWord - 6) << 8;
                    value |= *(stream->lpCurrentWord - 5);
                    tag |= *(stream->lpCurrentWord - 7);
                    tag = NEG(tag);
                    chunksize = value;
                    chunksize &= 0xffff;
                    chunksize += ((tag & 0xff) << 16);
                    chunksize *= 4;
                    chunksize -= 8;
                    {
                        // Sanity check: the skip must land on the band end marker
                        uint32_t *ptr = (uint32_t *)stream->lpCurrentWord;
                        ptr += (chunksize >> 2);
                        if (*ptr != 0x00003800) // bandend
                        {
                            goto continuesearch;
                        }
                    }
                    stream->lpCurrentWord += chunksize;
                    stream->nWordsUsed -= chunksize;
                    end = stream->nWordsUsed;
                }
                else
                {
continuesearch:
                    // No size tag (or it was wrong): scan for the band end marker
                    while (*((uint32_t *)stream->lpCurrentWord) != 0x00003800) // bandend
                    {
                        stream->lpCurrentWord += 4;
                        stream->nWordsUsed -= 4;
                    }
                    end = stream->nWordsUsed;
                }
            }
        }
    }
    else
#endif // _THREADED
    {
        // Inline decode: bake the quantization into the FSM table first
        DeQuantFSM(fsm, quant);

        if (peaklevel)
        {
            result = DecodeBandFSM16sNoGapWithPeaks(fsm, stream, (PIXEL16S *)rowptr, width, height, pitch, peakbase, peaklevel, 1);
        }
        else
        {
#if _DEBUG
            result = DecodeBandFSM16sNoGap(fsm, stream, (PIXEL16S *)rowptr, width, height, pitch, logfile);
#else
            result = DecodeBandFSM16sNoGap(fsm, stream, (PIXEL16S *)rowptr, width, height, pitch);
#endif
        }

        if (difference_coding)
        {
            // Horizontal DPCM: each coefficient is a delta from its left neighbor
            int x, y;
            PIXEL *line = rowptr;
            for (y = 0; y < height; y++)
            {
                for (x = 1; x < width; x++)
                {
                    line[x] += line[x - 1];
                }
                line += pitch / 2;
            }
        }

        if (result)
        {
            // Call thread safe routine to update the band valid flags
            UpdateWaveletBandValidFlags(decoder, wavelet, band_index);
        }
    }

    assert(result == true);
    if (result != true)
    {
        decoder->error = CODEC_ERROR_RUN_DECODE;
        return false;
    }

    //end:
    STOP(tk_fastruns);

    return true;
}
// Decode one run-coded band with the FSM but discard the output (SIF mode):
// validates the wavelet/FSM, then drives SkipBandFSM to consume the coded
// data up to the band end trailer.  Returns false and sets decoder->error
// on failure.
// NOTE(review): logfile is declared under "#if (DEBUG)" but every use is
// inside "#if (0 && DEBUG)" blocks, so it is unused when DEBUG is set --
// harmless, but expect an unused-variable warning.
bool SkipFastRunsFSM(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet,
                     int band_index, int width, int height)
{
    //CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    int result;

    // Get the pointer to the finite state machine
    FSM *fsm = &decoder->fsm[decoder->codec.active_codebook]; //DAN20041026

    // All rows are treated as one long row that covers the entire band
    int size = fsm->table.num_states;
    PIXEL *rowptr;
    //int row = 0;
    int pitch;
    //int pixel_type = wavelet->pixel_type[band_index];

    // The codebook selection is a one-shot setting consumed here
    decoder->codec.active_codebook = 0; // reset CODEC state

    // Must have a valid wavelet
    assert(wavelet != NULL);
    if (wavelet == NULL) return false;

    //Must have a valid FSM
    assert(fsm != NULL);
    if (fsm == NULL) return false;

    assert(size > 0);
    if (size == 0)
    {
        decoder->error = CODEC_ERROR_RUN_DECODE;
        return false;
    }

    // Check if the band is 8bit/pixel
    assert(wavelet->pixel_type[band_index] == PIXEL_TYPE_8S);

    START(tk_fastruns);

    rowptr = (PIXEL *)wavelet->band[band_index];
    pitch = wavelet->pitch8s; // Use the 8-bit pitch

    // The finite state machine does not support a marker at the end of rows
#if RUNS_ROWEND_MARKER
    assert(0);
#endif

#if 1 // Get one byte from the bitstream and decode 4 bits at a time
    result = SkipBandFSM(fsm, stream, (PIXEL8S *)rowptr, width, height, pitch);
    assert(result == true);
    if (result != true)
    {
        decoder->error = CODEC_ERROR_RUN_DECODE;
        return false;
    }
#endif

#if (0 && DEBUG && _WIN32)
    _CrtCheckMemory();
#endif

#if (0 && DEBUG)
    if (logfile)
        DumpBand("Band", wavelet, band_index, NULL, logfile);
#endif

    //end:
    STOP(tk_fastruns);

    return true;
}
/***** End of the code for the finite state machine decoder *****/
#if 1
// The second version applies the horizontal inverse filters row by row, so the
// memory access pattern is more efficient. The lowpass and highpass temporal
// coefficients for each row are inverted and packed into the output in one pass.
// Apply the inverse horizontal-temporal transform and pack the output into a buffer
void TransformInverseFrameToYUV(TRANSFORM *transform[], int frame_index, int num_channels,
uint8_t *output, int output_pitch, FRAME_INFO *frame,
const SCRATCH *scratch, int chroma_offset, int precision)
{
// Pointers to the rows in the horizontal wavelet for each channel
PIXEL *horizontal_lowlow[TRANSFORM_MAX_CHANNELS];
PIXEL *horizontal_lowhigh[TRANSFORM_MAX_CHANNELS];
PIXEL *horizontal_highlow[TRANSFORM_MAX_CHANNELS];
PIXEL *horizontal_highhigh[TRANSFORM_MAX_CHANNELS];
// Horizontal wavelet band width and pitch
int horizontal_width[TRANSFORM_MAX_CHANNELS];
int horizontal_pitch[TRANSFORM_MAX_CHANNELS];
int horizontal_pitch8s[TRANSFORM_MAX_CHANNELS];
// Quantization factors
int lowlow_quantization[TRANSFORM_MAX_CHANNELS];
int lowhigh_quantization[TRANSFORM_MAX_CHANNELS];
int highlow_quantization[TRANSFORM_MAX_CHANNELS];
int highhigh_quantization[TRANSFORM_MAX_CHANNELS];
// Pointers to the rows in the temporal wavelet for each channel
PIXEL *temporal_lowpass[TRANSFORM_MAX_CHANNELS];
PIXEL *temporal_highpass[TRANSFORM_MAX_CHANNELS];
// Push the scratch space state to allocate a new section
char *buffer = scratch->free_ptr;
#if DEBUG
size_t buffer_size = scratch->free_size;
#endif
// Dimensions of the reconstructed frame
int frame_width = frame->width;
int frame_height = frame->height;
int half_height = frame_height / 2;
size_t temporal_row_size = frame_width * sizeof(PIXEL);
int field_pitch = 2 * output_pitch;
int output_width;
int channel;
int row;
// Round up the temporal row size to an integral number of cache lines
temporal_row_size = ALIGN(temporal_row_size, _CACHE_LINE_SIZE);
// Check that the buffer starts on a cache line boundary
assert(ISALIGNED(buffer, _CACHE_LINE_SIZE));
// Check that the number of channels is reasonable
assert(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS);
// Check that the buffer is large enough
#if DEBUG
assert((2 * num_channels * temporal_row_size) <= buffer_size);
#endif
// Allocate buffers for a single row of lowpass and highpass temporal coefficients
// and initialize the arrays of row pointers into the horizontal transform bands
for (channel = 0; channel < num_channels; channel++)
{
IMAGE *wavelet = transform[channel]->wavelet[frame_index];
#if (0 && DEBUG)
int static count = 0;
if (count < 20)
{
char label[PATH_MAX];
int i;
sprintf(label, "Frame%d-%d-", frame_index, count);
DumpPGM(label, wavelet, NULL);
for (i = 1; i < wavelet->num_bands; i++)
{
sprintf(label, "Frame-%d-band%d-%d-", frame_index, i, count);
DumpBandPGM(label, wavelet, i, NULL);
}
}
count++;
#endif
// Initialize the row pointers into the horizontal bands
horizontal_lowlow[channel] = wavelet->band[LL_BAND];
horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
horizontal_highlow[channel] = wavelet->band[HL_BAND];
horizontal_highhigh[channel] = wavelet->band[HH_BAND];
lowlow_quantization[channel] = wavelet->quantization[LL_BAND];
lowhigh_quantization[channel] = wavelet->quantization[LH_BAND];
highlow_quantization[channel] = wavelet->quantization[HL_BAND];
highhigh_quantization[channel] = wavelet->quantization[HH_BAND];
// Compute the pitch in units of pixels
horizontal_pitch[channel] = wavelet->pitch / sizeof(PIXEL);
// Compute the 8-bit pitch in units of pixels
horizontal_pitch8s[channel] = wavelet->pitch8s / sizeof(PIXEL);
//horizontal_pitch8s[channel] = wavelet->pitch8s/sizeof(PIXEL8S);
// Remember the width of the horizontal wavelet rows for this channel
horizontal_width[channel] = wavelet->width;
//TODO: Need to recode the buffer allocations using the scratch space API
// Divide the buffer into temporal lowpass and highpass rows
temporal_lowpass[channel] = (PIXEL *)(buffer + (2 * channel) * temporal_row_size);
temporal_highpass[channel] = (PIXEL *)(buffer + (2 * channel + 1) * temporal_row_size);
}
// Process one row at a time from each channel
for (row = 0; row < half_height; row++)
{
PIXEL *line_buffer = (PIXEL *)(buffer + (2 * num_channels + 2) * temporal_row_size);
// Invert the horizontal transform applied to the temporal bands in each channel
for (channel = 0; channel < num_channels; channel++)
{
int pitch = horizontal_pitch[channel];
//int pitch8s = horizontal_pitch8s[channel];
// Invert the horizontal transform applied to the temporal lowpass row
InvertHorizontalRow16s8sTo16sBuffered(horizontal_lowlow[channel], lowlow_quantization[channel],
(PIXEL8S *)horizontal_lowhigh[channel], lowhigh_quantization[channel],
temporal_lowpass[channel],
horizontal_width[channel],
(PIXEL *)line_buffer);
// Invert the horizontal transform applied to the temporal highpass row
//DAN20051004 -- possible reversiblity issue
//InvertHorizontalRow8sBuffered //----------------------- Maybe bad
InvertHorizontalRow16s8sTo16sBuffered(horizontal_highlow[channel], highlow_quantization[channel],
(PIXEL8S *)horizontal_highhigh[channel], highhigh_quantization[channel],
temporal_highpass[channel],
horizontal_width[channel],
(PIXEL *)line_buffer);
// Advance to the next row in each horizontal band in this channel
horizontal_lowlow[channel] += pitch;
horizontal_lowhigh[channel] += pitch;
horizontal_highlow[channel] += pitch;
horizontal_highhigh[channel] += pitch;
}
// The output width is twice the width of the wavelet bands
output_width = 2 * horizontal_width[0];
// Adjust the frame width to fill to the end of each row
//frame_width = output_pitch / 2;
if (precision == CODEC_PRECISION_10BIT)
{
// Invert the temporal bands from all channels and pack output pixels
switch (frame->format)
{
// Need to reduce the resolution from 10 bits to 8 bits during the inverse
case DECODED_FORMAT_YUYV:
InvertInterlacedRow16s10bitToYUV(temporal_lowpass, temporal_highpass, num_channels,
output, output_pitch, output_width, frame_width,
chroma_offset);
break;
case DECODED_FORMAT_UYVY:
InvertInterlacedRow16s10bitToUYVY(temporal_lowpass, temporal_highpass, num_channels,
output, output_pitch, output_width, frame_width,
chroma_offset);
break;
default:
assert(0);
break;
}
}
else // Older code for 8-bit precision
{
int format;
assert(precision == CODEC_PRECISION_8BIT);
switch (frame->format)
{
case DECODED_FORMAT_YUYV:
format = COLOR_FORMAT_YUYV;
break;
case DECODED_FORMAT_UYVY:
format = COLOR_FORMAT_UYVY;
break;
}
// Invert the temporal bands from all channels and pack output pixels
InvertInterlacedRow16sToYUV(temporal_lowpass, temporal_highpass, num_channels,
output, output_pitch, output_width, frame_width,
chroma_offset, format);
}
// Advance to the next row in the packed output image
output += field_pitch;
}
}
#endif
#if _INTERLACED_WORKER_THREADS
// TransformInverseFrameSectionToYUV
//
// Worker-thread entry point that inverts the frame (horizontal + temporal)
// wavelet transform for one claimed row at a time and packs the result into
// interlaced 8-bit YUV output. Multiple worker threads run this routine
// concurrently: each iteration claims the next unprocessed row index from
// decoder->interlaced_worker.current_row (guarded by a critical section and
// gated by the row semaphore), so rows are distributed dynamically.
//
// Parameters:
//   decoder       - decoder state (transforms, scratch space, worker state)
//   thread_index  - index of this worker thread; selects a scratch-buffer slice
//   frame_index   - which wavelet frame in each channel transform to invert
//   num_channels  - number of color channels (<= TRANSFORM_MAX_CHANNELS)
//   output        - base pointer of the packed output image
//   output_pitch  - output row pitch in bytes
//   frame         - reconstructed frame dimensions and output format
//   chroma_offset - chroma offset applied while packing output pixels
//   precision     - CODEC_PRECISION_8BIT or CODEC_PRECISION_10BIT
void TransformInverseFrameSectionToYUV(DECODER *decoder, int thread_index, int frame_index, int num_channels,
uint8_t *output, int output_pitch, FRAME_INFO *frame,
int chroma_offset, int precision)
{
FILE *logfile = decoder->logfile;
TRANSFORM **transform = decoder->transform;
const SCRATCH *scratch = &decoder->scratch;
// Pointers to the rows in the horizontal wavelet for each channel
PIXEL *horizontal_lowlow[TRANSFORM_MAX_CHANNELS];
PIXEL *horizontal_lowhigh[TRANSFORM_MAX_CHANNELS];
PIXEL *horizontal_highlow[TRANSFORM_MAX_CHANNELS];
PIXEL *horizontal_highhigh[TRANSFORM_MAX_CHANNELS];
// Horizontal wavelet band width and pitch
int horizontal_width[TRANSFORM_MAX_CHANNELS];
int horizontal_pitch[TRANSFORM_MAX_CHANNELS];
int horizontal_pitch8s[TRANSFORM_MAX_CHANNELS];
// Quantization factors
int lowlow_quantization[TRANSFORM_MAX_CHANNELS];
int lowhigh_quantization[TRANSFORM_MAX_CHANNELS];
int highlow_quantization[TRANSFORM_MAX_CHANNELS];
int highhigh_quantization[TRANSFORM_MAX_CHANNELS];
// Pointers to the rows in the temporal wavelet for each channel
PIXEL *temporal_lowpass[TRANSFORM_MAX_CHANNELS];
PIXEL *temporal_highpass[TRANSFORM_MAX_CHANNELS];
// Push the scratch space state to allocate a new section
char *buffer = scratch->free_ptr;
size_t buffer_size = scratch->free_size;
uint8_t *output_row_ptr = output;
// Dimensions of the reconstructed frame
int frame_width = frame->width;
int frame_height = frame->height;
// Each transform row expands to two interlaced output rows (fields)
int half_height = frame_height / 2;
size_t temporal_row_size = frame_width * sizeof(PIXEL);
int field_pitch = 2 * output_pitch;
int output_width;
int channel;
int row;
HANDLE row_semaphore = decoder->interlaced_worker.row_semaphore;
int return_value;
// Round up the temporal row size to an integral number of cache lines
temporal_row_size = ALIGN(temporal_row_size, _CACHE_LINE_SIZE);
// Divide the buffer space between the four threads
buffer_size /= 4;
buffer += buffer_size * thread_index;
// Check that the buffer starts on a cache line boundary
assert(ISALIGNED(buffer, _CACHE_LINE_SIZE));
// Check that the number of channels is reasonable
assert(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS);
// Check that the buffer is large enough
assert((2 * num_channels * temporal_row_size) <= buffer_size);
// Allocate buffers for a single row of lowpass and highpass temporal coefficients
// and initialize the arrays of row pointers into the horizontal transform bands
for (channel = 0; channel < num_channels; channel++)
{
IMAGE *wavelet = transform[channel]->wavelet[frame_index];
#if (0 && DEBUG)
int static count = 0;
if (count < 20)
{
char label[PATH_MAX];
int i;
sprintf(label, "Frame%d-%d-", frame_index, count);
DumpPGM(label, wavelet, NULL);
for (i = 1; i < wavelet->num_bands; i++)
{
sprintf(label, "Frame-%d-band%d-%d-", frame_index, i, count);
DumpBandPGM(label, wavelet, i, NULL);
}
}
count++;
#endif
// Initialize the row pointers into the horizontal bands
horizontal_lowlow[channel] = wavelet->band[LL_BAND];
horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
horizontal_highlow[channel] = wavelet->band[HL_BAND];
horizontal_highhigh[channel] = wavelet->band[HH_BAND];
lowlow_quantization[channel] = wavelet->quantization[LL_BAND];
lowhigh_quantization[channel] = wavelet->quantization[LH_BAND];
highlow_quantization[channel] = wavelet->quantization[HL_BAND];
highhigh_quantization[channel] = wavelet->quantization[HH_BAND];
// Compute the pitch in units of pixels
horizontal_pitch[channel] = wavelet->pitch / sizeof(PIXEL);
// Compute the 8-bit pitch in units of pixels
horizontal_pitch8s[channel] = wavelet->pitch8s / sizeof(PIXEL);
//horizontal_pitch8s[channel] = wavelet->pitch8s/sizeof(PIXEL8S);
// Remember the width of the horizontal wavelet rows for this channel
horizontal_width[channel] = wavelet->width;
//TODO: Need to recode the buffer allocations using the scratch space API
// Divide the buffer into temporal lowpass and highpass rows
temporal_lowpass[channel] = (PIXEL *)(buffer + (2 * channel) * temporal_row_size);
temporal_highpass[channel] = (PIXEL *)(buffer + (2 * channel + 1) * temporal_row_size);
}
#if (0 && DEBUG)
if (logfile)
{
fprintf(logfile, "Output buffer: %d (0x%p)\n", output, output);
}
#endif
/* if (thread_index == 0)
{
row = 0;
row_step = 1;
}
else if (thread_index == 1)
{
row = half_height - 1;
row_step = -1;
// Move to the bottom of the transform and process moving up
for (channel = 0; channel < num_channels; channel++)
{
int offset = horizontal_pitch[channel] * (half_height - 1);
horizontal_lowlow[channel] += offset;
horizontal_lowhigh[channel] += offset;
horizontal_highlow[channel] += offset;
horizontal_highhigh[channel] += offset;
horizontal_pitch[channel] = NEG(horizontal_pitch[channel]);
horizontal_pitch8s[channel] = NEG(horizontal_pitch8s[channel]);
}
output += field_pitch * (half_height - 1);
field_pitch = NEG(field_pitch);
}
else
{
assert(0); // what about middle threads?
}
#if (0 && DEBUG)
if (logfile) {
fprintf(logfile, "Thread index: %d, start row: %d, row step: %d, field_pitch: %d\n",
thread_index, row, row_step, field_pitch);
}
#endif
*/
// Loop until all of the rows have been processed
for (;;)
{
// Wait for one row from each channel to invert the transform
// (zero timeout: when no row credit remains the wait fails and the loop exits)
return_value = WaitForSingleObject(row_semaphore, 0);
// Determine the index of this worker thread
if (return_value == WAIT_OBJECT_0)
{
// Claim the next unprocessed row index under the worker lock
if (decoder->interlaced_worker.lock_init)
{
EnterCriticalSection(&decoder->interlaced_worker.lock);
}
row = decoder->interlaced_worker.current_row++;
if (decoder->interlaced_worker.lock_init)
LeaveCriticalSection(&decoder->interlaced_worker.lock);
// Each transform row produces two output rows, hence the 2x pitch step
output_row_ptr = output;
output_row_ptr += row * 2 * output_pitch;
// Recompute the band row pointers from the claimed row index
// (rows may be processed out of order across threads)
for (channel = 0; channel < num_channels; channel++)
{
int pitch = horizontal_pitch[channel];
IMAGE *wavelet = transform[channel]->wavelet[frame_index];
horizontal_lowlow[channel] = wavelet->band[LL_BAND];
horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
horizontal_highlow[channel] = wavelet->band[HL_BAND];
horizontal_highhigh[channel] = wavelet->band[HH_BAND];
horizontal_lowlow[channel] += pitch * row;
horizontal_lowhigh[channel] += pitch * row;
horizontal_highlow[channel] += pitch * row;
horizontal_highhigh[channel] += pitch * row;
}
}
if (return_value == WAIT_OBJECT_0 && 0 <= row && row < half_height)
{
//PIXEL *line_buffer = (PIXEL *)(buffer + (2 * num_channels + 2) * temporal_row_size);
// Intermediate buffer for the inverse horizontal transform, placed in the
// scratch slice after the 2 * num_channels temporal coefficient rows
PIXEL *line_buffer = (PIXEL *)(buffer + 2 * num_channels * temporal_row_size);
// assert(0 <= row && row < half_height);
#if (0 && DEBUG)
if (logfile)
{
fprintf(logfile, "Processing row: %d, thread index: %d, output: %d (0x%p)\n",
row, thread_index, output_row_ptr);
}
#endif
// Invert the horizontal transform applied to the temporal bands in each channel
for (channel = 0; channel < num_channels; channel++)
{
int pitch = horizontal_pitch[channel];
//int pitch8s = horizontal_pitch8s[channel];
#if (0 && DEBUG)
// Invert the horizontal transform by duplicating the lowpass pixels
InvertHorizontalRowDuplicated16s(horizontal_lowlow[channel], lowlow_quantization[channel],
(PIXEL8S *)horizontal_lowhigh[channel], lowhigh_quantization[channel],
temporal_lowpass[channel], horizontal_width[channel],
(PIXEL *)line_buffer);
#else
// Invert the horizontal transform applied to the temporal lowpass row
InvertHorizontalRow16s8sTo16sBuffered(horizontal_lowlow[channel], lowlow_quantization[channel],
(PIXEL8S *)horizontal_lowhigh[channel], lowhigh_quantization[channel],
temporal_lowpass[channel],
horizontal_width[channel],
(PIXEL *)line_buffer);
#endif
// Invert the horizontal transform applied to the temporal highpass row
// NOTE(review): the non-threaded path uses InvertHorizontalRow16s8sTo16sBuffered
// for the highpass row (see the DAN20051004 reversibility comment there);
// confirm that the 8-bit variant used here is intentional
InvertHorizontalRow8sBuffered((PIXEL8S *)horizontal_highlow[channel], highlow_quantization[channel],
(PIXEL8S *)horizontal_highhigh[channel], highhigh_quantization[channel],
temporal_highpass[channel],
horizontal_width[channel],
(PIXEL *)line_buffer);
// Advance to the next row in each horizontal band in this channel
//horizontal_lowlow[channel] += pitch;
//horizontal_lowhigh[channel] += pitch;
//horizontal_highlow[channel] += pitch;
//horizontal_highhigh[channel] += pitch;
}
// The output width is twice the width of the wavelet bands
output_width = 2 * horizontal_width[0];
// Adjust the frame width to fill to the end of each row
//frame_width = output_pitch / 2;
if (precision == CODEC_PRECISION_10BIT)
{
// Invert the temporal bands from all channels and pack output pixels
switch (frame->format)
{
// Need to reduce the resolution from 10 bits to 8 bits during the inverse
case DECODED_FORMAT_YUYV:
InvertInterlacedRow16s10bitToYUV(temporal_lowpass, temporal_highpass, num_channels,
output_row_ptr, output_pitch, output_width, frame_width,
chroma_offset);
break;
case DECODED_FORMAT_UYVY:
InvertInterlacedRow16s10bitToUYVY(temporal_lowpass, temporal_highpass, num_channels,
output_row_ptr, output_pitch, output_width, frame_width,
chroma_offset);
break;
default:
assert(0);
break;
}
}
else // Older code for 8-bit precision
{
int format;
assert(precision == CODEC_PRECISION_8BIT);
// NOTE(review): no default case — 'format' is left uninitialized for any
// other decoded format; confirm callers only pass YUYV or UYVY here
switch (frame->format)
{
case DECODED_FORMAT_YUYV:
format = COLOR_FORMAT_YUYV;
break;
case DECODED_FORMAT_UYVY:
format = COLOR_FORMAT_UYVY;
break;
}
// Invert the temporal bands from all channels and pack output pixels
InvertInterlacedRow16sToYUV(temporal_lowpass, temporal_highpass, num_channels,
output_row_ptr, output_pitch, output_width, frame_width,
chroma_offset, format);
}
// Advance to the next row in the input transforms
//row += row_step;
// Advance to the next row in the packed output image
//output += field_pitch;
}
else
{
// No more rows to process
break;
}
}
#if (0 && DEBUG)
if (logfile)
{
fprintf(logfile, "Finished transform, thread index: %d\n", thread_index);
}
#endif
}
#endif
// TransformInverseFrameToRow16u
//
// Inverts the frame (horizontal + temporal) wavelet transform for every row
// and writes the result as rows of 16-bit luma and chroma. The temporal
// coefficient buffers are reused across channels (unlike the YUV packing
// variants, which keep one pair per channel). Within each output row the
// channels are written consecutively: luma at full width followed by the
// chroma channels at half width.
//
// Parameters:
//   decoder       - decoder state; decoder->frame.resolution selects the
//                   horizontal inverse (full vs. half-horizontal bypass)
//   transform     - per-channel wavelet transforms
//   frame_index   - which wavelet frame in each channel transform to invert
//   num_channels  - number of color channels (<= TRANSFORM_MAX_CHANNELS)
//   output        - output image base pointer (16-bit pixels)
//   output_pitch  - output row pitch in bytes
//   frame         - reconstructed frame dimensions
//   scratch       - scratch space for temporal coefficient rows
//   chroma_offset - chroma offset applied during the temporal inverse
//   precision     - coefficient precision passed to the temporal inverse
void TransformInverseFrameToRow16u(DECODER *decoder, TRANSFORM *transform[], int frame_index, int num_channels,
PIXEL16U *output, int output_pitch, FRAME_INFO *frame,
const SCRATCH *scratch, int chroma_offset,
int precision)
{
// Pointers to the rows in the horizontal wavelet for each channel
PIXEL *horizontal_lowlow[TRANSFORM_MAX_CHANNELS];
PIXEL *horizontal_lowhigh[TRANSFORM_MAX_CHANNELS];
PIXEL *horizontal_highlow[TRANSFORM_MAX_CHANNELS];
PIXEL *horizontal_highhigh[TRANSFORM_MAX_CHANNELS];
// Horizontal wavelet band width and pitch
int horizontal_width[TRANSFORM_MAX_CHANNELS];
int horizontal_pitch[TRANSFORM_MAX_CHANNELS];
// Quantization factors
int lowlow_quantization[TRANSFORM_MAX_CHANNELS];
int lowhigh_quantization[TRANSFORM_MAX_CHANNELS];
int highlow_quantization[TRANSFORM_MAX_CHANNELS];
int highhigh_quantization[TRANSFORM_MAX_CHANNELS];
// Push the scratch space state to allocate a new section
char *buffer = scratch->free_ptr;
#if DEBUG
size_t buffer_size = scratch->free_size;
#endif
// Buffers for the rows in the temporal wavelet (reused for each channel)
PIXEL *temporal_lowpass;
PIXEL *temporal_highpass;
int output_row_width[TRANSFORM_MAX_CHANNELS];
// Dimensions of the reconstructed frame
int frame_width = frame->width;
int frame_height = frame->height;
// Each transform row expands to two interlaced output rows (fields)
int half_height = frame_height / 2;
size_t temporal_row_size = frame_width * sizeof(PIXEL);
int field_pitch = 2 * output_pitch;
int luma_width = frame_width;
int chroma_width = luma_width / 2;
int channel;
int row;
#if (DEBUG_ROW16U)
PIXEL16U *output_buffer;
#endif
// This routine should only be called to decode rows of 16-bit luma and chroma
//assert(frame->format == DECODED_FORMAT_YR16);
// Round up the temporal row size to an integral number of cache lines
temporal_row_size = ALIGN(temporal_row_size, _CACHE_LINE_SIZE);
// Check that the buffer starts on a cache line boundary
assert(ISALIGNED(buffer, _CACHE_LINE_SIZE));
// Check that the number of channels is reasonable
assert(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS);
// Buffer must be large enough for two rows of temporal coefficients (lowpass and highpass)
// plus the buffer used by the inverse horizontal transform for its intermediate results
#if DEBUG
assert((2 * temporal_row_size) <= buffer_size);
#endif
// Allocate buffers for one row of lowpass and highpass temporal coefficients
temporal_lowpass = (PIXEL *)&buffer[0];
temporal_highpass = (PIXEL *)&buffer[temporal_row_size];
#if (DEBUG_ROW16U)
output_buffer = (PIXEL16U *)&buffer[2 * temporal_row_size];
#endif
// Initialize the arrays of row pointers into the horizontal transform bands
for (channel = 0; channel < num_channels; channel++)
{
IMAGE *wavelet = transform[channel]->wavelet[frame_index];
#if (0 && DEBUG)
int static count = 0;
if (count < 20)
{
char label[PATH_MAX];
int i;
sprintf(label, "Frame%d-%d-", frame_index, count);
DumpPGM(label, wavelet, NULL);
for (i = 1; i < wavelet->num_bands; i++)
{
sprintf(label, "Frame-%d-band%d-%d-", frame_index, i, count);
DumpBandPGM(label, wavelet, i, NULL);
}
}
count++;
#endif
// Initialize the row pointers into the horizontal bands
horizontal_lowlow[channel] = wavelet->band[LL_BAND];
horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
horizontal_highlow[channel] = wavelet->band[HL_BAND];
horizontal_highhigh[channel] = wavelet->band[HH_BAND];
lowlow_quantization[channel] = wavelet->quantization[LL_BAND];
lowhigh_quantization[channel] = wavelet->quantization[LH_BAND];
highlow_quantization[channel] = wavelet->quantization[HL_BAND];
highhigh_quantization[channel] = wavelet->quantization[HH_BAND];
// Compute the pitch in units of pixels
horizontal_pitch[channel] = wavelet->pitch / sizeof(PIXEL);
// Remember the width of the horizontal wavelet rows for this channel
horizontal_width[channel] = wavelet->width;
// Compute the width of each row of output pixels
output_row_width[channel] = (channel == 0) ? luma_width : chroma_width;
}
// Process one row at a time from each channel
for (row = 0; row < half_height; row++)
{
#if (DEBUG_ROW16U)
PIXEL16U *output_row_ptr = output_buffer;
PIXEL16U *planar_output[TRANSFORM_MAX_CHANNELS];
int planar_pitch[TRANSFORM_MAX_CHANNELS];
ROI strip = {luma_width, 2};
uint8_t *yuv_output = (uint8_t *)output;
uint8_t *output1 = yuv_output;
uint8_t *output2 = yuv_output + output_pitch;
#else
PIXEL16U *output_row_ptr = output;
#endif
// Invert the horizontal transform applied to the temporal bands in each channel
for (channel = 0; channel < num_channels; channel++)
{
int pitch = horizontal_pitch[channel];
if (decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
{
// Half-horizontal output: skip the horizontal inverse and pass the
// band data through directly
// Invert the horizontal transform applied to the temporal lowpass row
BypassHorizontalRow16s(horizontal_lowlow[channel], horizontal_lowhigh[channel],
temporal_lowpass, horizontal_width[channel]);
// Invert the horizontal transform applied to the temporal highpass row
BypassHorizontalRow16s(horizontal_highlow[channel], horizontal_highhigh[channel],
temporal_highpass, horizontal_width[channel]);
}
else
{
// Invert the horizontal transform applied to the temporal lowpass row
InvertHorizontalRow16s(horizontal_lowlow[channel], horizontal_lowhigh[channel],
temporal_lowpass, horizontal_width[channel]);
// Invert the horizontal transform applied to the temporal highpass row
InvertHorizontalRow16s(horizontal_highlow[channel], horizontal_highhigh[channel],
temporal_highpass, horizontal_width[channel]);
}
//***DEBUG***
//memset(temporal_highpass, 0, temporal_row_size);
//FillPixelMemory(temporal_highpass, temporal_row_size/sizeof(PIXEL), 50);
// Advance to the next row in each horizontal band in this channel
horizontal_lowlow[channel] += pitch;
horizontal_lowhigh[channel] += pitch;
horizontal_highlow[channel] += pitch;
horizontal_highhigh[channel] += pitch;
#if (DEBUG_ROW16U)
// Write the rows of 16-bit pixels to a temporary buffer
planar_output[channel] = output_row_ptr;
planar_pitch[channel] = output_pitch * sizeof(PIXEL);
// Invert the temporal transform and output two rows of luma or chroma
InvertInterlacedRow16sToRow16u(temporal_lowpass, temporal_highpass,
planar_output[channel], planar_pitch[channel],
output_row_width[channel],
frame_width, chroma_offset, precision);
//if (channel > 0)
if (0)
{
uint8_t *output3 = (uint8_t *)planar_output[channel];
uint8_t *output4 = (uint8_t *)output3 + planar_pitch[channel];
int output_size = output_row_width[channel] * sizeof(PIXEL);
int fill_value = (128 << 8);
//memset(output3, 0, output_size);
//memset(output4, 0, output_size);
FillPixelMemory((PIXEL *)output3, output_row_width[channel], fill_value);
FillPixelMemory((PIXEL *)output4, output_row_width[channel], fill_value);
}
#else
// Invert the temporal transform and output two rows of luma or chroma
InvertInterlacedRow16sToRow16u(temporal_lowpass, temporal_highpass,
output_row_ptr, output_pitch, output_row_width[channel],
frame_width, chroma_offset, precision);
#endif
// Advance the output row pointer to the next channel
// (channels are stored consecutively within each output row)
output_row_ptr += output_row_width[channel];
// Check the output row alignment
assert(ISALIGNED16(output_row_ptr));
}
// Advance to the next group of rows in the output image
// (field_pitch is in bytes; convert to PIXEL16U elements)
output += field_pitch / sizeof(PIXEL16U);
}
}
//#endif
#if _INTERLACED_WORKER_THREADS
// TransformInverseFrameSectionToRow16u
//
// Worker-thread entry point that inverts the frame (horizontal + temporal)
// wavelet transform and writes rows of 16-bit luma and chroma. Threaded
// counterpart of TransformInverseFrameToRow16u: each iteration claims the
// next unprocessed row index from decoder->interlaced_worker.current_row
// (guarded by a critical section and gated by the row semaphore), so rows
// are distributed dynamically across the worker threads.
//
// Parameters:
//   decoder       - decoder state (transforms, scratch space, worker state)
//   thread_index  - index of this worker thread; selects a scratch-buffer slice
//   frame_index   - which wavelet frame in each channel transform to invert
//   num_channels  - number of color channels (<= TRANSFORM_MAX_CHANNELS)
//   output        - output image base pointer (16-bit pixels)
//   output_pitch  - output row pitch in bytes
//   frame         - reconstructed frame dimensions
//   chroma_offset - chroma offset applied during the temporal inverse
//   precision     - coefficient precision passed to the temporal inverse
void TransformInverseFrameSectionToRow16u(DECODER *decoder, int thread_index, int frame_index, int num_channels,
PIXEL16U *output, int output_pitch, FRAME_INFO *frame,
int chroma_offset, int precision)
{
FILE *logfile = decoder->logfile;
TRANSFORM **transform = decoder->transform;
const SCRATCH *scratch = &decoder->scratch;
// Pointers to the rows in the horizontal wavelet for each channel
PIXEL *horizontal_lowlow[TRANSFORM_MAX_CHANNELS];
PIXEL *horizontal_lowhigh[TRANSFORM_MAX_CHANNELS];
PIXEL *horizontal_highlow[TRANSFORM_MAX_CHANNELS];
PIXEL *horizontal_highhigh[TRANSFORM_MAX_CHANNELS];
// Horizontal wavelet band width and pitch
int horizontal_width[TRANSFORM_MAX_CHANNELS];
int horizontal_pitch[TRANSFORM_MAX_CHANNELS];
// Quantization factors
int lowlow_quantization[TRANSFORM_MAX_CHANNELS];
int lowhigh_quantization[TRANSFORM_MAX_CHANNELS];
int highlow_quantization[TRANSFORM_MAX_CHANNELS];
int highhigh_quantization[TRANSFORM_MAX_CHANNELS];
// Push the scratch space state to allocate a new section
char *buffer = scratch->free_ptr;
size_t buffer_size = scratch->free_size;
// Buffers for the rows in the temporal wavelet (reused for each channel)
PIXEL *temporal_lowpass;
PIXEL *temporal_highpass;
int output_row_width[TRANSFORM_MAX_CHANNELS];
// Dimensions of the reconstructed frame
int frame_width = frame->width;
int frame_height = frame->height;
// Each transform row expands to two interlaced output rows (fields)
int half_height = frame_height / 2;
size_t temporal_row_size = frame_width * sizeof(PIXEL);
int field_pitch = 2 * output_pitch;
int luma_width = frame_width;
int chroma_width = luma_width / 2;
int channel;
int row;
HANDLE row_semaphore = decoder->interlaced_worker.row_semaphore;
int return_value;
#if (DEBUG_ROW16U)
PIXEL16U *output_buffer;
#endif
// This routine should only be called to decode rows of 16-bit luma and chroma
//assert(frame->format == DECODED_FORMAT_YR16);
// Round up the temporal row size to an integral number of cache lines
temporal_row_size = ALIGN(temporal_row_size, _CACHE_LINE_SIZE);
#if 0
if (thread_index == 1)
{
// Skip over the buffer space used by the other thread
size_t buffer_usage = 2 * temporal_row_size;
buffer += buffer_usage;
buffer_size -= buffer_usage;
}
#else
// Divide the buffer space into four slices, one per worker thread
buffer_size /= 4;
buffer += buffer_size * thread_index;
#endif
// Check that the buffer starts on a cache line boundary
assert(ISALIGNED(buffer, _CACHE_LINE_SIZE));
// Check that the number of channels is reasonable
assert(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS);
// Buffer must be large enough for two rows of temporal coefficients (lowpass and highpass)
// plus the buffer used by the inverse horizontal transform for its intermediate results
assert((2 * temporal_row_size) <= buffer_size);
// Allocate buffers for one row of lowpass and highpass temporal coefficients
temporal_lowpass = (PIXEL *)&buffer[0];
temporal_highpass = (PIXEL *)&buffer[temporal_row_size];
#if (DEBUG_ROW16U)
output_buffer = (PIXEL16U *)&buffer[2 * temporal_row_size];
#endif
// Initialize the arrays of row pointers into the horizontal transform bands
for (channel = 0; channel < num_channels; channel++)
{
IMAGE *wavelet = transform[channel]->wavelet[frame_index];
#if (0 && DEBUG)
int static count = 0;
if (count < 20)
{
char label[PATH_MAX];
int i;
sprintf(label, "Frame%d-%d-", frame_index, count);
DumpPGM(label, wavelet, NULL);
for (i = 1; i < wavelet->num_bands; i++)
{
sprintf(label, "Frame-%d-band%d-%d-", frame_index, i, count);
DumpBandPGM(label, wavelet, i, NULL);
}
}
count++;
#endif
// Initialize the row pointers into the horizontal bands
horizontal_lowlow[channel] = wavelet->band[LL_BAND];
horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
horizontal_highlow[channel] = wavelet->band[HL_BAND];
horizontal_highhigh[channel] = wavelet->band[HH_BAND];
lowlow_quantization[channel] = wavelet->quantization[LL_BAND];
lowhigh_quantization[channel] = wavelet->quantization[LH_BAND];
highlow_quantization[channel] = wavelet->quantization[HL_BAND];
highhigh_quantization[channel] = wavelet->quantization[HH_BAND];
// Compute the pitch in units of pixels
horizontal_pitch[channel] = wavelet->pitch / sizeof(PIXEL);
// Remember the width of the horizontal wavelet rows for this channel
horizontal_width[channel] = wavelet->width;
// Compute the width of each row of output pixels
output_row_width[channel] = (channel == 0) ? luma_width : chroma_width;
}
#if (0 && DEBUG)
if (logfile)
{
fprintf(logfile, "Output buffer: %d (0x%p)\n", output, output);
}
#endif
/* if (thread_index == 0)
{
row = 0;
row_step = 1;
}
else if (thread_index == 1)
{
row = half_height - 1;
row_step = -1;
// Move to the bottom of the transform and process moving up
for (channel = 0; channel < num_channels; channel++)
{
int offset = horizontal_pitch[channel] * (half_height - 1);
horizontal_lowlow[channel] += offset;
horizontal_lowhigh[channel] += offset;
horizontal_highlow[channel] += offset;
horizontal_highhigh[channel] += offset;
horizontal_pitch[channel] = NEG(horizontal_pitch[channel]);
//horizontal_pitch8s[channel] = NEG(horizontal_pitch8s[channel]);
}
//output += field_pitch * (half_height - 1);
output += (frame_height - 1) * output_pitch/sizeof(PIXEL16U);
output_pitch = NEG(output_pitch);
field_pitch = NEG(field_pitch);
}
else
{
assert(0); // middle threads
}
*/
#if (0 && DEBUG)
if (logfile)
{
fprintf(logfile, "Thread index: %d, start row: %d, row step: %d, field_pitch: %d\n",
thread_index, row, row_step, field_pitch);
}
#endif
// Loop until all of the rows have been processed
for (;;)
{
PIXEL16U *output_row_ptr;
// Wait for one row from each channel to invert the transform
// (zero timeout: when no row credit remains the wait fails and the loop exits)
return_value = WaitForSingleObject(row_semaphore, 0);
// Determine the index of this worker thread
if (return_value == WAIT_OBJECT_0)
{
// Claim the next unprocessed row index under the worker lock
if (decoder->interlaced_worker.lock_init)
{
EnterCriticalSection(&decoder->interlaced_worker.lock);
}
row = decoder->interlaced_worker.current_row++;
if (decoder->interlaced_worker.lock_init)
LeaveCriticalSection(&decoder->interlaced_worker.lock);
output_row_ptr = output;
// NOTE(review): output_pitch is in bytes but this advances a PIXEL16U
// pointer, so the step is row * output_pitch * sizeof(PIXEL16U) bytes —
// presumably equal to row * field_pitch bytes when PIXEL16U is 2 bytes
// (matching the non-threaded version); confirm intended
output_row_ptr += row * output_pitch;
// Recompute the band row pointers from the claimed row index
// (rows may be processed out of order across threads)
for (channel = 0; channel < num_channels; channel++)
{
int pitch = horizontal_pitch[channel];
IMAGE *wavelet = transform[channel]->wavelet[frame_index];
horizontal_lowlow[channel] = wavelet->band[LL_BAND];
horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
horizontal_highlow[channel] = wavelet->band[HL_BAND];
horizontal_highhigh[channel] = wavelet->band[HH_BAND];
horizontal_lowlow[channel] += pitch * row;
horizontal_lowhigh[channel] += pitch * row;
horizontal_highlow[channel] += pitch * row;
horizontal_highhigh[channel] += pitch * row;
}
}
if (return_value == WAIT_OBJECT_0 && 0 <= row && row < half_height)
{
assert(0 <= row && row < half_height);
if (decoder->frame.resolution == DECODED_RESOLUTION_FULL)
{
// Invert the horizontal transform applied to the temporal bands in each channel
for (channel = 0; channel < num_channels; channel++)
{
int pitch = horizontal_pitch[channel];
// Invert the horizontal transform applied to the temporal lowpass row
InvertHorizontalRow16s(horizontal_lowlow[channel], horizontal_lowhigh[channel],
temporal_lowpass, horizontal_width[channel]);
// Invert the horizontal transform applied to the temporal highpass row
InvertHorizontalRow16s(horizontal_highlow[channel], horizontal_highhigh[channel],
temporal_highpass, horizontal_width[channel]);
// Invert the temporal transform and output two rows of luma or chroma
InvertInterlacedRow16sToRow16u(temporal_lowpass, temporal_highpass,
output_row_ptr, output_pitch, output_row_width[channel],
frame_width, chroma_offset, precision);
// Advance the output row pointer to the next channel
// (channels are stored consecutively within each output row)
output_row_ptr += output_row_width[channel];
}
}
else if (decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
{
// Half-horizontal output: the horizontal inverse is bypassed and the
// band data is passed through directly
// Invert the horizontal transform applied to the temporal bands in each channel
for (channel = 0; channel < num_channels; channel++)
{
int pitch = horizontal_pitch[channel];
// Invert the horizontal transform applied to the temporal lowpass row
BypassHorizontalRow16s(horizontal_lowlow[channel], horizontal_lowhigh[channel],
temporal_lowpass, horizontal_width[channel]);
// Invert the horizontal transform applied to the temporal highpass row
BypassHorizontalRow16s(horizontal_highlow[channel], horizontal_highhigh[channel],
temporal_highpass, horizontal_width[channel]);
// Invert the temporal transform and output two rows of luma or chroma
InvertInterlacedRow16sToRow16u(temporal_lowpass, temporal_highpass,
output_row_ptr, output_pitch, output_row_width[channel],
frame_width, chroma_offset, precision);
// Advance the output row pointer to the next channel
// (channels are stored consecutively within each output row)
output_row_ptr += output_row_width[channel];
}
}
}
else
{
// No more rows to process
break;
}
}
#if (DEBUG)
if (logfile)
{
fprintf(logfile, "Finished transform, thread index: %d\n", thread_index);
}
#endif
}
#endif
extern void fast_srand( int seed );
// Apply the inverse horizontal-temporal transform and pack the output into a buffer
void TransformInverseFrameToBuffer(TRANSFORM *transform[], int frame_index, int num_channels,
uint8_t *output, int output_pitch, FRAME_INFO *frame,
const SCRATCH *scratch, int chroma_offset, int precision)
{
// Pointers to the rows in the horizontal wavelet for each channel
PIXEL *horizontal_lowlow[TRANSFORM_MAX_CHANNELS];
PIXEL *horizontal_lowhigh[TRANSFORM_MAX_CHANNELS];
PIXEL *horizontal_highlow[TRANSFORM_MAX_CHANNELS];
PIXEL *horizontal_highhigh[TRANSFORM_MAX_CHANNELS];
// Horizontal wavelet band width and pitch
int horizontal_width[TRANSFORM_MAX_CHANNELS];
int horizontal_pitch[TRANSFORM_MAX_CHANNELS];
//int horizontal_pitch8s[TRANSFORM_MAX_CHANNELS];
// Quantization factors
int lowlow_quantization[TRANSFORM_MAX_CHANNELS];
int lowhigh_quantization[TRANSFORM_MAX_CHANNELS];
int highlow_quantization[TRANSFORM_MAX_CHANNELS];
int highhigh_quantization[TRANSFORM_MAX_CHANNELS];
// Push the scratch space state to allocate a new section
char *buffer = scratch->free_ptr;
size_t buffer_size = scratch->free_size;
// Pointers to the rows in the temporal wavelet for each channel
PIXEL *temporal_lowpass[TRANSFORM_MAX_CHANNELS];
PIXEL *temporal_highpass[TRANSFORM_MAX_CHANNELS];
// Dimensions of the reconstructed frame
int frame_width = frame->width;
int frame_height = frame->height;
int half_height = frame_height / 2;
size_t temporal_row_size = frame_width * sizeof(PIXEL);
size_t temporal_buffer_size = 2 * num_channels * temporal_row_size;
#if DEBUG
size_t yuv_row_size = frame_width * 2;
#endif
char *yuv_buffer;
size_t yuv_buffer_size;
int field_pitch = 2 * output_pitch;
int format = frame->format;
bool inverted = (format == DECODED_FORMAT_RGB24 || format == DECODED_FORMAT_RGB32);
int output_width;
int channel;
int row;
// Round up the temporal row size to an integral number of cache lines
temporal_row_size = ALIGN(temporal_row_size, _CACHE_LINE_SIZE);
// Check that the buffer starts on a cache line boundary
assert(ISALIGNED(buffer, _CACHE_LINE_SIZE));
// Check that the number of channels is reasonable
assert(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS);
// Check that the buffer is large enough
assert((2 * num_channels * temporal_row_size) <= buffer_size);
// Allocate buffers for a single row of lowpass and highpass temporal coefficients
// and initialize the arrays of row pointers into the horizontal transform bands
for (channel = 0; channel < num_channels; channel++)
{
IMAGE *wavelet = transform[channel]->wavelet[frame_index];
// Initialize the row pointers into the horizontal bands
horizontal_lowlow[channel] = wavelet->band[LL_BAND];
horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
horizontal_highlow[channel] = wavelet->band[HL_BAND];
horizontal_highhigh[channel] = wavelet->band[HH_BAND];
lowlow_quantization[channel] = wavelet->quantization[LL_BAND];
lowhigh_quantization[channel] = wavelet->quantization[LH_BAND];
highlow_quantization[channel] = wavelet->quantization[HL_BAND];
highhigh_quantization[channel] = wavelet->quantization[HH_BAND];
// Compute the pitch in units of pixels
horizontal_pitch[channel] = wavelet->pitch / sizeof(PIXEL);
// Compute the 8-bit pitch in units of pixels
//horizontal_pitch8s[channel] = wavelet->pitch8s/sizeof(PIXEL);
//horizontal_pitch8s[channel] = wavelet->pitch8s/sizeof(PIXEL8S);
// Remember the width of the horizontal wavelet rows for this channel
horizontal_width[channel] = wavelet->width;
// Divide the buffer into temporal lowpass and highpass rows
temporal_lowpass[channel] = (PIXEL *)(buffer + (2 * channel) * temporal_row_size);
temporal_highpass[channel] = (PIXEL *)(buffer + (2 * channel + 1) * temporal_row_size);
}
// Allocate buffer space for the intermediate YUV data
yuv_buffer = buffer + temporal_buffer_size;
yuv_buffer_size = buffer_size - temporal_buffer_size;
#if DEBUG
assert(yuv_buffer_size >= 2 * yuv_row_size);
#endif
if (inverted)
{
output += (frame_height - 1) * output_pitch;
output_pitch = (- output_pitch);
field_pitch = (- field_pitch);
}
// Process one row at a time from each channel
for (row = 0; row < half_height; row++)
{
PIXEL *line_buffer = (PIXEL *)(buffer + (2 * num_channels + 2) * temporal_row_size);
// Invert the horizontal transform applied to the temporal bands in each channel
for (channel = 0; channel < num_channels; channel++)
{
int pitch = horizontal_pitch[channel];
//int pitch8s = horizontal_pitch8s[channel];
// Invert the horizontal transform applied to the temporal lowpass row
InvertHorizontalRow16s8sTo16sBuffered(horizontal_lowlow[channel], lowlow_quantization[channel],
(PIXEL8S *)horizontal_lowhigh[channel], lowhigh_quantization[channel],
temporal_lowpass[channel],
horizontal_width[channel],
(PIXEL *)line_buffer);
// Invert the horizontal transform applied to the temporal highpass row
InvertHorizontalRow8sBuffered((PIXEL8S *)horizontal_highlow[channel], highlow_quantization[channel],
(PIXEL8S *)horizontal_highhigh[channel], highhigh_quantization[channel],
temporal_highpass[channel],
horizontal_width[channel],
(PIXEL *)line_buffer);
// Advance to the next row in each horizontal band in this channel
horizontal_lowlow[channel] += pitch;
horizontal_lowhigh[channel] += pitch;
horizontal_highlow[channel] += pitch;
horizontal_highhigh[channel] += pitch;
}
// The output width is twice the width of the wavelet bands
output_width = 2 * horizontal_width[0];
// Adjust the frame width to fill to the end of each row
//frame_width = output_pitch / 2;
//#if BUILD_PROSPECT
if (format == DECODED_FORMAT_V210 || format == DECODED_FORMAT_YU64)
{
// Invert the temporal bands from all channels and pack as V210 output
InvertInterlacedRow16sToV210(temporal_lowpass, temporal_highpass, num_channels,
output, output_pitch, output_width, frame_width,
yuv_buffer, yuv_buffer_size, format, chroma_offset, precision);
}
else
//#endif
{
// Invert the temporal bands from all channels and pack as 8-bit output
InvertInterlacedRow16s(temporal_lowpass, temporal_highpass, num_channels,
output, output_pitch, output_width, frame_width,
yuv_buffer, yuv_buffer_size, format, frame->colorspace,
chroma_offset, precision, row);
}
// Advance to the next row in the packed output image
output += field_pitch;
}
}
// Convert a decoded image to the requested packed output format.
//
// image          source image (decoded pixel data)
// output_buffer  destination frame buffer
// output_pitch   destination pitch in bytes
// format         DECODED_FORMAT_* selector
//
// Unsupported formats assert and emit a blank frame filled with zero chroma.
void CopyImageToBuffer(IMAGE *image, uint8_t *output_buffer, int32_t output_pitch, int format)
{
    START(tk_convert);

    switch (format)
    {
    case DECODED_FORMAT_RGB24:
        // Plain RGB24 is rendered bottom-up (first image row displayed at the bottom)
        ConvertImageToRGB(image, output_buffer, output_pitch, COLOR_FORMAT_RGB24, true);
        break;

    case DECODED_FORMAT_RGB24_INVERTED:
        ConvertImageToRGB(image, output_buffer, output_pitch, COLOR_FORMAT_RGB24, false);
        break;

    case DECODED_FORMAT_RGB32:
        // Plain RGB32 is rendered bottom-up (first image row displayed at the bottom)
        ConvertImageToRGB(image, output_buffer, output_pitch, COLOR_FORMAT_RGB32, true);
        break;

    case DECODED_FORMAT_RGB32_INVERTED:
        ConvertImageToRGB(image, output_buffer, output_pitch, COLOR_FORMAT_RGB32, false);
        break;

    case DECODED_FORMAT_YUYV:
        // The inverted YUV variants are not enabled in this build
        ConvertImageToYUV(image, output_buffer, output_pitch, COLOR_FORMAT_YUYV, false);
        break;

    case DECODED_FORMAT_UYVY:
        ConvertImageToYUV(image, output_buffer, output_pitch, COLOR_FORMAT_UYVY, false);
        break;

    default:
        // Unsupported format (return a blank frame)
        assert(0);
        {
            size_t output_size = image->height * output_pitch;
            memset(output_buffer, COLOR_CHROMA_ZERO, output_size);
        }
        break;
    }

    STOP(tk_convert);
}
// Pack three planes of 10-bit lowpass data into half-width YUYV rows
// (used for side-by-side 3D output).  Two adjacent samples are averaged
// per output value, and the >>5 shift descales the 10-bit pairwise sum
// down to eight bits.
void SideLowpass16s10bitToYUYV(IMAGE *images[], uint8_t *output_buffer, int output_width, int output_height,
int output_pitch, bool inverted)
{
    PIXEL *luma_ptr = images[0]->band[0];
    PIXEL *chroma1_ptr = images[1]->band[0];
    PIXEL *chroma2_ptr = images[2]->band[0];

    const int luma_pitch = images[0]->pitch / sizeof(PIXEL);
    const int chroma1_pitch = images[1]->pitch / sizeof(PIXEL);
    const int chroma2_pitch = images[2]->pitch / sizeof(PIXEL);

    const int input_width = images[0]->width;

    uint8_t *outrow = output_buffer;
    int row;

    // The output pitch should be a positive number before inversion
    assert(output_pitch > 0);

    if (inverted)
    {
        // Write the output rows bottom-up
        outrow += (output_height - 1) * output_pitch;
        output_pitch = NEG(output_pitch);
    }

    for (row = 0; row < output_height; row++)
    {
        uint8_t *outptr = outrow;
        int column;

        for (column = 0; column < input_width; column += 4)
        {
            int chroma_index = column >> 1;

            // Average adjacent samples and descale to eight bits.
            // NOTE(review): the second byte of each YUYV pair is taken from
            // images[2] and the fourth from images[1]; presumably the codec
            // stores the chroma planes in that order -- confirm against the encoder.
            *(outptr++) = SATURATE_8U((luma_ptr[column] + luma_ptr[column + 1]) >> 5);
            *(outptr++) = SATURATE_8U((chroma2_ptr[chroma_index] + chroma2_ptr[chroma_index + 1]) >> 5);
            *(outptr++) = SATURATE_8U((luma_ptr[column + 2] + luma_ptr[column + 3]) >> 5);
            *(outptr++) = SATURATE_8U((chroma1_ptr[chroma_index] + chroma1_ptr[chroma_index + 1]) >> 5);
        }

        // Advance to the next rows in the input and output images
        luma_ptr += luma_pitch;       // 3D Work
        chroma1_ptr += chroma1_pitch;
        chroma2_ptr += chroma2_pitch;
        outrow += output_pitch;
    }
}
// Convert 16-bit signed lowpass data into packed RGB/YUV and store it in the output buffer
//
// The lowpass band of each channel is converted directly to the display format
// (fast preview path).  Dispatch is on the decoded output format and, within
// each case, on the internal encoded format.
//
// decoder        decoder state (consulted for 3D channel blending modes)
// images         one lowpass image per channel
// num_channels   number of valid entries in images[]
// output_buffer  destination frame buffer
// output_pitch   destination pitch in bytes
// info           output width/height/format/colorspace
// chroma_offset  chroma offset (not used on any path in this routine)
// precision      encoded precision in bits (e.g. CODEC_PRECISION_10BIT)
// encode_format  internal encoded format (YUV 4:2:2, RGB 4:4:4/4:4:4:4, Bayer)
// whitebitdepth  white point bit depth forwarded to the RGB48 converters
void CopyLowpass16sToBuffer(DECODER *decoder, IMAGE *images[], int num_channels, uint8_t *output_buffer, int32_t output_pitch,
FRAME_INFO *info, int chroma_offset, int precision, int encode_format, int whitebitdepth)
{
//IMAGE *image = frame->channel[0];
bool inverted = false;
int output_width = info->width;
int output_height = info->height;
// Shift that reduces the encoded precision down to 8 bits
int descale = precision - 8;
// Get the color format from the decoded format
int color_format = info->format & COLOR_FORMAT_MASK;
// Must compile this routine with switches set for decoding to 8-bit unsigned pixels
#if !defined(_DECODE_FRAME_8U) || (_DECODE_FRAME_8U == 0)
assert(0);
return;
#endif
START(tk_convert);
#if 0
// Fill the output buffer with blank values
EraseOutputBuffer(output_buffer, info->width, info->height, output_pitch, info->format);
#endif
// Determine the type of conversion
switch (info->format)
{
case DECODED_FORMAT_RGB24:
inverted = true;
// Fall through and convert to RGB (first image row displayed at the bottom)
case DECODED_FORMAT_RGB24_INVERTED:
if (encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
{
// Encoded as RGB: convert the RGB 4:4:4 lowpass bands directly
ConvertLowpass16sRGB48ToRGB(images, output_buffer, output_width, output_height, output_pitch,
COLOR_FORMAT_RGB24, info->colorspace, inverted, descale, num_channels);
}
else
{
// Encoded as YUV: color convert while packing
ConvertLowpass16sToRGBNoIPPFast(images, output_buffer, output_width, output_height, output_pitch,
COLOR_FORMAT_RGB24, info->colorspace, inverted, descale);
}
break;
case DECODED_FORMAT_RGB32:
inverted = true;
// Fall through and convert to RGB (first image row displayed at the bottom)
case DECODED_FORMAT_RGB32_INVERTED:
if (encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
{
ConvertLowpass16sRGB48ToRGB(images, output_buffer, output_width, output_height, output_pitch,
COLOR_FORMAT_RGB32, info->colorspace, inverted, descale, num_channels);
}
else
{
ConvertLowpass16sToRGBNoIPPFast(images, output_buffer, output_width, output_height, output_pitch,
COLOR_FORMAT_RGB32, info->colorspace, inverted, descale);
}
break;
case DECODED_FORMAT_RG48:
if (encode_format == ENCODED_FORMAT_BAYER)
{
ConvertLowpass16sBayerToRGB48(images, output_buffer, output_width, output_height,
output_pitch, 2, num_channels);
}
else if (encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
{
// NOTE(review): the inner test repeats the enclosing else-if condition,
// so scale is always 2 on this path -- confirm the intent.
int scale = 1;
if (encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
scale = 2;
ConvertLowpass16sRGB48ToRGB48(images, output_buffer, output_width, output_height,
output_pitch, scale, num_channels);
}
else
{
ConvertLowpass16sYUVtoRGB48(images, (uint8_t *)output_buffer, output_width,
output_height, output_pitch, info->colorspace, inverted, descale,
info->format, whitebitdepth);
}
break;
case DECODED_FORMAT_RG64:
// 64-bit RGBA output is only supported for RGB(A) encoded sources
if (encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
{
ConvertLowpass16sRGBA64ToRGBA64(images, output_buffer, output_width, output_height, output_pitch,
descale, num_channels, info->format & 0xffff);
}
else
{
assert(0);
}
break;
case DECODED_FORMAT_B64A:
case DECODED_FORMAT_R210:
case DECODED_FORMAT_DPX0:
case DECODED_FORMAT_RG30:
case DECODED_FORMAT_AR10:
case DECODED_FORMAT_AB10:
// Deep RGB output formats: pick the converter by the encoded format
if (encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
{
ConvertLowpass16sRGBA64ToRGBA64(images, output_buffer, output_width, output_height, output_pitch,
descale, num_channels, info->format & 0xffff);
}
else
{
ConvertLowpass16sYUVtoRGB48(images, (uint8_t *)output_buffer, output_width,
output_height, output_pitch, info->colorspace, inverted, descale,
info->format, whitebitdepth);
}
break;
#if 0
case DECODED_FORMAT_YUYV_INVERTED:
inverted = true;
// Fall through and convert to YUV (first image row displayed at the bottom)
#endif
case DECODED_FORMAT_YUYV:
case DECODED_FORMAT_UYVY:
if (precision == CODEC_PRECISION_10BIT)
{
int lineskip = 1; // 3D Work
int pitch = output_pitch;
if (decoder->channel_decodes > 1 && decoder->frame.format == DECODED_FORMAT_YUYV)
{
if (decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC || decoder->channel_blend_type == BLEND_LINE_INTERLEAVED) // 3d Work
{
// Each eye occupies every other output line
lineskip = 2;
// NOTE(review): magic number 3 -- presumably one of the BLEND_* constants; confirm
if (decoder->channel_blend_type == 3)
pitch *= 2;
}
}
if ((decoder->channel_blend_type == BLEND_SIDEBYSIDE_ANAMORPHIC || decoder->channel_blend_type == BLEND_FREEVIEW) && decoder->frame.format == DECODED_FORMAT_YUYV) //side by side
{
SideLowpass16s10bitToYUYV(images, output_buffer, output_width, output_height, pitch, inverted);
}
else
{
//ConvertLowpass16s10bitToYUV(images, output_buffer, output_width, output_height, pitch, COLOR_FORMAT_YUYV, inverted, lineskip);
ConvertLowpass16s10bitToYUV(images, output_buffer, output_width, output_height, pitch, color_format, inverted, lineskip);
}
}
else
{
// 8-bit precision path
//ConvertLowpass16sToYUV(images, output_buffer, output_width, output_height, output_pitch, COLOR_FORMAT_YUYV, inverted);
ConvertLowpass16sToYUV(images, output_buffer, output_width, output_height, output_pitch, color_format, inverted);
}
break;
#if 0
case DECODED_FORMAT_UYVY_INVERTED:
inverted = true;
// Fall through and convert to YUV (first image row displayed at the bottom)
#endif
#if 0
case DECODED_FORMAT_UYVY:
ConvertLowpass16sToYUV(images, output_buffer, output_width, output_height, output_pitch, COLOR_FORMAT_UYVY, inverted);
break;
#endif
case DECODED_FORMAT_V210:
// Packed 10-bit output requires a 10-bit encoded source
if (precision == CODEC_PRECISION_10BIT)
{
ConvertLowpass16s10bitToV210(images, output_buffer, output_width, output_height, output_pitch, COLOR_FORMAT_V210, inverted);
}
else
{
//ConvertLowpass16sToV210(images, output_buffer, output_width, output_pitch, COLOR_FORMAT_V210, inverted);
assert(0);
}
break;
case DECODED_FORMAT_YU64:
ConvertLowpass16sToYUV64(images, output_buffer, output_width, output_height, output_pitch, COLOR_FORMAT_YU64, inverted, precision);
break;
case DECODED_FORMAT_YR16:
ConvertLowpass16sToYR16(images, output_buffer, output_width, output_height, output_pitch, COLOR_FORMAT_YR16, inverted, precision);
break;
default: // Unsupported format (output a blank frame)
assert(0);
break;
}
STOP(tk_convert);
}
// Convert a strip of planar YUV rows into the output buffer in the
// requested packed or RGB color format.  Unsupported formats assert.
void ConvertYUVStripPlanarToBuffer(uint8_t *planar_output[], int planar_pitch[], ROI roi,
uint8_t *output_buffer, int output_pitch, int frame_width,
int format, int colorspace)
{
    int strip_width = roi.width;

#if !defined(_DECODE_FRAME_8U) || (_DECODE_FRAME_8U == 0)
#error Must set compile-time switches to decode to 8-bit pixels
#endif

    START(tk_convert);

#if _ENCODE_CHROMA_OFFSET
#error Cannot handle images encoded with a non-zero chroma offset
#endif

    // Dispatch on the requested output format
    switch (format)
    {
    case DECODED_FORMAT_RGB24:
        // Plain RGB24 is rendered bottom-up (first image row displayed at the bottom)
        ConvertPlanarYUVToRGB(planar_output, planar_pitch, roi, output_buffer, strip_width, output_pitch,
                              COLOR_FORMAT_RGB24, colorspace, true);
        break;

    case DECODED_FORMAT_RGB24_INVERTED:
        ConvertPlanarYUVToRGB(planar_output, planar_pitch, roi, output_buffer, strip_width, output_pitch,
                              COLOR_FORMAT_RGB24, colorspace, false);
        break;

    case DECODED_FORMAT_RGB32:
        // Plain RGB32 is rendered bottom-up (first image row displayed at the bottom)
        ConvertPlanarYUVToRGB(planar_output, planar_pitch, roi, output_buffer, strip_width, output_pitch,
                              COLOR_FORMAT_RGB32, colorspace, true);
        break;

    case DECODED_FORMAT_RGB32_INVERTED:
        ConvertPlanarYUVToRGB(planar_output, planar_pitch, roi, output_buffer, strip_width, output_pitch,
                              COLOR_FORMAT_RGB32, colorspace, false);
        break;

    case DECODED_FORMAT_YUYV:
        // Packed YUYV does not use the colorspace or inversion arguments
        ConvertYUVStripPlanarToPacked(planar_output, planar_pitch, roi,
                                      output_buffer, output_pitch, frame_width, format);
        break;

    case DECODED_FORMAT_UYVY:
        // The inverted YUV variants are not enabled in this build
        ConvertPlanarYUVToUYVY(planar_output, planar_pitch, roi, output_buffer, strip_width, output_pitch,
                               COLOR_FORMAT_UYVY, colorspace, false);
        break;

    default:
        // Unsupported format (output a blank frame)
        assert(0);
        break;
    }

    STOP(tk_convert);
}
// Convert a strip of 16-bit unsigned planar rows into the output buffer,
// dithering down where the destination format is 8-bit RGB.
void ConvertRow16uToDitheredBuffer(DECODER *decoder, uint8_t *planar_output[], int planar_pitch[], ROI roi,
uint8_t *output_buffer, int output_pitch, int frame_width,
int format, int colorspace)
{
    int strip_width = roi.width;

    START(tk_convert);

    // Dispatch on the requested output format
    switch (format)
    {
    case DECODED_FORMAT_RGB24:
        // Plain RGB24 is rendered bottom-up (first image row displayed at the bottom)
        ConvertRow16uToDitheredRGB(decoder, planar_output, planar_pitch, roi, output_buffer, strip_width, output_pitch,
                                   COLOR_FORMAT_RGB24, colorspace, true);
        break;

    case DECODED_FORMAT_RGB24_INVERTED:
        ConvertRow16uToDitheredRGB(decoder, planar_output, planar_pitch, roi, output_buffer, strip_width, output_pitch,
                                   COLOR_FORMAT_RGB24, colorspace, false);
        break;

    case DECODED_FORMAT_RGB32:
        ConvertRow16uToDitheredRGB(decoder, planar_output, planar_pitch, roi, output_buffer, strip_width, output_pitch,
                                   COLOR_FORMAT_RGB32, colorspace, true);
        break;

    case DECODED_FORMAT_RGB32_INVERTED:
        ConvertRow16uToDitheredRGB(decoder, planar_output, planar_pitch, roi, output_buffer, strip_width, output_pitch,
                                   COLOR_FORMAT_RGB32, colorspace, false);
        break;

    // NOTE(review): these labels use COLOR_FORMAT_* constants while the rest
    // of the switch uses DECODED_FORMAT_* -- presumably the values coincide;
    // confirm against the format definitions.
    case COLOR_FORMAT_WP13:
    case COLOR_FORMAT_B64A:
    case COLOR_FORMAT_RG48:
    case COLOR_FORMAT_R210:
    case COLOR_FORMAT_DPX0:
    case COLOR_FORMAT_RG30:
    case COLOR_FORMAT_AR10:
    case COLOR_FORMAT_AB10:
        ConvertYUVRow16uToBGRA64(planar_output, planar_pitch, roi, output_buffer, strip_width, output_pitch, format, colorspace, NULL, NULL);
        break;

    case DECODED_FORMAT_YUYV:
        assert(0); // These routines are not yet updated for ROW16u inputs
        ConvertYUVStripPlanarToPacked(planar_output, planar_pitch, roi,
                                      output_buffer, output_pitch, frame_width, format);
        break;

    case DECODED_FORMAT_UYVY:
        assert(0); // These routines are not yet updated for ROW16u inputs
        ConvertPlanarYUVToUYVY(planar_output, planar_pitch, roi, output_buffer, strip_width, output_pitch,
                               COLOR_FORMAT_UYVY, colorspace, false);
        break;

    default:
        // Unsupported format (output a blank frame)
        assert(0);
        break;
    }

    STOP(tk_convert);
}
// Convert one row of packed 8-bit YUYV pixels to the specified output format.
//
// input      one row of packed YUYV (two bytes per pixel)
// output     destination row in the requested format
// length     number of pixels in the row
// format     DECODED_FORMAT_* output selector
// colorspace color conversion selector forwarded to the RGB converter
// precision  source precision in bits; only 8-bit input is supported for
//            the packed YUV outputs (other precisions assert)
void ConvertRowYUYV(uint8_t *input, uint8_t *output, int length, int format, int colorspace, int precision)
{
    size_t row_size = 2 * length;   // YUYV packs two bytes per pixel

    START(tk_convert);

    // Determine the type of color conversion.
    // NOTE: the inverted RGB variants are handled identically here because
    // this routine converts a single row; the previous code set a local
    // `inverted` flag in the fall-through cases that was never passed to
    // any converter (dead store), so the flag has been removed.
    switch (format)
    {
    case DECODED_FORMAT_RGB24:
    case DECODED_FORMAT_RGB24_INVERTED:
        ConvertYUYVRowToRGB(input, output, length, COLOR_FORMAT_RGB24, colorspace, precision);
        break;

    case DECODED_FORMAT_RGB32:
    case DECODED_FORMAT_RGB32_INVERTED:
        ConvertYUYVRowToRGB(input, output, length, COLOR_FORMAT_RGB32, colorspace, precision);
        break;

    case DECODED_FORMAT_YUYV:
        if (precision == 8)
        {
            // Input is already in the output format
            memcpy(output, input, row_size);
        }
        else
        {
            // Would need to dither down to 8-bit
            assert(0);
        }
        break;

    case DECODED_FORMAT_UYVY:
        if (precision == 8)
        {
            ConvertYUYVRowToUYVY(input, output, length, COLOR_FORMAT_UYVY);
        }
        else
        {
            // Would need to dither down to 8-bit
            assert(0);
        }
        break;

    //#if BUILD_PROSPECT
    case DECODED_FORMAT_V210:
        assert(0); // should not get here with 8-bit data
        //ConvertYUYVRowToV210(input, output, length, COLOR_FORMAT_V210);
        break;

    case DECODED_FORMAT_YU64:
        assert(0); // should not get here with 8-bit data
        //ConvertYUYVRowToYU64(input, output, length, COLOR_FORMAT_YU64);
        break;

    case DECODED_FORMAT_BYR3:
    case DECODED_FORMAT_BYR4:
        assert(0); // should not get here with 8-bit data
        break;
    //#endif

    default:
        // Unsupported format (output a blank row)
        assert(0);
        memset(output, 0, row_size);
        break;
    }

    STOP(tk_convert);
}
#if _THREADED_DECODER
// Get the wavelet at the given index in the transform, allocating or
// reallocating it to the requested dimensions.  Access to the transform data
// structure is serialized with the entropy worker lock so concurrent decoder
// threads do not race on the wavelet pointers.
//
// Returns the (re)allocated wavelet, or NULL when the decoder or transform
// pointer is missing.
IMAGE *GetWaveletThreadSafe(DECODER *decoder, TRANSFORM *transform, int index,
int width, int height, int level, int type)
{
    // Do not touch the transform until the arguments have been validated.
    // (The previous code read transform->wavelet[index] before the NULL
    // check, which could dereference a NULL transform pointer when asserts
    // are compiled out.)
    IMAGE *wavelet = NULL;

    assert(decoder != NULL && transform != NULL);
    if (decoder != NULL && transform != NULL)
    {
#if (DEBUG)
        FILE *logfile = decoder->logfile;
#endif
        // Lock access to the wavelet data
#if _DELAYED_THREAD_START==0
        Lock(&decoder->entropy_worker_new.lock);
#endif
        // Get the wavelet from the transform data structure (thread safe)
        wavelet = transform->wavelet[index];

        // Allocate (or reallocate) the wavelet
#if _ALLOCATOR
        wavelet = ReallocWaveletEx(decoder->allocator, wavelet, width, height, level, type);
#else
        wavelet = ReallocWaveletEx(wavelet, width, height, level, type);
#endif

        // Save this wavelet in the transform data structure
        transform->wavelet[index] = wavelet;

        // Unlock access to the wavelet data
#if _DELAYED_THREAD_START==0
        Unlock(&decoder->entropy_worker_new.lock);
#endif
    }

    return wavelet;
}
// Update the codec state with the information in a tag value pair
CODEC_ERROR UpdateCodecState(DECODER *decoder, BITSTREAM *input, CODEC_STATE *codec, TAGWORD tag, TAGWORD value)
{
CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (DEBUG)
FILE *logfile = decoder->logfile;
#endif
bool optional = false;
int chunksize = 0;
bool result;
// Is this an optional tag?
if (tag < 0)
{
tag = NEG(tag);
optional = true;
}
#if (0 && DEBUG)
if (logfile)
{
fprintf(logfile, "UpdateCodecState tag: %d, value: %d, optional: %d\n",
tag, value, optional);
}
#endif
switch (tag)
{
case CODEC_TAG_ZERO: // Used internally
assert(0); // Should not occur in the bitstream
error = CODEC_ERROR_INVALID_BITSTREAM;
break;
case CODEC_TAG_SAMPLE: // Type of sample
//assert(0);
if (value == SAMPLE_TYPE_CHANNEL)
{
result = DecodeSampleChannelHeader(decoder, input);
if (!result)
error = CODEC_ERROR_DECODE_SAMPLE_CHANNEL_HEADER;
else
error = CODEC_ERROR_OKAY;
}
break;
case CODEC_TAG_INDEX: // Sample index table
//assert(0); // Need to figure out how to return the group index
{
int count = value;
uint32_t *index = (uint32_t *)(&codec->channel_size[0]);
DecodeGroupIndex(input, index, count);
codec->num_channels = count;
}
break;
case CODEC_TAG_SUBBAND: // Has the decoder encountered a subband?
{
// This tag is obsolete and not used in modern streams
int subband = value;
// Check that the subband number makes sense
assert(0 <= subband && subband <= codec->max_subband);
if (! (0 <= subband && subband <= codec->max_subband))
{
error = CODEC_ERROR_DECODING_SUBBAND;
break;
}
// Decompress the subband
result = DecodeSampleSubband(decoder, input, subband);
if (!result)
error = CODEC_ERROR_DECODING_SUBBAND;
else
error = CODEC_ERROR_OKAY;
}
break;
case CODEC_TAG_BAND_HEADER: //CODEC_TAG_BAND_DIVISOR: // Band divisor. this is last TAG before subband data so act.
codec->band.divisor = value; // This tag value pair encodes the band divisor which is obsolete
{
// This tag value pair marks the beginning of the encoded coefficients
// The subband number has already been decoded
int subband = codec->band.subband;
result = DecodeSampleSubband(decoder, input, subband);
if (!result)
error = CODEC_ERROR_DECODING_SUBBAND;
else
error = CODEC_ERROR_OKAY;
}
break;
case CODEC_TAG_ENTRY: // Entry in sample index
assert(0); // Need to figure out how to return the group index
break;
case CODEC_TAG_MARKER: // Bitstream marker
{
int marker = value;
uint8_t *current_position;
// Save the current bitstream position
current_position = GetBitstreamPosition(input);
current_position -= 4; // Step back to before the GetSegment i.e. the TAG
if (IsLowPassHeaderMarker(marker))
{
// Save the bitstream position for the start of the channel
codec->channel_position = current_position;
}
else if (IsLowPassBandMarker(marker))
{
int subband = 0;
result = DecodeSampleSubband(decoder, input, subband);
if (!result)
error = CODEC_ERROR_DECODING_SUBBAND;
else
error = CODEC_ERROR_OKAY;
}
}
break;
case CODEC_TAG_VERSION_MAJOR: // Version
assert(0);
break;
case CODEC_TAG_VERSION_MINOR: // Minor version number
assert(0);
break;
case CODEC_TAG_VERSION_REVISION: // Revision number
assert(0);
break;
case CODEC_TAG_VERSION_EDIT: // Edit number
assert(0);
break;
case CODEC_TAG_SEQUENCE_FLAGS: // Video sequence flags
assert(0);
break;
case CODEC_TAG_TRANSFORM_TYPE: // Type of transform
assert(TRANSFORM_TYPE_FIRST <= value && value <= TRANSFORM_TYPE_LAST);
if (TRANSFORM_TYPE_FIRST <= value && value <= TRANSFORM_TYPE_LAST)
{
int i;
codec->transform_type = value;
for (i = 0; i < TRANSFORM_MAX_CHANNELS; i++)
{
TRANSFORM *transform = decoder->transform[i];
if (transform)
{
GetTransformPrescale(transform, codec->transform_type, codec->precision);
}
}
}
else
error = CODEC_ERROR_TRANSFORM_TYPE;
break;
case CODEC_TAG_NUM_FRAMES: // Number of frames in the group
assert(0 <= value && value <= TRANSFORM_NUM_FRAMES);
if (0 <= value && value <= TRANSFORM_NUM_FRAMES)
codec->num_frames = value;
else
error = CODEC_ERROR_NUM_FRAMES;
break;
case CODEC_TAG_NUM_CHANNELS: // Number of channels in the transform
assert(value <= CODEC_MAX_CHANNELS);
if (value <= CODEC_MAX_CHANNELS)
codec->num_channels = value;
else
error = CODEC_ERROR_NUM_CHANNELS;
break;
case CODEC_TAG_NUM_WAVELETS: // Number of wavelets in the transform
assert(0 < value && value <= TRANSFORM_NUM_WAVELETS);
if (0 < value && value <= TRANSFORM_NUM_WAVELETS)
codec->num_wavelets = value;
else
error = CODEC_ERROR_NUM_WAVELETS;
break;
case CODEC_TAG_NUM_SUBBANDS: // Number of encoded subbands
assert(0 < value && value <= TRANSFORM_NUM_SUBBANDS);
if (0 < value && value <= TRANSFORM_NUM_SUBBANDS)
codec->num_subbands = value;
else
error = CODEC_ERROR_NUM_SUBBANDS;
break;
case CODEC_TAG_NUM_SPATIAL: // Number of spatial levels
assert(0 < value && value <= TRANSFORM_NUM_SPATIAL);
if (0 < value && value <= TRANSFORM_NUM_SPATIAL)
codec->num_spatial = value;
else
error = CODEC_ERROR_NUM_SPATIAL;
break;
case CODEC_TAG_FIRST_WAVELET: // Type of the first wavelet
assert(value == TRANSFORM_FIRST_WAVELET);
if (value == TRANSFORM_FIRST_WAVELET)
codec->first_wavelet = value;
else
error = CODEC_ERROR_FIRST_WAVELET;
break;
case CODEC_TAG_CHANNEL_SIZE: // Number of bytes in each channel
assert(0);
break;
case CODEC_TAG_GROUP_TRAILER: // Group trailer and checksum
codec->sample_done = true;
break;
case CODEC_TAG_FRAME_TYPE: // Type of frame marks the frame start
codec->frame.type = value;
break;
case CODEC_TAG_FRAME_WIDTH: // Width of the frame
codec->frame.width = value;
break;
case CODEC_TAG_FRAME_HEIGHT: // Height of the frame
codec->frame.height = value;
//DAN20080729 -- Initialize the default colorspace based on clip resolution
if ((decoder->frame.colorspace & COLORSPACE_MASK) == COLOR_SPACE_UNDEFINED)
{
int internalheight = value;
int internalwidth = codec->frame.width;
if (decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
{
internalwidth *= 2;
internalheight *= 2;
}
if (internalheight > 576 || internalwidth > 720)
decoder->frame.colorspace |= COLOR_SPACE_CG_709;
else
decoder->frame.colorspace |= COLOR_SPACE_CG_601;
}
//if(decoder->frame.colorspace_filedefault)
// decoder->frame.colorspace = decoder->frame.colorspace_filedefault;
if (decoder->frame.colorspace_override)
decoder->frame.colorspace = decoder->frame.colorspace_override;
break;
case CODEC_TAG_ENCODED_COLORSPACE: //DAN20080729
if (decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
value &= ~(COLOR_SPACE_BT_601 | COLOR_SPACE_BT_709); // Bayer has no 601 vs 709,
//there was a bug in 3.9.4 that had bayer flagged as 601.
if (decoder->frame.colorspace_override)
decoder->frame.colorspace = decoder->frame.colorspace_override;
else
{
if (decoder->codec.encoded_format == ENCODED_FORMAT_YUV_422)
{
decoder->frame.colorspace &= ~(COLOR_SPACE_BT_601 | COLOR_SPACE_BT_709);
decoder->frame.colorspace |= (value & (COLOR_SPACE_BT_601 | COLOR_SPACE_BT_709));
//Let the VSRGB status be controllable by the calling application (e.g. Vegas)
}
else
{
decoder->frame.colorspace &= ~(COLOR_SPACE_VS_RGB);
decoder->frame.colorspace |= (value & (COLOR_SPACE_VS_RGB));
}
}
decoder->frame.colorspace_filedefault = value;
break;
case CODEC_TAG_FRAME_FORMAT: // Format of the encoded pixels (GRAY, YUV, RGB, RGBA)
assert(0);
break;
case CODEC_TAG_INPUT_FORMAT: // Format of the original pixels
codec->input_format = value;
// Set the encoded format if it has not already been set
// error = UpdateEncodedFormat(codec, (COLOR_FORMAT)value);
break;
case CODEC_TAG_ENCODED_FORMAT: // Internal format of the encoded data
case CODEC_TAG_OLD_ENCODED_FORMAT:
codec->encoded_format = value;
if (codec->encoded_format == ENCODED_FORMAT_RGBA_4444 && codec->num_channels == 3)
codec->encoded_format = ENCODED_FORMAT_RGB_444;
break;
case CODEC_TAG_FRAME_INDEX: // Position of frame within the group
codec->frame.group_index = value;
break;
case CODEC_TAG_FRAME_TRAILER: // Frame trailer and checksum
codec->sample_done = true;
break;
case CODEC_TAG_LOWPASS_SUBBAND: // Subband number of the lowpass band
codec->lowpass.subband = value;
error = SetDefaultEncodedFormat(codec);
break;
case CODEC_TAG_NUM_LEVELS: // Number of wavelet levels
codec->lowpass.level = value;
break;
case CODEC_TAG_LOWPASS_WIDTH: // Width of the lowpass band
codec->lowpass.width = value;
break;
case CODEC_TAG_LOWPASS_HEIGHT: // Height of the lowpass band
codec->lowpass.height = value;
break;
case CODEC_TAG_MARGIN_TOP: // Margins that define the encoded subset
codec->lowpass.margin.top = value;
break;
case CODEC_TAG_MARGIN_BOTTOM:
codec->lowpass.margin.bottom = value;
break;
case CODEC_TAG_MARGIN_LEFT:
codec->lowpass.margin.left = value;
break;
case CODEC_TAG_MARGIN_RIGHT:
codec->lowpass.margin.right = value;
break;
case CODEC_TAG_PIXEL_OFFSET: // Quantization parameters
codec->lowpass.pixel_offset = value;
break;
case CODEC_TAG_QUANTIZATION: // Quantization divisor used during encoding
codec->lowpass.quantization = value;
break;
case CODEC_TAG_PIXEL_DEPTH: // Number of bits per pixel
codec->lowpass.bits_per_pixel = value;
break;
case CODEC_TAG_LOWPASS_TRAILER: // Lowpass trailer
assert(0);
break;
case CODEC_TAG_WAVELET_TYPE: // Type of wavelet
codec->highpass.wavelet_type = value;
break;
case CODEC_TAG_WAVELET_NUMBER: // Number of the wavelet in the transform
codec->highpass.wavelet_number = value;
break;
case CODEC_TAG_WAVELET_LEVEL: // Level of the wavelet in the transform
codec->highpass.wavelet_level = value;
break;
case CODEC_TAG_NUM_BANDS: // Number of wavelet bands
codec->highpass.num_bands = value;
break;
case CODEC_TAG_HIGHPASS_WIDTH: // Width of each highpass band
codec->highpass.width = value;
break;
case CODEC_TAG_HIGHPASS_HEIGHT: // Height of each highpass band
codec->highpass.height = value;
break;
case CODEC_TAG_LOWPASS_BORDER: // Dimensions of lowpass border (obsolete)
codec->highpass.lowpass_border = value;
break;
case CODEC_TAG_HIGHPASS_BORDER: // Dimensions of highpass border (obsolete)
codec->highpass.highpass_border = value;
break;
case CODEC_TAG_LOWPASS_SCALE: // Scale factor for lowpass band
codec->highpass.lowpass_scale = value;
break;
case CODEC_TAG_LOWPASS_DIVISOR: // Divisor for the lowpass band
codec->highpass.lowpass_divisor = value;
break;
case CODEC_TAG_HIGHPASS_TRAILER: // Highpass trailer
assert(0);
break;
case CODEC_TAG_BAND_NUMBER: // Identifying number of a wavelet band
codec->band.number = value;
break;
case CODEC_TAG_BAND_WIDTH: // Band data width
codec->band.width = value;
break;
case CODEC_TAG_BAND_HEIGHT: // Band data height
codec->band.height = value;
break;
case CODEC_TAG_BAND_SUBBAND: // Subband number of this wavelet band
codec->band.subband = value;
//assert(value != 255);
break;
case CODEC_TAG_BAND_ENCODING: // Encoding method for this band
codec->band.encoding = value;
break;
case CODEC_TAG_BAND_QUANTIZATION: // Quantization applied to band
codec->band.quantization = value;
break;
case CODEC_TAG_BAND_SCALE: // Band scale factor
codec->band.scale = value;
break;
case CODEC_TAG_BAND_TRAILER: // Band trailer
assert(0);
break;
case CODEC_TAG_NUM_ZEROVALUES: // Number of zero values
assert(0);
break;
case CODEC_TAG_NUM_ZEROTREES: // Number of zerotrees
assert(0);
break;
case CODEC_TAG_NUM_POSITIVES: // Number of positive values
assert(0);
break;
case CODEC_TAG_NUM_NEGATIVES: // Number of negative values
assert(0);
break;
case CODEC_TAG_NUM_ZERONODES: // Number of zerotree nodes
assert(0);
break;
case CODEC_TAG_CHANNEL: // Channel number
assert(0);
break;
case CODEC_TAG_INTERLACED_FLAGS: // Interlaced structure of the video stream
//assert(0);
break;
//assert(0);
case CODEC_TAG_PROTECTION_FLAGS: // Copy protection bits
//assert(0);
break;
case CODEC_TAG_PICTURE_ASPECT_X: // Numerator of the picture aspect ratio
codec->picture_aspect_x = value;
//assert(0);
break;
case CODEC_TAG_PICTURE_ASPECT_Y: // Denominator of the picture aspect ratio
codec->picture_aspect_y = value;
//assert(0);
break;
case CODEC_TAG_SAMPLE_FLAGS: // Flag bits that control sample decoding
// Progressive versus interlaced decoding is specified by the sample flags
error = UpdateCodecFlags(codec, value);
break;
case CODEC_TAG_FRAME_NUMBER: // Sequence number of the frame in the bitstream
codec->frame_number = value;
break;
// This TAG is now support as part of the universal decoder.
// Only Prospect HD builds can decode 10bit.
case CODEC_TAG_PRECISION: // Number of bits in the video source
codec->precision = value;
{
int i;
for (i = 0; i < TRANSFORM_MAX_CHANNELS; i++)
{
TRANSFORM *transform = decoder->transform[i];
if (transform)
{
GetTransformPrescale(transform, codec->transform_type, codec->precision);
}
}
}
break;
case CODEC_TAG_PRESCALE_TABLE:
{
int i;
int prescale[TRANSFORM_MAX_WAVELETS] = {0};
for (i = 0; i < TRANSFORM_MAX_WAVELETS; i++)
prescale[i] = value >> (14 - i * 2) & 0x3;
for (i = 0; i < TRANSFORM_MAX_CHANNELS; i++)
{
TRANSFORM *transform = decoder->transform[i];
if (transform)
{
memcpy(transform->prescale, prescale, sizeof(prescale));
}
}
}
break;
case CODEC_TAG_VERSION: // Version number of the encoder used in each GOP.
codec->version[0] = (value >> 12) & 0xf;
codec->version[1] = (value >> 8) & 0xf;
codec->version[2] = value & 0xff;
break;
case CODEC_TAG_QUALITY_L: //
codec->encode_quality &= 0xffff0000;
codec->encode_quality |= value;
break;
case CODEC_TAG_QUALITY_H: //
codec->encode_quality &= 0xffff;
codec->encode_quality |= value << 16;
break;
case CODEC_TAG_BAND_CODING_FLAGS:
codec->active_codebook = value & 0xf; // 0-15 valid code books
codec->difference_coding = (value >> 4) & 1;
break;
// Peak table processing
case CODEC_TAG_PEAK_TABLE_OFFSET_L:
codec->peak_table.offset &= ~0xffff;
codec->peak_table.offset |= (value & 0xffff);
codec->peak_table.base = (PIXEL *)(input->lpCurrentWord);
codec->peak_table.level = 0; // reset for the next subband
break;
case CODEC_TAG_PEAK_TABLE_OFFSET_H:
codec->peak_table.offset &= 0xffff;
codec->peak_table.offset |= (value & 0xffff) << 16;
codec->peak_table.level = 0; // reset for the next subband
break;
case CODEC_TAG_PEAK_LEVEL:
codec->peak_table.level = value;
codec->peak_table.base += codec->peak_table.offset / sizeof(PIXEL);
break;
case CODEC_TAG_PEAK_TABLE:
//this is the chunk header, so we have peak data
codec->peak_table.level = 0; // reset for the next subband
//Just skip as the data was read ahead
chunksize = value;
chunksize &= 0xffff;
input->lpCurrentWord += chunksize * 4;
input->nWordsUsed -= chunksize * 4;
break;
#if (DEBUG)
case CODEC_TAG_SAMPLE_END: // Marks the end of the sample (for debugging only)
assert(0);
break;
#endif
default: // Unknown tag
if (tag & 0x4000)
{
if (tag & 0x2000) // i.e. 0x6xxx = 24bit size.
{
chunksize = value;
chunksize &= 0xffff;
chunksize += ((tag & 0xff) << 16);
}
else // 16bit size
{
chunksize = value;
chunksize &= 0xffff;
}
}
else if (tag & 0x2000) //24bit LONGs chunk size
{
optional = true; // Fixes a weird seneraio where the size fields in SizeTagPop() has not
// updated the size and turned the tag to optional. TODO : WHY
chunksize = 0; // not not skip
// chunksize = value + ((tag & 0xff)<<16);
// do not skip an unknown but optional chunk
// These are only use to size subbands, but the data within should not be skipped
// unless
if ((tag & 0xff00) == CODEC_TAG_UNCOMPRESS)
{
optional = true;
chunksize = value;
chunksize &= 0xffff;
chunksize += ((tag & 0xff) << 16);
decoder->uncompressed_chunk = (uint32_t *)input->lpCurrentWord;
decoder->uncompressed_size = chunksize * 4;
decoder->sample_uncompressed = 1;
}
}
assert(optional);
if (!optional)
{
error = CODEC_ERROR_UNKNOWN_REQUIRED_TAG;
}
else if (chunksize > 0) // skip this option chunk
{
input->lpCurrentWord += chunksize * 4;
input->nWordsUsed -= chunksize * 4;
}
break;
}
return error;
}
// Mark the specified wavelet band as completely decoded.
//
// Sets both the valid and started bits for the band so later stages can see
// that the band data is complete.  When the threaded decoder is active the
// update is performed while holding the entropy worker lock, since worker
// threads read these flags concurrently.
void UpdateWaveletBandValidFlags(DECODER *decoder, IMAGE *wavelet, int band)
{
    assert(decoder != NULL);
    assert(wavelet != NULL);
    if (decoder == NULL || wavelet == NULL)
    {
        return;
    }

    {
        uint32_t band_mask = BAND_VALID_MASK(band);

#if _THREADED_DECODER
        // Serialize access to the wavelet flags with the entropy workers
        if (decoder->entropy_worker_new.pool.thread_count)
            Lock(&decoder->entropy_worker_new.lock);
#endif
        // Record that this band has been started and fully decoded
        wavelet->band_valid_flags |= band_mask;
        wavelet->band_started_flags |= band_mask;
#if _THREADED_DECODER
        if (decoder->entropy_worker_new.pool.thread_count)
            Unlock(&decoder->entropy_worker_new.lock);
#endif
    }
}
// Mark the specified wavelet band as started (decoding has begun).
// Only the started bit is set here; the valid bit is set separately by
// UpdateWaveletBandValidFlags() once the band is fully decoded.
void UpdateWaveletBandStartedFlags(DECODER *decoder, IMAGE *wavelet, int band)
{
    assert(decoder != NULL);
    assert(wavelet != NULL);
    if (decoder == NULL || wavelet == NULL)
    {
        return;
    }

#if _DELAYED_THREAD_START==0
    // Lock out the entropy workers while the flags word is updated
    if (decoder->entropy_worker_new.pool.thread_count)
        Lock(&decoder->entropy_worker_new.lock);
#endif
    wavelet->band_started_flags |= BAND_VALID_MASK(band);
#if _DELAYED_THREAD_START==0
    if (decoder->entropy_worker_new.pool.thread_count)
        Unlock(&decoder->entropy_worker_new.lock);
#endif
}
// Return true when every wavelet band that must come from the entropy
// decoder (rather than being computed by a queued inverse transform) has
// been decoded.  A false return means the caller must wait for more bands.
bool DecodedBandsValid(IMAGE *wavelet, int index, int transform_type)
{
    uint32_t threaded_band_mask;
    uint32_t required_mask;

    // Too soon to wait if the wavelet has not been created yet
    if (wavelet == NULL)
    {
        return false;
    }

    switch (transform_type)
    {
    case TRANSFORM_TYPE_FIELDPLUS:
        // The temporal wavelet is a special case
        if (index == 2)
        {
            assert(wavelet->wavelet_type == WAVELET_TYPE_TEMPORAL);
            assert(wavelet->num_bands == 2);
            // Earlier transforms in the queue compute both wavelet bands
            return true;
        }
        // Wavelets 3 and 5 terminate a chain of transforms, so every band
        // must be decoded; otherwise the lowpass band (band 0) is produced
        // by a transform earlier in the queue.
        threaded_band_mask = (index == 3 || index == 5) ? 0 : BAND_VALID_MASK(0);
        break;

    case TRANSFORM_TYPE_SPATIAL:
        // The top of the pyramid must wait for all bands; lower levels get
        // their lowpass band from a transform earlier in the queue.
        threaded_band_mask = (index == 2) ? 0 : BAND_VALID_MASK(0);
        break;

    default:
        // Unknown type of transform
        assert(0);
        // Assume that the bands are not valid
        return false;
    }

    // Mask covering every band in this wavelet, minus the band (if any)
    // that the threaded transform will compute itself
    required_mask = ((1 << wavelet->num_bands) - 1) & ~threaded_band_mask;

    // All remaining bands must already have been decoded
    return (wavelet->band_valid_flags & required_mask) == required_mask;
}
// Append an inverse-transform request for the given channel and wavelet
// index to the decoder's transform queue, for later processing by the
// entropy worker threads.
//
// The wavelet itself may not exist yet when the request is queued; the
// worker locates it through the transform structure.  There is no return
// value: a full queue is caught only by the bounds check on free_entry
// (the request is silently dropped in release builds if the queue is full).
void QueueThreadedTransform(DECODER *decoder, int channel, int index)
{
#if (DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    CODEC_STATE *codec = &decoder->codec;
    TRANSFORM *transform = decoder->transform[channel];
    //IMAGE *wavelet = transform->wavelet[index];
    int precision = codec->precision;
    // The transform data structure must exist
    assert(transform != NULL);
    // The transform thread variables should have been created
    {
        int free_entry;
#if _DELAYED_THREAD_START==0
        // Lock access to the transform queue
        Lock(&decoder->entropy_worker_new.lock);
#endif
        // Copy the transform parameters into the next queue entry
        free_entry = decoder->transform_queue.free_entry;
        assert(0 <= free_entry && free_entry < DECODING_QUEUE_LENGTH);
        if (0 <= free_entry && free_entry < DECODING_QUEUE_LENGTH)
        {
            assert(transform != NULL);
            assert(0 <= channel && channel < TRANSFORM_MAX_CHANNELS);
            assert(0 <= index && index < TRANSFORM_MAX_WAVELETS);
            // Note: The wavelet may not exist when the transform is queued
            decoder->transform_queue.queue[free_entry].transform = transform;
            decoder->transform_queue.queue[free_entry].channel = channel;
            decoder->transform_queue.queue[free_entry].index = index;
            decoder->transform_queue.queue[free_entry].precision = precision;
            decoder->transform_queue.queue[free_entry].done = 0;
            // Update the transform request queue
            decoder->transform_queue.free_entry++;
            decoder->transform_queue.num_entries++;
#if (DEBUG)
            if (logfile)
            {
                fprintf(logfile, "Queued transform, channel: %d, index: %d\n", channel, index);
            }
#endif
        }
#if _DELAYED_THREAD_START==0
        Unlock(&decoder->entropy_worker_new.lock);
#endif
    }
}
#if _THREADED_DECODER
// Block until every queued inverse transform has been processed by the
// entropy worker pool, then reset the transform queue to its empty state.
// Does nothing when the worker pool was never started.
void WaitForTransformThread(DECODER *decoder)
{
    if (decoder->entropy_worker_new.pool.thread_count == 0)
    {
        // No worker threads were created, so there is nothing to wait for
        return;
    }

#if _DELAYED_THREAD_START
    // Workers are started lazily; kick them off now so the queue drains
    ThreadPoolSendMessage(&decoder->entropy_worker_new.pool, THREAD_MESSAGE_START);
#endif
    // Wait until the pool has finished all queued transform requests
    ThreadPoolWaitAllDone(&decoder->entropy_worker_new.pool);

    // Reset the transform queue for the next sample
    decoder->transform_queue.started = 0;
    decoder->transform_queue.num_entries = 0;
    decoder->transform_queue.next_entry = 0;
    decoder->transform_queue.free_entry = 0;
}
#endif
#endif
#if _INTERLACED_WORKER_THREADS
// Run the final inverse frame transform to YUV output using the pair of
// interlaced worker threads: post the request to the shared mailbox, charge
// the row semaphore with one count per transform row, wake both workers,
// and block until both signal completion.
void TransformInverseFrameThreadedToYUV(DECODER *decoder, int frame_index, int num_channels,
                                        uint8_t *output, int pitch, FRAME_INFO *info,
                                        int chroma_offset, int precision)
{
    struct interlace_data *mailbox = &decoder->interlaced_worker.interlace_data;
    int32_t previous_count;
    int32_t thread;
    // There are half as many input (transform) rows as output rows:
    // round the height up to a multiple of eight, then halve it
    int row_count = (((info->height + 7) / 8) * 8) / 2;

    // Describe the work in the shared mailbox
    mailbox->type = THREAD_TRANSFORM_FRAME_YUV;
    mailbox->frame = frame_index;
    mailbox->num_channels = num_channels;
    mailbox->output = output;
    mailbox->pitch = pitch;
    memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
    mailbox->chroma_offset = chroma_offset;
    mailbox->precision = precision;

    // Charge the row semaphore with one count per row of work
    decoder->interlaced_worker.current_row = 0;
    ReleaseSemaphore(decoder->interlaced_worker.row_semaphore, row_count, &previous_count);
    assert(previous_count == 0);

    // Wake up both worker threads
    for (thread = 0; thread < THREADS_IN_LAST_WAVELET; thread++)
    {
        SetEvent(decoder->interlaced_worker.start_event[thread]);
    }

    // Wait for both worker threads to finish
    WaitForMultipleObjects(THREADS_IN_LAST_WAVELET, decoder->interlaced_worker.done_event, true, UINT32_MAX);
}
// Run the final inverse frame transform to 16-bit row-ordered output using
// the pair of interlaced worker threads.  Same protocol as
// TransformInverseFrameThreadedToYUV, differing only in the message type
// and the output pointer type.
void TransformInverseFrameThreadedToRow16u(DECODER *decoder, int frame_index, int num_channels,
                                           PIXEL16U *output, int pitch, FRAME_INFO *info,
                                           int chroma_offset, int precision)
{
    struct interlace_data *mailbox = &decoder->interlaced_worker.interlace_data;
    int32_t previous_count;
    int32_t thread;
    // There are half as many input (transform) rows as output rows:
    // round the height up to a multiple of eight, then halve it
    int row_count = (((info->height + 7) / 8) * 8) / 2;

    // Describe the work in the shared mailbox
    mailbox->type = THREAD_TRANSFORM_FRAME_ROW16U;
    mailbox->frame = frame_index;
    mailbox->num_channels = num_channels;
    mailbox->output = (uint8_t *)output;
    mailbox->pitch = pitch;
    memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
    mailbox->chroma_offset = chroma_offset;
    mailbox->precision = precision;

    // Charge the row semaphore with one count per row of work
    decoder->interlaced_worker.current_row = 0;
    ReleaseSemaphore(decoder->interlaced_worker.row_semaphore, row_count, &previous_count);
    assert(previous_count == 0);

    // Wake up both worker threads
    for (thread = 0; thread < THREADS_IN_LAST_WAVELET; thread++)
    {
        SetEvent(decoder->interlaced_worker.start_event[thread]);
    }

    // Wait for both worker threads to finish
    WaitForMultipleObjects(THREADS_IN_LAST_WAVELET, decoder->interlaced_worker.done_event, true, UINT32_MAX);
}
// Thread procedure for the interlaced decoder worker threads.
//
// Each worker determines its own index by incrementing the shared thread
// counter (under the lock when available), then loops: wait for its start
// event or the shared stop event, copy the work description out of the
// shared mailbox under the lock, run the requested inverse frame transform
// for its section of rows, and signal its done event.
//
// Returns 0 on a normal stop, 1 when the required synchronization objects
// were never created.
DWORD WINAPI InterlacedWorkerThreadProc(LPVOID lpParam)
{
    DECODER *decoder = (DECODER *)lpParam;
    FILE *logfile = decoder->logfile;  // NOTE(review): unused in this function
    struct interlace_data *data = &decoder->interlaced_worker.interlace_data;
    int thread_index;
    HANDLE hObjects[2];
    DWORD dwReturnValue;
    // Pin this thread to the requested processors if an affinity was set
    if (decoder->thread_cntrl.affinity)
    {
        HANDLE hCurrentThread = GetCurrentThread();
        SetThreadAffinityMask(hCurrentThread, decoder->thread_cntrl.affinity);
    }
    // Determine the index of this worker thread
    if (decoder->interlaced_worker.lock_init)
    {
        EnterCriticalSection(&decoder->interlaced_worker.lock);
    }
    thread_index = decoder->interlaced_worker.thread_count++;
    if (decoder->interlaced_worker.lock_init)
        LeaveCriticalSection(&decoder->interlaced_worker.lock);
    // The transform worker variables should have been created
    assert(decoder->interlaced_worker.start_event[thread_index] != NULL);
    assert(decoder->interlaced_worker.row_semaphore != NULL);
    assert(decoder->interlaced_worker.done_event[thread_index] != NULL);
    assert(decoder->interlaced_worker.stop_event != NULL);
    if (!(decoder->interlaced_worker.start_event[thread_index] != NULL &&
        decoder->interlaced_worker.row_semaphore != NULL &&
        decoder->interlaced_worker.done_event[thread_index] != NULL &&
        decoder->interlaced_worker.stop_event != NULL))
    {
        return 1;
    }
    // Wait on this thread's start event and the shared stop event
    hObjects[0] = decoder->interlaced_worker.start_event[thread_index];
    hObjects[1] = decoder->interlaced_worker.stop_event;
    for (;;)
    {
        // Wait for the signal to begin processing a transform
        dwReturnValue = WaitForMultipleObjects(2, hObjects, false, UINT32_MAX);
        // Received a signal to begin inverse transform processing?
        if (dwReturnValue == WAIT_OBJECT_0)
        {
            int type; // Type of inverse transform to perform
            int frame_index; // Index of output frame to produce
            int num_channels; // Number of channels in the transform array
            uint8_t *output; // Output frame buffer
            int pitch; // Output frame pitch
            FRAME_INFO info; // Format of the output frame
            int chroma_offset; // Offset for the output chroma
            int precision; // Source pixel bit depth
            // Lock access to the transform data
            if (decoder->interlaced_worker.lock_init)
            {
                EnterCriticalSection(&decoder->interlaced_worker.lock);
            }
            // Get the processing parameters (copied locally so the
            // mailbox can be reused for the next request)
            type = data->type;
            frame_index = data->frame;
            num_channels = data->num_channels;
            output = data->output;
            pitch = data->pitch;
            memcpy(&info, &data->info, sizeof(FRAME_INFO));
            chroma_offset = data->chroma_offset;
            precision = data->precision;
            // Unlock access to the transform data
            if (decoder->interlaced_worker.lock_init)
                LeaveCriticalSection(&decoder->interlaced_worker.lock);
            // Select the type of inverse transform to perform
            switch (type)
            {
            case THREAD_TRANSFORM_FRAME_YUV:
                //TODO: more to new _THREADED model
                TransformInverseFrameSectionToYUV(decoder, thread_index, frame_index, num_channels,
                    output, pitch, &info, chroma_offset, precision);
                break;
            case THREAD_TRANSFORM_FRAME_ROW16U:
                //TODO: more to new _THREADED model
                TransformInverseFrameSectionToRow16u(decoder, thread_index, frame_index, num_channels,
                    (PIXEL16U *)output, pitch, &info, chroma_offset, precision);
                break;
            default:
                // Unknown request type
                assert(0);
                break;
            }
            // Signal that this thread is done
            SetEvent(decoder->interlaced_worker.done_event[thread_index]);
        }
        else
        {
            // Should have a condition that causes the thread to terminate
            assert(dwReturnValue == WAIT_OBJECT_0 + 1 || dwReturnValue == WAIT_ABANDONED);
            break;
        }
    }
    return 0;
}
#endif
// Compute the dimensions of the decoded frame for the given output
// resolution from the wavelet that will be used to reconstruct the frame.
//
// transform_array     per-channel transforms (channel 0 is used for sizing)
// num_channels        number of valid entries in transform_array
// frame_index         output frame index (used only by the DEBUG checks)
// resolution          one of the DECODED_RESOLUTION_* codes
// decoded_width_out   optional output: decoded frame width (may be NULL)
// decoded_height_out  optional output: decoded frame height (may be NULL)
//
// Fix: on an unknown resolution code, or when the required wavelet has not
// been allocated, the original dereferenced a NULL wavelet pointer in
// release builds (the assert compiles out).  Now the outputs are set to
// zero instead.
void GetDecodedFrameDimensions(TRANSFORM **transform_array,
                               int num_channels,
                               int frame_index,
                               int resolution,
                               int *decoded_width_out,
                               int *decoded_height_out)
{
    IMAGE *wavelet = NULL;
    int decoded_scale = 0;
    int decoded_width = 0;
    int decoded_height = 0;

    // Get the decoding scale and the wavelet that determines the frame size
    switch (resolution)
    {
    case DECODED_RESOLUTION_FULL_DEBAYER:
    case DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER:
#if DEBUG
        assert(AllTransformBandsValid(transform_array, num_channels, frame_index));
#endif
        decoded_scale = 2;
        wavelet = transform_array[0]->wavelet[0];
        break;

    case DECODED_RESOLUTION_FULL:
#if DEBUG
        assert(AllTransformBandsValid(transform_array, num_channels, frame_index));
#endif
        decoded_scale = 2;
        wavelet = transform_array[0]->wavelet[0];
        break;

    case DECODED_RESOLUTION_HALF_NODEBAYER:
    case DECODED_RESOLUTION_HALF:
#if DEBUG
        assert(AllLowpassBandsValid(transform_array, num_channels, frame_index));
#endif
        decoded_scale = 1;
        wavelet = transform_array[0]->wavelet[0];
        break;

    case DECODED_RESOLUTION_QUARTER:
        decoded_scale = 1;
        wavelet = transform_array[0]->wavelet[3];
        break;

    case DECODED_RESOLUTION_LOWPASS_ONLY:
        decoded_scale = 1;
        wavelet = transform_array[0]->wavelet[5];
        // Is this an intra frame?  (intra transforms store the lowpass
        // wavelet at index 2)
        if (wavelet == NULL)
        {
            wavelet = transform_array[0]->wavelet[2];
        }
        break;

    default:
        // Unknown resolution code
        assert(0);
        break;
    }

    // Compute the decoded frame dimensions
    assert(wavelet != NULL);
    if (wavelet != NULL)
    {
        decoded_width = decoded_scale * wavelet->width;
        decoded_height = decoded_scale * wavelet->height;
    }
    // else: leave the dimensions at zero rather than dereferencing NULL

    if (decoded_width_out)
    {
        *decoded_width_out = decoded_width;
    }
    if (decoded_height_out)
    {
        *decoded_height_out = decoded_height;
    }
}
// Reconstruct an uncompressed Bayer (RAW) sample into the requested output
// format.
//
// decoder        decoder state; the packed source data is taken from
//                decoder->uncompressed_chunk / decoder->uncompressed_size,
//                which are cleared once consumed
// info           output frame description (format, dimensions, resolution)
// frame          output frame index (currently unused)
// output_buffer  destination buffer for the reconstructed frame
// output_pitch   bytes between output rows
//
// Returns CODEC_ERROR_OKAY on success, CODEC_ERROR_UNSUPPORTED_FORMAT for
// output formats this path cannot produce, or CODEC_ERROR_MEMORY_ALLOC
// when a scratch buffer cannot be allocated.
//
// Fix: decoder->RGBFilterBufferSize previously recorded frame_size * 3
// even when the alpha-output path allocated frame_size * 4; it now records
// the actual allocated size.
CODEC_ERROR UncompressedSampleFrameBayerToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output_buffer, int output_pitch)
{
    CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    int format = info->format;
    int width = info->width;
    int height = info->height;

    // Determine whether the requested output format is supported; the
    // BYR2/BYR3/BYR4 cases are converted directly and return early.
    error = CODEC_ERROR_UNSUPPORTED_FORMAT;
    switch (format)
    {
    case DECODED_FORMAT_RGB24:
    case DECODED_FORMAT_RGB32:
    case DECODED_FORMAT_RG48: //DAN20090120 added not sure why they weren't here.
    case DECODED_FORMAT_RG64: //DAN20101207 added not sure why they weren't here.
    case DECODED_FORMAT_WP13: //DAN20090120 ""
    case DECODED_FORMAT_W13A: //DAN20101207 ""
    case DECODED_FORMAT_B64A:
    case DECODED_FORMAT_R210:
    case DECODED_FORMAT_DPX0:
    case DECODED_FORMAT_RG30:
    case DECODED_FORMAT_AR10:
    case DECODED_FORMAT_AB10:
    case DECODED_FORMAT_YR16:
    case DECODED_FORMAT_V210:
    case DECODED_FORMAT_YU64:
    case DECODED_FORMAT_YUYV: //?
    case DECODED_FORMAT_UYVY: //?
    case DECODED_FORMAT_R408:
    case DECODED_FORMAT_V408:
        // These formats are produced by the demosaic path below
        error = CODEC_ERROR_OKAY;
        break;

    case DECODED_FORMAT_BYR2:
    case DECODED_FORMAT_BYR4:
    {
        // Optional linearization curve applied when restoring BYR4 data
        unsigned short *curve = NULL;
        if (decoder->BYR4LinearRestore && decoder->frame.format == DECODED_FORMAT_BYR4 && decoder->cfhddata.encode_curve_preset == 0)
        {
            curve = decoder->BYR4LinearRestore;
        }
        ConvertPackedToBYR2(width, height, decoder->uncompressed_chunk, decoder->uncompressed_size, output_buffer, output_pitch, curve);
    }
        // The packed chunk has been consumed
        decoder->uncompressed_chunk = 0;
        decoder->uncompressed_size = 0;
        return CODEC_ERROR_OKAY;

    case DECODED_FORMAT_BYR3:
        ConvertPackedToBYR3(width, height, decoder->uncompressed_chunk, decoder->uncompressed_size, output_buffer, output_pitch);
        decoder->uncompressed_chunk = 0;
        decoder->uncompressed_size = 0;
        return CODEC_ERROR_OKAY;
    }

    if (error)
        return error;

    // Need to allocate a scratch buffer for decoding the Bayer frame?
    if (decoder->RawBayer16 == NULL)
    {
        // Four Bayer data samples at each 2x2 quad in the grid
        int pixel_size = 4 * sizeof(PIXEL16U);
        int frame_size;
        const size_t alignment = 16;
#if _ALLOCATOR
        ALLOCATOR *allocator = decoder->allocator;
#endif
        frame_size = width * height * pixel_size;
#if _ALLOCATOR
        decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)frame_size, alignment);
#else
        decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, alignment);
#endif
        assert(decoder->RawBayer16 != NULL);
        if (! (decoder->RawBayer16 != NULL))
        {
            return CODEC_ERROR_MEMORY_ALLOC;
        }
        decoder->RawBayerSize = frame_size;

        if (decoder->RGBFilterBuffer16 == NULL)
        {
            // Three planes of RGB, or four when an alpha channel is output
            int size = frame_size * 3;
            if (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
                size = frame_size * 4;
#if _ALLOCATOR
            decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)size, 16);
#else
            decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(size, 16);
#endif
            assert(decoder->RGBFilterBuffer16 != NULL);
            if (! (decoder->RGBFilterBuffer16 != NULL))
            {
                return CODEC_ERROR_MEMORY_ALLOC;
            }
            // Record the actual allocated size (was frame_size * 3 even
            // when four planes were allocated for the alpha case above)
            decoder->RGBFilterBufferSize = size;
        }
    }

    // Using the RGBFilterBuffer16 as scratch space
    ConvertPackedToRawBayer16(width, height, decoder->uncompressed_chunk, decoder->uncompressed_size, decoder->RawBayer16, decoder->RGBFilterBuffer16, info->resolution);
    decoder->uncompressed_chunk = 0;
    decoder->uncompressed_size = 0;

#if _THREADED
    //DemosaicRAW
    {
        WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
        int inverted = false;
        uint8_t *output = output_buffer;
        int pitch = output_pitch;
#if _DELAY_THREAD_START
        if (decoder->worker_thread.pool.thread_count == 0)
        {
            CreateLock(&decoder->worker_thread.lock);
            // Initialize the pool of transform worker threads
            ThreadPoolCreate(&decoder->worker_thread.pool,
                             decoder->thread_cntrl.capabilities >> 16/*cpus*/,
                             WorkerThreadProc,
                             decoder);
        }
#endif
        // RGB24/RGB32 output is bottom-up; decode into the inverted variant
        if (format == DECODED_FORMAT_RGB24)
        {
            format = DECODED_FORMAT_RGB24_INVERTED;
            inverted = true;
        }
        else if (format == DECODED_FORMAT_RGB32)
        {
            format = DECODED_FORMAT_RGB32_INVERTED;
            inverted = true;
        }
        // Have the output location and pitch been inverted?
        if (inverted && pitch > 0)
        {
            int height = info->height;
            if (info->resolution == DECODED_RESOLUTION_FULL_DEBAYER || info->resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER)
                height *= 2;
            output += (height - 1) * pitch; // Start at the bottom row
            pitch = NEG(pitch); // Negate the pitch to go up
        }
        // Post a message to the mailbox
        mailbox->output = output;
        mailbox->pitch = pitch;
        memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
        mailbox->jobType = JOB_TYPE_OUTPUT;
        // Set the work count to the number of rows to process
        ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
        // Start the transform worker threads
        ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
        // Wait for all of the worker threads to finish
        ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
    }
#else
    error = CODEC_ERROR_UNSUPPORTED_FORMAT;
#endif
    return error;
}
// Reconstruct uncompressed v210 YUV format to the requested output format
//
// The packed v210 source is taken from decoder->uncompressed_chunk /
// decoder->uncompressed_size (cleared once consumed).  Two fast paths copy
// or repack directly into the output buffer; otherwise the frame is
// expanded row by row through the active-metadata engine, using
// decoder->RawBayer16 as a scanline scratch buffer.
CODEC_ERROR UncompressedSampleFrameYUVToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output_buffer, int output_pitch)
{
    CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    //CODEC_STATE *codec = &decoder->codec;
    //int num_channels = codec->num_channels;
    //int precision = codec->precision;
    int format = info->format;
    int width = info->width;
    int height = info->height;
    int resolution = info->resolution;
    // Compute the number of bytes between each row of Bayer data
    //int bayer_pitch = 2 * width * sizeof(PIXEL16U);
    // Compute the pitch between pairs of rows of bayer data (one pair per image row)
    //int raw_bayer_pitch = 2 * bayer_pitch;
    //int chroma_offset = decoder->codec.chroma_offset;
    error = CODEC_ERROR_UNSUPPORTED_FORMAT;
    // Fast path: v210 output at full resolution is a straight row copy
    if (format == DECODED_FORMAT_V210 && resolution == DECODED_RESOLUTION_FULL && decoder->use_active_metadata_decoder == false)
    {
        int smallest_Stride = output_pitch;
        int unc_Stride = decoder->uncompressed_size / height;
        if (unc_Stride < smallest_Stride)
            smallest_Stride = unc_Stride;
        if (unc_Stride == output_pitch)
            memcpy(output_buffer, decoder->uncompressed_chunk, decoder->uncompressed_size);
        else
        {
            // Strides differ: copy the smaller stride row by row
            int y;
            uint8_t *src = (uint8_t *)decoder->uncompressed_chunk;
            uint8_t *dst = (uint8_t *)output_buffer;
            for (y = 0; y < height; y++)
            {
                memcpy(dst, src, smallest_Stride);
                src += unc_Stride;
                dst += output_pitch;
            }
        }
        // The packed chunk has been consumed
        decoder->uncompressed_chunk = 0;
        decoder->uncompressed_size = 0;
        return CODEC_ERROR_OKAY;
    }
    // Fast path: unpack the v210 source directly into 8-bit YUYV/UYVY rows
    if ((format == DECODED_FORMAT_YUYV || format == DECODED_FORMAT_UYVY) && resolution == DECODED_RESOLUTION_FULL && decoder->use_active_metadata_decoder == false)
    {
        int smallest_Stride = output_pitch;
        int unc_Stride = decoder->uncompressed_size / height;
        if (unc_Stride < smallest_Stride)
            smallest_Stride = unc_Stride;
        {
            int y;
            uint8_t *src = (uint8_t *)decoder->uncompressed_chunk;
            uint8_t *dst = (uint8_t *)output_buffer;
            for (y = 0; y < height; y++)
            {
                uint32_t *input_ptr = (uint32_t *)src;
                int pos = 0;
                int column = 0, length = width;
                length -= length % 6; //DAN03252004 -- fix a memory overflow.
                // Each group of four 32-bit words packs six 10-bit YUV 4:2:2 samples
                for (column = 0; column < length; column += 6)
                {
                    uint32_t yuv;
                    int y; // NOTE(review): shadows the row counter declared above
                    int u;
                    int v;
                    // Read the first word
                    yuv = *(input_ptr++);
                    u = (yuv >> V210_VALUE1_SHIFT) & V210_VALUE_MASK;
                    y = (yuv >> V210_VALUE2_SHIFT) & V210_VALUE_MASK;
                    v = (yuv >> V210_VALUE3_SHIFT) & V210_VALUE_MASK;
                    // Expand the pixels to sixteen bits
                    u <<= 6;
                    y <<= 6;
                    v <<= 6;
                    // Store the high byte to produce 8-bit YUYV ordering
                    dst[pos++] = SATURATE_16U(y) >> 8;
                    dst[pos++] = SATURATE_16U(u) >> 8;
                    // Read the second word
                    yuv = *(input_ptr++);
                    y = (yuv >> V210_VALUE1_SHIFT) & V210_VALUE_MASK;
                    y <<= 6;
                    dst[pos++] = SATURATE_16U(y) >> 8;
                    dst[pos++] = SATURATE_16U(v) >> 8;
                    u = (yuv >> V210_VALUE2_SHIFT) & V210_VALUE_MASK;
                    y = (yuv >> V210_VALUE3_SHIFT) & V210_VALUE_MASK;
                    u <<= 6;
                    y <<= 6;
                    dst[pos++] = SATURATE_16U(y) >> 8;
                    dst[pos++] = SATURATE_16U(u) >> 8;
                    // Read the third word
                    yuv = *(input_ptr++);
                    v = (yuv >> V210_VALUE1_SHIFT) & V210_VALUE_MASK;
                    y = (yuv >> V210_VALUE2_SHIFT) & V210_VALUE_MASK;
                    v <<= 6;
                    y <<= 6;
                    dst[pos++] = SATURATE_16U(y) >> 8;
                    dst[pos++] = SATURATE_16U(v) >> 8;
                    u = (yuv >> V210_VALUE3_SHIFT) & V210_VALUE_MASK;
                    u <<= 6;
                    // Read the fourth word
                    yuv = *(input_ptr++);
                    y = (yuv >> V210_VALUE1_SHIFT) & V210_VALUE_MASK;
                    y <<= 6;
                    dst[pos++] = SATURATE_16U(y) >> 8;
                    dst[pos++] = SATURATE_16U(u) >> 8;
                    v = (yuv >> V210_VALUE2_SHIFT) & V210_VALUE_MASK;
                    y = (yuv >> V210_VALUE3_SHIFT) & V210_VALUE_MASK;
                    v <<= 6;
                    y <<= 6;
                    dst[pos++] = SATURATE_16U(y) >> 8;
                    dst[pos++] = SATURATE_16U(v) >> 8;
                }
                // UYVY is YUYV with each luma/chroma byte pair swapped
                if (format == DECODED_FORMAT_UYVY)
                {
                    for (column = 0; column < pos; column += 2)
                    {
                        int t = dst[column];
                        dst[column] = dst[column + 1];
                        dst[column + 1] = t;
                    }
                }
                src += unc_Stride;
                dst += output_pitch;
            }
        }
        // The packed chunk has been consumed
        decoder->uncompressed_chunk = 0;
        decoder->uncompressed_size = 0;
        return CODEC_ERROR_OKAY;
    }
    {
        // Expand YUV at the target resolution, and use the ActiveMetadata engine.
        // Need to allocate a scratch buffer for decoding the frame?
        if (decoder->RawBayer16 == NULL || decoder->RawBayerSize < width * 64) //RawBayer used as a scratch buffer
        {
            //int pixel_size = 2 * sizeof(PIXEL16U);
            const size_t alignment = 16;
#if _ALLOCATOR
            ALLOCATOR *allocator = decoder->allocator;
#endif
            // The scratch buffer is sized for the original (encoded) width
            int orig_width = width;
            if (resolution == DECODED_RESOLUTION_HALF)
                orig_width *= 2;
            if (resolution == DECODED_RESOLUTION_QUARTER)
                orig_width *= 4;
            // Release any previous (too small) scratch buffer first
            if (decoder->RawBayer16)
            {
#if _ALLOCATOR
                FreeAligned(allocator, decoder->RawBayer16);
                decoder->RawBayer16 = NULL;
                decoder->RawBayerSize = 0;
#else
                MEMORY_ALIGNED_FREE(decoder->RawBayer16);
                decoder->RawBayer16 = NULL;
                decoder->RawBayerSize = 0;
#endif
            }
#if _ALLOCATOR
            decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, orig_width * 64, alignment);
#else
            decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(orig_width * 64, alignment);
#endif
            assert(decoder->RawBayer16 != NULL);
            if (! (decoder->RawBayer16 != NULL))
            {
                return CODEC_ERROR_MEMORY_ALLOC;
            }
            decoder->RawBayerSize = orig_width * 64;
        }
    }
    // unpack source original YUV into YU64?
    if (decoder->RawBayer16)
    {
        //uint8_t *src = (uint8_t *)decoder->uncompressed_chunk;
        //uint8_t *dst = (uint8_t *)output_buffer;
#if _THREADED
        {
            // Hand the per-row unpack/convert work to the worker thread pool
            WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
            if (decoder->worker_thread.pool.thread_count == 0)
            {
                CreateLock(&decoder->worker_thread.lock);
                // Initialize the pool of transform worker threads
                ThreadPoolCreate(&decoder->worker_thread.pool,
                                 decoder->thread_cntrl.capabilities >> 16/*cpus*/,
                                 WorkerThreadProc,
                                 decoder);
            }
#endif
            // Post a message to the mailbox
            mailbox->output = output_buffer;
            mailbox->pitch = output_pitch;
            memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
            mailbox->jobType = JOB_TYPE_OUTPUT_UNCOMPRESSED;
            // Set the work count to the number of rows to process
            ThreadPoolSetWorkCount(&decoder->worker_thread.pool, height);
            // Start the transform worker threads
            ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
            // Wait for all of the worker threads to finish
            ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
        }
#else
        {
            // Single-threaded fallback: unpack and convert one output row at
            // a time, averaging samples down for half/quarter resolution.
            // NOTE(review): this path uses 'src' and 'dst' whose declarations
            // above are commented out, so it does not compile as written —
            // verify before enabling a non-_THREADED build.
            int orig_width = width;
            int orig_height = height;
            int row, lines = 1;
            int start, end;
            if (resolution == DECODED_RESOLUTION_HALF)
            {
                orig_width *= 2;
                orig_height *= 2;
                lines = 2;
            }
            if (resolution == DECODED_RESOLUTION_QUARTER)
            {
                orig_width *= 4;
                orig_height *= 4;
                lines = 4;
            }
            start = 0;
            end = height;
            // RGB24/RGB32 output is bottom-up, so walk the rows in reverse
            if (format == DECODED_FORMAT_RGB32 || format == DECODED_FORMAT_RGB24)
            {
                start = height - 1;
                end = -1;
            }
            for (row = start; row != end; end > start ? row++ : row--)
            {
                int whitebitdepth = 16;
                int flags = 0;
                uint8_t *planar_output[3];
                int planar_pitch[3];
                ROI roi;
                PIXEL16U *y_row_ptr;
                PIXEL16U *u_row_ptr;
                PIXEL16U *v_row_ptr;
                PIXEL16U *scanline = (PIXEL16U *)decoder->RawBayer16;
                PIXEL16U *scanline2 = scanline + orig_width * 8;
                unsigned short *sptr;
                int i, unc_Stride = decoder->uncompressed_size / orig_height;
                y_row_ptr = (PIXEL16U *)scanline;
                u_row_ptr = y_row_ptr + orig_width;
                v_row_ptr = u_row_ptr + orig_width / 2;
                for (i = 0; i < lines; i++)
                {
                    src = (uint8_t *)decoder->uncompressed_chunk;
                    src += row * unc_Stride;
                    // Repack the row of 10-bit pixels into 16-bit pixels
                    ConvertV210RowToYUV16((uint8_t *)src, y_row_ptr, u_row_ptr, v_row_ptr, orig_width, scanline2);
                    // Advance to the next rows in the input and output images
                    y_row_ptr += orig_width * 2;
                    u_row_ptr = y_row_ptr + orig_width;
                    v_row_ptr = u_row_ptr + orig_width / 2;
                }
                y_row_ptr = (PIXEL16U *)scanline;
                u_row_ptr = y_row_ptr + width;
                v_row_ptr = u_row_ptr + width / 2;
                // Average neighboring samples when downscaling
                if (lines == 2)
                {
                    for (i = 0; i < width * 2; i++)
                        y_row_ptr[i] = (y_row_ptr[i * 2] + y_row_ptr[i * 2 + 1] + y_row_ptr[orig_width * 2 + i * 2] + y_row_ptr[orig_width * 2 + i * 2 + 1]) >> 2;
                }
                else if (lines == 4)
                {
                    for (i = 0; i < width * 2; i++)
                        y_row_ptr[i] = (y_row_ptr[i * 4] + y_row_ptr[i * 4 + 2] + y_row_ptr[orig_width * 2 * 2 + i * 4] + y_row_ptr[orig_width * 2 * 2 + i * 4 + 2]) >> 2;
                }
                roi.width = width;
                roi.height = 1;
                planar_output[0] = (uint8_t *)y_row_ptr;
                planar_output[1] = (uint8_t *)v_row_ptr;
                planar_output[2] = (uint8_t *)u_row_ptr;
                planar_pitch[0] = 0;
                planar_pitch[1] = 0;
                planar_pitch[2] = 0;
                if (decoder->apply_color_active_metadata)
                {
                    // Convert to 8-pixel planar RGB, then run the active
                    // metadata pass before the final output packing
                    ConvertYUVRow16uToBGRA64(planar_output, planar_pitch, roi,
                                             (unsigned char *)scanline2, width, output_pitch,
                                             COLOR_FORMAT_RGB_8PIXEL_PLANAR, decoder->frame.colorspace, &whitebitdepth, &flags);
                    sptr = scanline2;
                    sptr = ApplyActiveMetaData(decoder, width, 1, row, scanline2, scanline,
                                               info->format, &whitebitdepth, &flags);
                }
                else
                {
                    ConvertYUVRow16uToBGRA64(planar_output, planar_pitch, roi,
                                             (unsigned char *)scanline2, width, output_pitch,
                                             COLOR_FORMAT_WP13, decoder->frame.colorspace, &whitebitdepth, &flags);
                    sptr = scanline2;
                }
                // Pack the converted scanline into the caller's output format
                ConvertLinesToOutput(decoder, width, 1, row, sptr,
                                     dst, output_pitch, format, whitebitdepth, flags);
                dst += output_pitch;
            }
        }
#endif
    }
    error = CODEC_ERROR_OKAY;
    return error;
}
// Reconstruct uncompressed DPX0 RGB format to the requested output format
CODEC_ERROR UncompressedSampleFrameRGBToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output_buffer, int output_pitch)
{
CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (DEBUG)
FILE *logfile = decoder->logfile;
#endif
//CODEC_STATE *codec = &decoder->codec;
//int num_channels = codec->num_channels;
//int precision = codec->precision;
int format = info->format;
//int output_format = info->output_format; // used by image_dev_only decodes
int width = info->width;
int height = info->height;
int resolution = info->resolution;
//int chroma_offset = decoder->codec.chroma_offset;
error = CODEC_ERROR_UNSUPPORTED_FORMAT;
if ( (format == DECODED_FORMAT_DPX0 || format == DECODED_FORMAT_AR10 || format == DECODED_FORMAT_AB10 || format == DECODED_FORMAT_RG30 || format == DECODED_FORMAT_R210) &&
resolution == DECODED_RESOLUTION_FULL && decoder->use_active_metadata_decoder == false)
{
int smallest_Stride = output_pitch;
int unc_Stride = decoder->uncompressed_size / height;
if (unc_Stride < smallest_Stride)
smallest_Stride = unc_Stride;
if (format != DECODED_FORMAT_DPX0)
{
int unc_Stride = decoder->uncompressed_size / height;
ConvertDPX0ToRGB10((uint8_t *)decoder->uncompressed_chunk, unc_Stride, width, height, format);
}
if (unc_Stride == output_pitch)
memcpy(output_buffer, decoder->uncompressed_chunk, decoder->uncompressed_size);
else
{
int y;
uint8_t *src = (uint8_t *)decoder->uncompressed_chunk;
uint8_t *dst = (uint8_t *)output_buffer;
for (y = 0; y < height; y++)
{
memcpy(dst, src, smallest_Stride);
src += unc_Stride;
dst += output_pitch;
}
}
decoder->uncompressed_chunk = 0;
decoder->uncompressed_size = 0;
return CODEC_ERROR_OKAY;
}
{
// Expand YUV at the target resolution, and use the ActiveMetadata engine.
// Need to allocate a scratch buffer for decoding the frame?
if (decoder->RawBayer16 == NULL || decoder->RawBayerSize < width * 64) //RawBayer used as a scratch buffer
{
//int pixel_size = 2 * sizeof(PIXEL16U);
const size_t alignment = 16;
#if _ALLOCATOR
ALLOCATOR *allocator = decoder->allocator;
#endif
int orig_width = width;
if (resolution == DECODED_RESOLUTION_HALF)
orig_width *= 2;
if (resolution == DECODED_RESOLUTION_QUARTER)
orig_width *= 4;
if (decoder->RawBayer16)
{
#if _ALLOCATOR
FreeAligned(allocator, decoder->RawBayer16);
decoder->RawBayer16 = NULL;
decoder->RawBayerSize = 0;
#else
MEMORY_ALIGNED_FREE(decoder->RawBayer16);
decoder->RawBayer16 = NULL;
decoder->RawBayerSize = 0;
#endif
}
#if _ALLOCATOR
decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, orig_width * 64, alignment);
#else
decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(orig_width * 64, alignment);
#endif
assert(decoder->RawBayer16 != NULL);
if (! (decoder->RawBayer16 != NULL))
{
return CODEC_ERROR_MEMORY_ALLOC;
}
decoder->RawBayerSize = orig_width * 64;
}
}
// unpack source original YUV into YU64?
if (decoder->RawBayer16)
{
//uint8_t *src = (uint8_t *)decoder->uncompressed_chunk;
//uint8_t *dst = (uint8_t *)output_buffer;
#if _THREADED
{
WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
if (decoder->worker_thread.pool.thread_count == 0)
{
CreateLock(&decoder->worker_thread.lock);
// Initialize the pool of transform worker threads
ThreadPoolCreate(&decoder->worker_thread.pool,
decoder->thread_cntrl.capabilities >> 16/*cpus*/,
WorkerThreadProc,
decoder);
}
#endif
// Post a message to the mailbox
mailbox->output = output_buffer;
mailbox->pitch = output_pitch;
memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
mailbox->jobType = JOB_TYPE_OUTPUT_UNCOMPRESSED;
// Set the work count to the number of rows to process
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, height);
// Start the transform worker threads
ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
// Wait for all of the worker threads to finish
ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
}
#else
{
int orig_width = width;
int orig_height = height;
int row, lines = 1;
int start, end;
if (resolution == DECODED_RESOLUTION_HALF)
{
orig_width *= 2;
orig_height *= 2;
lines = 2;
}
if (resolution == DECODED_RESOLUTION_QUARTER)
{
orig_width *= 4;
orig_height *= 4;
lines = 4;
}
start = 0;
end = height;
if (format == DECODED_FORMAT_RGB32 || format == DECODED_FORMAT_RGB24) // Can this work, all the code below expects 10-bit
{
start = height - 1;
end = -1;
}
for (row = start; row != end; end > start ? row++ : row--)
{
int whitebitdepth = 16;
int flags = 0;
uint8_t *planar_output[3];
int planar_pitch[3];
ROI roi;
PIXEL16U *y_row_ptr;
PIXEL16U *u_row_ptr;
PIXEL16U *v_row_ptr;
PIXEL16U *scanline = (PIXEL16U *)decoder->RawBayer16;
PIXEL16U *scanline2 = scanline + orig_width * 8;
unsigned short *sptr;
int i, unc_Stride = decoder->uncompressed_size / orig_height;
whitebitdepth = 13;
if (decoder->apply_color_active_metadata)
flags = ACTIVEMETADATA_SRC_8PIXEL_PLANAR;
else
flags = 0;
roi.width = width;
roi.height = 1;
if (lines == 1)
{
uint16_t *sptr;
uint32_t j, *lptr = (uint32_t *)decoder->uncompressed_chunk;
PIXEL16U *ptr = (PIXEL16U *)scanline;
lptr += row * (unc_Stride >> 2);
sptr = (uint16_t *)lptr;
for (i = 0; i < width; i += 8)
{
int val, r, g, b;
if (flags == ACTIVEMETADATA_SRC_8PIXEL_PLANAR)
{
if (decoder->image_dev_only) // HACK, currently assuming RG48 input data.
{
for (j = 0; j < 8; j++)
{
ptr[j] = sptr[0] >> 3;
ptr[j + 8] = sptr[1] >> 3;
ptr[j + 16] = sptr[2] >> 3;
sptr += 3;
}
}
else
{
for (j = 0; j < 8; j++)
{
val = SwapInt32(*lptr++);
val >>= 2;
b = (val & 0x3ff) << 3;
val >>= 10;
g = (val & 0x3ff) << 3;
val >>= 10;
r = (val & 0x3ff) << 3;
ptr[j] = r;
ptr[j + 8] = g;
ptr[j + 16] = b;
}
}
}
else
{
if (decoder->image_dev_only) // HACK, currently assuming RG48 input data.
{
for (j = 0; j < 8 * 3; j += 3)
{
ptr[j] = sptr[0] >> 3;
ptr[j + 1] = sptr[1] >> 3;
ptr[j + 2] = sptr[2] >> 3;
sptr += 3;
}
}
else
{
for (j = 0; j < 8 * 3; j += 3)
{
val = SwapInt32(*lptr++);
val >>= 2;
b = (val & 0x3ff) << 3;
val >>= 10;
g = (val & 0x3ff) << 3;
val >>= 10;
r = (val & 0x3ff) << 3;
ptr[j] = r;
ptr[j + 1] = g;
ptr[j + 2] = b;
}
}
}
ptr += 24;
}
}
else if (lines == 2)
{
uint32_t j, *lptr = (uint32_t)decoder->uncompressed_chunk;
PIXEL16U *ptr = (PIXEL16U *)scanline;
lptr += row * (unc_Stride >> 2) * lines;
for (i = 0; i < width; i += 8)
{
int val, r, g, b, r2, g2, b2, r3, g3, b3, r4, g4, b4;
for (j = 0; j < 8; j++)
{
val = SwapInt32(lptr[0]);
val >>= 2;
b = (val & 0x3ff) << 3;
val >>= 10;
g = (val & 0x3ff) << 3;
val >>= 10;
r = (val & 0x3ff) << 3;
val = SwapInt32(lptr[1]);
val >>= 2;
b += (val & 0x3ff) << 3;
val >>= 10;
g += (val & 0x3ff) << 3;
val >>= 10;
r += (val & 0x3ff) << 3;
val = SwapInt32(lptr[unc_Stride >> 2]);
val >>= 2;
b += (val & 0x3ff) << 3;
val >>= 10;
g += (val & 0x3ff) << 3;
val >>= 10;
r += (val & 0x3ff) << 3;
val = SwapInt32(lptr[(unc_Stride >> 2) + 1]);
val >>= 2;
b += (val & 0x3ff) << 3;
val >>= 10;
g += (val & 0x3ff) << 3;
val >>= 10;
r += (val & 0x3ff) << 3;
if (flags == ACTIVEMETADATA_SRC_8PIXEL_PLANAR)
{
ptr[j] = r >> 2;
ptr[j + 8] = g >> 2;
ptr[j + 16] = b >> 2;
}
else
{
ptr[j * 3] = r >> 2;
ptr[j * 3 + 1] = g >> 2;
ptr[j * 3 + 2] = b >> 2;
}
lptr += lines;
}
ptr += 24;
}
}
else if (lines == 4)
{
uint32_t j, *lptr = (uint32_t)decoder->uncompressed_chunk;
PIXEL16U *ptr = (PIXEL16U *)scanline;
lptr += row * (unc_Stride >> 2) * lines;
for (i = 0; i < width; i += 8)
{
int val, r, g, b, r2, g2, b2, r3, g3, b3, r4, g4, b4;
for (j = 0; j < 8; j++)
{
val = SwapInt32(lptr[0]);
val >>= 2;
b = (val & 0x3ff) << 3;
val >>= 10;
g = (val & 0x3ff) << 3;
val >>= 10;
r = (val & 0x3ff) << 3;
val = SwapInt32(lptr[2]);
val >>= 2;
b += (val & 0x3ff) << 3;
val >>= 10;
g += (val & 0x3ff) << 3;
val >>= 10;
r += (val & 0x3ff) << 3;
val = SwapInt32(lptr[unc_Stride >> 1]);
val >>= 2;
b += (val & 0x3ff) << 3;
val >>= 10;
g += (val & 0x3ff) << 3;
val >>= 10;
r += (val & 0x3ff) << 3;
val = SwapInt32(lptr[(unc_Stride >> 1) + 2]);
val >>= 2;
b += (val & 0x3ff) << 3;
val >>= 10;
g += (val & 0x3ff) << 3;
val >>= 10;
r += (val & 0x3ff) << 3;
if (flags == ACTIVEMETADATA_SRC_8PIXEL_PLANAR)
{
ptr[j] = r >> 2;
ptr[j + 8] = g >> 2;
ptr[j + 16] = b >> 2;
}
else
{
ptr[j * 3] = r >> 2;
ptr[j * 3 + 1] = g >> 2;
ptr[j * 3 + 2] = b >> 2;
}
lptr += lines;
}
ptr += 24;
}
}
sptr = scanline;
if (decoder->apply_color_active_metadata)
sptr = ApplyActiveMetaData(decoder, width, 1, row, scanline, scanline2,
info->format, &whitebitdepth, &flags);
ConvertLinesToOutput(decoder, width, 1, row, sptr,
dst, output_pitch, format, whitebitdepth, flags);
dst += output_pitch;
}
}
#endif
}
error = CODEC_ERROR_OKAY;
return error;
}
// Reconstruct Bayer format to the requested output format
//
// Dispatches on the decoded resolution.  Only the demosaic (debayer)
// resolutions are currently implemented; every other resolution reports
// an unsupported format.
CODEC_ERROR ReconstructSampleFrameBayerToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch)
{
#if (DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    int resolution = info->resolution;

    // The two demosaic resolutions share the full-resolution debayer routine
    if (resolution == DECODED_RESOLUTION_FULL_DEBAYER ||
        resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER)
    {
        return ReconstructSampleFrameDeBayerFullToBuffer(decoder, info, frame, output, pitch);
    }

    switch (resolution)
    {
    case DECODED_RESOLUTION_FULL:
    case DECODED_RESOLUTION_HALF_NODEBAYER:
    case DECODED_RESOLUTION_HALF:
    case DECODED_RESOLUTION_QUARTER:
    case DECODED_RESOLUTION_LOWPASS_ONLY:
        // Recognized resolutions whose conversion paths are currently disabled
        return CODEC_ERROR_UNSUPPORTED_FORMAT;

    default:
        // The decoded resolution is not supported by this routine
        assert(0);
        return CODEC_ERROR_UNSUPPORTED_FORMAT;
    }
}
// Reconstruct Bayer encoded data to full resolution
//
// Allocates the scratch buffers used for decoding the Bayer frame
// (RawBayer16 and the sharpening filter buffer RGBFilterBuffer16), then
// converts to the requested output format.  All conversion paths are
// currently disabled, so the routine reports an unsupported format after
// performing the allocations.
CODEC_ERROR ReconstructSampleFrameBayerFullToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output_buffer, int output_pitch)
{
    CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    CODEC_STATE *codec = &decoder->codec;
    int num_channels = codec->num_channels;
    int format = info->format;

    // Need to allocate a scratch buffer for decoding the Bayer frame?
    if (decoder->RawBayer16 == NULL)
    {
        TRANSFORM **transform_array = decoder->transform;
        int decoded_width = 0;
        int decoded_height = 0;
        int resolution = info->resolution;
        // Four Bayer data samples at each 2x2 quad in the grid
        int pixel_size = 4 * sizeof(PIXEL16U);
        int frame_size;
        const size_t alignment = 16;
#if _ALLOCATOR
        ALLOCATOR *allocator = decoder->allocator;
#endif
        // Compute the decoded width and height for the specified resolution
        GetDecodedFrameDimensions(transform_array, num_channels, frame, resolution, &decoded_width, &decoded_height);
        assert(decoded_width > 0 && decoded_height > 0);
        if (! (decoded_width > 0 && decoded_height > 0))
        {
            return CODEC_ERROR_UNSUPPORTED_FORMAT;
        }
        frame_size = decoded_width * decoded_height * pixel_size;
#if _ALLOCATOR
        decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)frame_size, alignment);
#else
        decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, alignment);
#endif
        assert(decoder->RawBayer16 != NULL);
        if (! (decoder->RawBayer16 != NULL))
        {
            return CODEC_ERROR_MEMORY_ALLOC;
        }
        decoder->RawBayerSize = frame_size;

        // Allocate the RGB filter buffer used for sharpening
        if (decoder->RGBFilterBuffer16 == NULL)
        {
            int size = frame_size * 3;
            // Four planes when the encoded data carries an alpha channel that is output
            if (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
                size = frame_size * 4;
#if _ALLOCATOR
            decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)size, 16);
#else
            decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(size, 16);
#endif
            assert(decoder->RGBFilterBuffer16 != NULL);
            if (! (decoder->RGBFilterBuffer16 != NULL))
            {
                return CODEC_ERROR_MEMORY_ALLOC;
            }
            // Record the actual allocated size.  Previously this was always
            // frame_size * 3, under-reporting the buffer when four planes
            // were allocated for the alpha case above.
            decoder->RGBFilterBufferSize = size;
        }
    }

    //TODO: Need to add more output formats to this routine
    switch (format)
    {
    case DECODED_FORMAT_RGB32:
        // Conversion path disabled pending a working Bayer-to-RGB32 routine
        error = CODEC_ERROR_UNSUPPORTED_FORMAT;
        break;

    case DECODED_FORMAT_RGB24:
        // Conversion path disabled pending a working Bayer-to-RGB24 routine
        error = CODEC_ERROR_UNSUPPORTED_FORMAT;
        break;

    default:
        error = CODEC_ERROR_UNSUPPORTED_FORMAT;
        break;
    }

    return error;
}
// Reconstruct Bayer encoded data and demosaic to full resolution
//
// Decodes the wavelets into a scratch buffer of packed Bayer rows
// (RawBayer16), then demosaics to the requested output format using the
// worker-thread pool.  Requires the threaded build; without _THREADED the
// routine reports an unsupported format.
CODEC_ERROR ReconstructSampleFrameDeBayerFullToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output_buffer, int output_pitch)
{
    CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    CODEC_STATE *codec = &decoder->codec;
    int num_channels = codec->num_channels;
    int precision = codec->precision;
    int format = info->format;
    int width = info->width;
    // Compute the number of bytes between each row of Bayer data
    int bayer_pitch = 2 * width * sizeof(PIXEL16U);
    int chroma_offset = decoder->codec.chroma_offset;

    // Reject output formats that this routine cannot produce
    error = CODEC_ERROR_UNSUPPORTED_FORMAT;
    switch (format)
    {
    case DECODED_FORMAT_RGB24:
    case DECODED_FORMAT_RGB32:
    case DECODED_FORMAT_RG48:   //DAN20090120 added not sure why they weren't here.
    case DECODED_FORMAT_WP13:   //DAN20090120 ""
    case DECODED_FORMAT_B64A:
    case DECODED_FORMAT_R210:
    case DECODED_FORMAT_DPX0:
    case DECODED_FORMAT_RG30:
    case DECODED_FORMAT_AR10:
    case DECODED_FORMAT_AB10:
    case DECODED_FORMAT_YR16:
    case DECODED_FORMAT_V210:
    case DECODED_FORMAT_YU64:
        error = CODEC_ERROR_OKAY;
        break;
    }
    if (error)
        return error;

    // Need to allocate a scratch buffer for decoding the Bayer frame?
    if (decoder->RawBayer16 == NULL)
    {
        TRANSFORM **transform_array = decoder->transform;
        int decoded_width = 0;
        int decoded_height = 0;
        int resolution = info->resolution;
        // Four Bayer data samples at each 2x2 quad in the grid
        int pixel_size = 4 * sizeof(PIXEL16U);
        int frame_size;
        const size_t alignment = 16;
#if _ALLOCATOR
        ALLOCATOR *allocator = decoder->allocator;
#endif
        // Compute the decoded width and height for the specified resolution
        GetDecodedFrameDimensions(transform_array, num_channels, frame, resolution, &decoded_width, &decoded_height);
        assert(decoded_width > 0 && decoded_height > 0);
        if (! (decoded_width > 0 && decoded_height > 0))
        {
            return CODEC_ERROR_UNSUPPORTED_FORMAT;
        }
        frame_size = decoded_width * decoded_height * pixel_size;
#if _ALLOCATOR
        decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)frame_size, alignment);
#else
        decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, alignment);
#endif
        assert(decoder->RawBayer16 != NULL);
        if (! (decoder->RawBayer16 != NULL))
        {
            return CODEC_ERROR_MEMORY_ALLOC;
        }
        decoder->RawBayerSize = frame_size;

        // Allocate the RGB filter buffer used for sharpening
        if (decoder->RGBFilterBuffer16 == NULL)
        {
            int size = frame_size * 3;
            // Four planes when the encoded data carries an alpha channel that is output
            if (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
                size = frame_size * 4;
#if _ALLOCATOR
            decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)size, 16);
#else
            decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(size, 16);
#endif
            assert(decoder->RGBFilterBuffer16 != NULL);
            if (! (decoder->RGBFilterBuffer16 != NULL))
            {
                return CODEC_ERROR_MEMORY_ALLOC;
            }
            // Record the actual allocated size.  Previously this was always
            // frame_size * 3, under-reporting the buffer when four planes
            // were allocated for the alpha case above.
            decoder->RGBFilterBufferSize = size;
        }
    }

#if _THREADED
    // NOTE(review): bayer_pitch is already a byte count, so the extra
    // sizeof(PIXEL) factor looks suspicious — preserved as-is; confirm
    // against TransformInverseSpatialUniversalThreadedToRow16u.
    TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame, num_channels,
        (uint8_t *)decoder->RawBayer16, bayer_pitch * sizeof(PIXEL),
        info, chroma_offset, precision);

    //DemosaicRAW
    {
        WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
        int inverted = false;
        uint8_t *output = output_buffer;
        int pitch = output_pitch;
#if _DELAY_THREAD_START
        // Lazily create the worker-thread pool on first use
        if (decoder->worker_thread.pool.thread_count == 0)
        {
            CreateLock(&decoder->worker_thread.lock);
            // Initialize the pool of transform worker threads
            ThreadPoolCreate(&decoder->worker_thread.pool,
                             decoder->thread_cntrl.capabilities >> 16/*cpus*/,
                             WorkerThreadProc,
                             decoder);
        }
#endif
        // 8-bit RGB formats are rendered bottom-up
        if (format == DECODED_FORMAT_RGB24)
        {
            format = DECODED_FORMAT_RGB24_INVERTED;
            inverted = true;
        }
        else if (format == DECODED_FORMAT_RGB32)
        {
            format = DECODED_FORMAT_RGB32_INVERTED;
            inverted = true;
        }

        // Have the output location and pitch been inverted?
        if (inverted && pitch > 0)
        {
            int height = info->height;
            // The debayer resolutions decode twice the nominal frame height
            if (info->resolution == DECODED_RESOLUTION_FULL_DEBAYER || info->resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER)
                height *= 2;
            output += (height - 1) * pitch; // Start at the bottom row
            pitch = NEG(pitch);             // Negate the pitch to go up
        }

        // Post a message to the mailbox
        mailbox->output = output;
        mailbox->pitch = pitch;
        memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
        mailbox->jobType = JOB_TYPE_OUTPUT;
        // Set the work count to the number of rows to process
        ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
        // Start the transform worker threads
        ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
        // Wait for all of the worker threads to finish
        ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
    }
#else
    // Demosaic to full resolution is only implemented for threaded builds
    error = CODEC_ERROR_UNSUPPORTED_FORMAT;
#endif

    return error;
}
// Reconstruct Bayer encoded data to half resolution
//
// Converts the lowpass bands of the frame wavelet for each Bayer channel
// (G1, RG, BG, and optionally the G1-G2 difference) directly to the
// requested output format.  Only DECODED_FORMAT_RGB32 is currently
// supported.
CODEC_ERROR ReconstructSampleFrameBayerHalfToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output_buffer, int output_pitch)
{
    CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    TRANSFORM **transform_array = decoder->transform;
    int frame_width = info->width;
    int frame_height = info->height;
    int format = info->format;
    PIXEL16U *g1_plane;
    PIXEL16U *rg_plane;
    PIXEL16U *bg_plane;
    PIXEL16U *g2_plane;
    int g1_pitch;
    int rg_pitch;
    int bg_pitch;
    int g2_pitch;

    // Get the lowpass bands in the wavelet corresponding to the output frame
    g1_plane = (PIXEL16U *)transform_array[0]->wavelet[frame]->band[0];
    rg_plane = (PIXEL16U *)transform_array[1]->wavelet[frame]->band[0];
    bg_plane = (PIXEL16U *)transform_array[2]->wavelet[frame]->band[0];

    // Half resolution decodes may not produce the G1-G2 difference channel,
    // so guard against both a missing transform and a missing wavelet
    // (previously only the wavelet was checked, dereferencing
    // transform_array[3] unconditionally). //HACK
    if (transform_array[3] != NULL && transform_array[3]->wavelet[frame])
    {
        g2_plane = (PIXEL16U *)transform_array[3]->wavelet[frame]->band[0];
        g2_pitch = transform_array[3]->wavelet[frame]->pitch;
    }
    else
    {
        g2_plane = NULL;
        g2_pitch = 0;
    }

    // Get the pitch of each plane
    g1_pitch = transform_array[0]->wavelet[frame]->pitch;
    rg_pitch = transform_array[1]->wavelet[frame]->pitch;
    bg_pitch = transform_array[2]->wavelet[frame]->pitch;

    switch (format)
    {
    case DECODED_FORMAT_RGB32:
        ConvertPlanarBayerToRGB32(g1_plane, g1_pitch, rg_plane, rg_pitch,
                                  bg_plane, bg_pitch, g2_plane, g2_pitch,
                                  output_buffer, output_pitch,
                                  frame_width, frame_height);
        break;

    default:
        error = CODEC_ERROR_UNSUPPORTED_FORMAT;
        break;
    }

    return error;
}
// Reconstruct Bayer encoded data to quarter resolution
//
// Not yet implemented.  Reports an unsupported format instead of the
// previous behavior of returning CODEC_ERROR_OKAY, which claimed success
// in release builds even though nothing was written to the output buffer.
CODEC_ERROR ReconstructSampleFrameBayerQuarterToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch)
{
#if (DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    //TODO: Need to finish this routine
    assert(0);
    return CODEC_ERROR_UNSUPPORTED_FORMAT;
}
// Reconstruct the original YUV 4:2:2 encoded format to the requested output format
//
// Dispatches on the decoded resolution, on progressive versus interlaced
// source material, and on the requested output format, then runs the
// matching inverse wavelet transform (threaded where available).  Returns
// CODEC_ERROR_OKAY on success and CODEC_ERROR_UNSUPPORTED_FORMAT when no
// code path handles the requested resolution/format combination.
CODEC_ERROR ReconstructSampleFrameYUV422ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch)
{
    CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    FRAME_INFO *info = &decoder->frame;
    CODEC_STATE *codec = &decoder->codec;
    int num_channels = codec->num_channels;
    int progressive = codec->progressive;
    int precision = codec->precision;
    TRANSFORM **transform_array = decoder->transform;
    //int decoded_width = 0;
    //int decoded_height = 0;
    int resolution = info->resolution;
    int format = info->format;
    //int color_space = decoder->frame.colorspace;
    //TODO: Eliminate use of the chroma offset
    int chroma_offset = decoder->codec.chroma_offset;
#if _THREADED
    // Type of threaded inverse transform
    //int type;
#endif
#if _ALLOCATOR
    ALLOCATOR *allocator = decoder->allocator;
#endif
    // NOTE(review): decoder has already been dereferenced many times above,
    // so this null check can never trigger usefully — it should be the
    // first statement of the function.
    if (decoder == NULL)
    {
        return CODEC_ERROR_INVALID_ARGUMENT;
    }
    //TODO: Split this routine into subroutines for progressive versus interlaced video
    //TODO: Split progressive and interlaced routines into subroutines for each resolution

    // Half-resolution decodes use the lowpass band of the frame wavelet directly
    if (resolution == DECODED_RESOLUTION_HALF)
    {
        bool inverted = false;
        FRAME_INFO info2;
        memcpy(&info2, info, sizeof(FRAME_INFO));
        format = info2.format;
        // 8-bit RGB output is rendered bottom-up, so switch to the inverted format
        if (format == DECODED_FORMAT_RGB24)
        {
            format = DECODED_FORMAT_RGB24_INVERTED;
            info2.format = format;
            inverted = true;
        }
        else if (format == DECODED_FORMAT_RGB32)
        {
            format = DECODED_FORMAT_RGB32_INVERTED;
            info2.format = format;
            inverted = true;
        }
#if 1
        // Have the output location and pitch been inverted?
        if (inverted && pitch > 0)
        {
            int height = info->height;
            output += (height - 1) * pitch; // Start at the bottom row
            pitch = NEG(pitch);             // Negate the pitch to go up
        }
#endif
        if (decoder->use_active_metadata_decoder)
        {
#if _THREADED
            WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
            // Lazily create the worker-thread pool on first use
            if (decoder->worker_thread.pool.thread_count == 0)
            {
                CreateLock(&decoder->worker_thread.lock);
                // Initialize the pool of transform worker threads
                ThreadPoolCreate(&decoder->worker_thread.pool,
                                 decoder->thread_cntrl.capabilities >> 16/*cpus*/,
                                 WorkerThreadProc,
                                 decoder);
            }
#endif
            // Post a message to the mailbox
            mailbox->output = output;
            mailbox->pitch = pitch;
            mailbox->framenum = frame;
            memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
            mailbox->jobType = JOB_TYPE_OUTPUT;
            decoder->RGBFilterBufferPhase = 1;
            // Set the work count to the number of rows to process
            ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
            // Start the transform worker threads
            ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
            // Wait for all of the worker threads to finish
            ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
            decoder->RGBFilterBufferPhase = 0;
            return CODEC_ERROR_OKAY;
#endif
        }
        else
        {
            // NOTE(review): these declarations shadow precision,
            // transform_array, codec and num_channels from the enclosing
            // scope with identical values.
            int precision = codec->precision;
            TRANSFORM **transform_array = decoder->transform;
            int channel;
            IMAGE *lowpass_images[TRANSFORM_MAX_CHANNELS];
            CODEC_STATE *codec = &decoder->codec;
            int num_channels = codec->num_channels;
            // Gather the frame wavelet for each channel
            for (channel = 0; channel < num_channels; channel++)
            {
                lowpass_images[channel] = transform_array[channel]->wavelet[frame];
            }
            // Copy the lowpass band of each channel directly to the output buffer
            CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, &info2, chroma_offset,
                                   precision, decoder->codec.encoded_format, decoder->frame.white_point);
        }
        return CODEC_ERROR_OKAY;
    }

    // Was the video source interlaced or progressive?
    if (progressive)
    {
        // The video source was progressive (the first transform was a spatial transform)
        if (resolution == DECODED_RESOLUTION_FULL || resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
        {
            FRAME_INFO info2;
            int format;
            bool inverted = false;
            int precision = codec->precision;
            memcpy(&info2, info, sizeof(FRAME_INFO));
            format = info2.format;
            // 8-bit RGB output is rendered bottom-up, so switch to the inverted format
            if (format == DECODED_FORMAT_RGB24)
            {
                format = DECODED_FORMAT_RGB24_INVERTED;
                info2.format = format;
                inverted = true;
            }
            else if (format == DECODED_FORMAT_RGB32)
            {
                format = DECODED_FORMAT_RGB32_INVERTED;
                info2.format = format;
                inverted = true;
            }
#if 1
            // Have the output location and pitch been inverted?
            if (inverted && pitch > 0)
            {
                int height = info->height;
                output += (height - 1) * pitch; // Start at the bottom row
                pitch = NEG(pitch);             // Negate the pitch to go up
            }
#endif
            /*if(decoder->use_active_metadata_decoder)
            {
                switch (format & 0x7ffffff)
                {
                case DECODED_FORMAT_RGB24: // Output buffer is too small to decode into for
                case DECODED_FORMAT_YUYV:  // computing the active metadata.
                case DECODED_FORMAT_UYVY:
                    return CODEC_ERROR_OKAY;
                    break;
                }
            }*/
            // NOTE(review): the mask here is 0x7ffffff (27 bits) but the
            // RGB32 test further below uses 0x7FFFFFFF (31 bits) — confirm
            // which width is intended.
            switch (format & 0x7ffffff)
            {
            case DECODED_FORMAT_RGB24: // Output buffer is too small to decode into for
                if (decoder->use_active_metadata_decoder)
                {
#if _THREADED
                    TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
                        output, pitch,
                        info, chroma_offset, precision,
                        InvertHorizontalStrip16sThruActiveMetadata);
                    return CODEC_ERROR_OKAY;
#endif
                }
                else
                {
#if _THREADED
                    TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
                        output, pitch,
                        info, chroma_offset, precision,
                        InvertHorizontalStrip16sYUVtoRGB);
                    return CODEC_ERROR_OKAY;
#endif
                }
                break;

            case DECODED_FORMAT_YUYV:
            case DECODED_FORMAT_UYVY:
                if (decoder->use_active_metadata_decoder)
                {
#if _THREADED
                    TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
                        output, pitch,
                        info, chroma_offset, precision,
                        InvertHorizontalStrip16sThruActiveMetadata);
                    return CODEC_ERROR_OKAY;
#endif
                }
                else
                {
#if _THREADED
                    TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
                        output, pitch,
                        info, chroma_offset, precision,
                        InvertHorizontalStrip16sToYUV);
                    return CODEC_ERROR_OKAY;
#endif
                }
                break;

            //Handle sizes that are smaller than the interim decode buffer //DAN20081222
            case DECODED_FORMAT_CbYCrY_10bit_2_8:
                // Split the output into upper and lower planes, then decode
                // into the lower plane before falling through
                decoder->upper_plane = output;
                decoder->lower_plane = output + decoder->frame.width * decoder->frame.height / 2;
                // Use the address and pitch of the lower plane
                output = decoder->lower_plane;
                pitch = decoder->frame.width * 2;
                // Fall through and compute the inverse spatial transform
            case DECODED_FORMAT_CbYCrY_16bit_2_14:
            case DECODED_FORMAT_CbYCrY_16bit_10_6:
            case DECODED_FORMAT_CbYCrY_8bit:
            case DECODED_FORMAT_CbYCrY_16bit:
                if (decoder->use_active_metadata_decoder)
                {
                    TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
                        output, pitch,
                        info, chroma_offset, precision,
                        InvertHorizontalStrip16sThruActiveMetadata);
                    return CODEC_ERROR_OKAY;
                }
                else
                {
                    TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
                        output, pitch,
                        info, chroma_offset, precision,
                        InvertHorizontalStrip16sToOutput);
                    return CODEC_ERROR_OKAY;
                }
                break;

            case DECODED_FORMAT_V210:
                if (decoder->use_active_metadata_decoder)
                {
                    TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
                        output, pitch,
                        info, chroma_offset, precision,
                        InvertHorizontalStrip16sThruActiveMetadata);
                    return CODEC_ERROR_OKAY;
                }
                else
                {
                    TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
                        output, pitch,
                        info, chroma_offset, precision,
                        InvertHorizontalYUVStrip16sToYUVOutput);
                    return CODEC_ERROR_OKAY;
                }
                break;

            case DECODED_FORMAT_RGB32:
            case DECODED_FORMAT_RGB32_INVERTED:
            // As long as the outpitch is greater or equal to 4:2:2 16-bit YR16 this works.
            case DECODED_FORMAT_RG48:
            case DECODED_FORMAT_RG64:
            case DECODED_FORMAT_R210:
            case DECODED_FORMAT_DPX0:
            case DECODED_FORMAT_RG30:
            case DECODED_FORMAT_AR10:
            case DECODED_FORMAT_AB10:
            case DECODED_FORMAT_B64A:
            case DECODED_FORMAT_R408:
            case DECODED_FORMAT_V408:
            case DECODED_FORMAT_YU64:
            case DECODED_FORMAT_YR16:
            case DECODED_FORMAT_WP13:
            case DECODED_FORMAT_W13A:
                // Dedicated RGB32 path when active metadata is not in use
                if ((format & 0x7FFFFFFF) == DECODED_FORMAT_RGB32 && decoder->use_active_metadata_decoder == false)
                {
#if _THREADED
                    TransformInverseSpatialThreadedYUV422ToBuffer(decoder,
                        frame, num_channels, output, pitch,
                        &info2, chroma_offset, precision);
#elif 0
                    TransformInverseSpatialToBuffer(decoder, transform_array, frame,
                        num_channels, output, pitch,
                        &info2, &decoder->scratch, chroma_offset, precision);
#else
                    TransformInverseSpatialYUV422ToOutput(decoder, transform_array,
                        frame, num_channels, output, pitch,
                        &info2, &decoder->scratch, chroma_offset, precision,
                        InvertHorizontalStripYUV16sToPackedRGB32);
#endif
                    return CODEC_ERROR_OKAY;
                }
#if _THREADED
                if (decoder->use_active_metadata_decoder)
                {
                    TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
                        output, pitch,
                        info, chroma_offset, precision,
                        InvertHorizontalStrip16sThruActiveMetadata);
                    return CODEC_ERROR_OKAY;
                }
                else
                {
                    // Decode into 16-bit rows in place, then convert to the output format
                    TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame,
                        num_channels, output, pitch,
                        &info2, chroma_offset, precision);
                    ConvertRow16uToOutput(decoder, frame, num_channels, output, pitch,
                        &info2, chroma_offset, precision);
                    return CODEC_ERROR_OKAY;
                }
#endif
                break;

            default:
                if (decoder->use_active_metadata_decoder)
                {
#if _THREADED
                    TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
                        output, pitch,
                        info, chroma_offset, precision,
                        InvertHorizontalStrip16sThruActiveMetadata);
                    return CODEC_ERROR_OKAY;
#endif
                }
                // else Return the error code for unsupported output format
                break;
            }
        }
    }
    else
    {
        // The video source was interlaced (the first transform was a frame transform)
        if (resolution == DECODED_RESOLUTION_FULL || resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
        {
            bool inverted = false;
            if (format == DECODED_FORMAT_RGB32 || format == DECODED_FORMAT_RGB24)
            {
                // info->format = DECODED_FORMAT_RGB32_INVERTED; //DAN20080702 vertically flips QT decodes if active.
                inverted = true;
            }
#if 1
            // Have the output location and pitch been inverted?
            if (inverted && pitch > 0)
            {
                int height = info->height;
                output += (height - 1) * pitch; // Start at the bottom row
                pitch = NEG(pitch);             // Negate the pitch to go up
            }
#endif
            switch (format & 0x7ffffff)
            {
            case DECODED_FORMAT_NV12:
            case DECODED_FORMAT_RGB24: // Output buffer is too small to decode into for
            case DECODED_FORMAT_YUYV:
            case DECODED_FORMAT_UYVY:
            case DECODED_FORMAT_V210: // only supported with use_active_metadata_decoder
                if (decoder->use_active_metadata_decoder)
                {
                    // (Re)allocate the intermediate RGB filter buffer if missing or too small
                    int frame_size = info->width * info->height * 4;
                    if (decoder->RGBFilterBuffer16 == NULL || decoder->RGBFilterBufferSize < frame_size)
                    {
#if _ALLOCATOR
                        if (decoder->RGBFilterBuffer16)
                        {
                            FreeAligned(decoder->allocator, decoder->RGBFilterBuffer16);
                            decoder->RGBFilterBuffer16 = NULL;
                        }
                        decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, frame_size, 16);
#else
                        if (decoder->RGBFilterBuffer16)
                        {
                            MEMORY_ALIGNED_FREE(decoder->RGBFilterBuffer16);
                            decoder->RGBFilterBuffer16 = NULL;
                        }
                        decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, 16);
#endif
                        assert(decoder->RGBFilterBuffer16 != NULL);
                        if (! (decoder->RGBFilterBuffer16 != NULL))
                        {
                            return CODEC_ERROR_MEMORY_ALLOC;
                        }
                        decoder->RGBFilterBufferSize = frame_size;
                    }
                    //TransformInverseSpatialUniversalThreadedToRow16u(
                    //	decoder, frame, num_channels,
                    //	(uint8_t *)decoder->RGBFilterBuffer16, info->width * 3 * 2,
                    //	info, chroma_offset, precision);
#if _INTERLACED_WORKER_THREADS
                    StartInterlaceWorkerThreads(decoder);
                    //TODO: support new threading
                    // Send the upper and lower rows of the transforms to the worker threads
                    TransformInverseFrameThreadedToRow16u(decoder, frame, num_channels,
                        (PIXEL16U *)decoder->RGBFilterBuffer16,
                        info->width * 4,
                        info, chroma_offset, precision);
#else
                    // Transform the wavelets for each channel to the output image (not threaded)
                    TransformInverseFrameToRow16u(decoder, transform_array, frame, num_channels,
                        (PIXEL16U *)decoder->RGBFilterBuffer16,
                        info->width * 4, info,
                        &decoder->scratch, chroma_offset, precision);
#endif
#if _THREADED
                    {
                        WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
                        // Lazily create the worker-thread pool on first use
                        if (decoder->worker_thread.pool.thread_count == 0)
                        {
                            CreateLock(&decoder->worker_thread.lock);
                            // Initialize the pool of transform worker threads
                            ThreadPoolCreate(&decoder->worker_thread.pool,
                                decoder->thread_cntrl.capabilities >> 16/*cpus*/,
                                WorkerThreadProc,
                                decoder);
                        }
#endif
                        // Post a message to the mailbox
                        mailbox->output = output;
                        mailbox->pitch = pitch;
                        memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
                        mailbox->jobType = JOB_TYPE_OUTPUT;
                        decoder->RGBFilterBufferPhase = 2; // yuv
                        // Set the work count to the number of rows to process
                        ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
                        // Start the transform worker threads
                        ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
                        // Wait for all of the worker threads to finish
                        ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
                        decoder->RGBFilterBufferPhase = 0;
                    }
#endif
                    return CODEC_ERROR_OKAY;
                }
                // NOTE(review): without the active metadata decoder these
                // formats fall out of this switch into the one below, whose
                // default case reports an unsupported format.
            }
            switch (format)
            {
            // As long as the outpitch is greater or equal to 4:2:2 16-bit YR16 this works.
            case DECODED_FORMAT_WP13: //DAN20110203 - missing
            case DECODED_FORMAT_W13A: //DAN20110203 - missing
            case DECODED_FORMAT_RG48:
            case DECODED_FORMAT_RG64:
            case DECODED_FORMAT_R210:
            case DECODED_FORMAT_DPX0:
            case DECODED_FORMAT_RG30:
            case DECODED_FORMAT_AR10:
            case DECODED_FORMAT_AB10:
            case DECODED_FORMAT_B64A:
            case DECODED_FORMAT_RGB32: //32-bit format can fit the interim YR16 decode into
            case DECODED_FORMAT_R408:  //the output buffer
            case DECODED_FORMAT_V408:
            case DECODED_FORMAT_YU64:
            case DECODED_FORMAT_YR16:
#if _INTERLACED_WORKER_THREADS
                StartInterlaceWorkerThreads(decoder);
                //TODO: support new threading
                // Send the upper and lower rows of the transforms to the worker threads
                TransformInverseFrameThreadedToRow16u(decoder, frame, num_channels,
                    (PIXEL16U *)output, pitch,
                    info, chroma_offset, precision);
                ConvertRow16uToOutput(decoder, frame, num_channels, output, pitch,
                    info, chroma_offset, precision);
#else
                // Transform the wavelets for each channel to the output image (not threaded)
                TransformInverseFrameToRow16u(decoder, transform_array, frame, num_channels,
                    (PIXEL16U *)output, pitch, info,
                    &decoder->scratch, chroma_offset, precision);
                ConvertRow16uToOutput(decoder, frame, num_channels, output, pitch,
                    info, chroma_offset, precision);
                //Old code converts 4:2:2 directly to RGBA (single threaded.)
                //TransformInverseFrameToBuffer(transform_array, frame, num_channels, output, pitch,
                //	info, &decoder->scratch, chroma_offset, precision);
#endif
                return CODEC_ERROR_OKAY;

            default:
                // else Return the error code for unsupported output format
                break;
            }
        }
    }

    // The output format is not supported by this routine
    error = CODEC_ERROR_UNSUPPORTED_FORMAT;
    return error;
}
// Routines for converting the new encoded formats to the requested output format

/*
    Reconstruct one decoded RGB 4:4:4 frame into the caller's output buffer.

    Dispatches on the decoded resolution (lowpass-only, quarter, half, or
    full/half-horizontal) and, within the full-resolution path, on the
    requested output pixel format.  When the active-metadata decoder is in
    use, the frame is first inverse-transformed into the intermediate 16-bit
    RGBFilterBuffer16 and then converted by the worker thread pool.

    decoder - decoder state (transforms, scratch space, worker thread pool)
    frame   - index of the frame within the group of frames to reconstruct
    output  - destination buffer for the converted pixels (NULL skips output)
    pitch   - bytes per output row (0 skips output; negated for inverted RGB)

    Returns CODEC_ERROR_OKAY on success or a CODEC_ERROR_* code on failure.
*/
CODEC_ERROR ReconstructSampleFrameRGB444ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch)
{
    CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (DEBUG)
    // NOTE(review): this dereferences decoder before the NULL check below,
    // so a NULL decoder would crash here in DEBUG builds; consider moving
    // this assignment after the argument validation.
    FILE *logfile = decoder->logfile;
#endif
    FRAME_INFO *info = &decoder->frame;
    CODEC_STATE *codec = &decoder->codec;
    int num_channels = codec->num_channels;
    //int progressive = codec->progressive;
    TRANSFORM **transform_array = decoder->transform;
    //IMAGE *lowpass_images[TRANSFORM_MAX_CHANNELS];
    //IMAGE *wavelet;
    //int wavelet_width;
    //int wavelet_height;
    int decoded_width = 0;
    int decoded_height = 0;
    int resolution = info->resolution;
    //int chroma_offset = decoder->codec.chroma_offset;
    //int decoded_scale;
#if _ALLOCATOR
    ALLOCATOR *allocator = decoder->allocator;
#endif
    //TODO: Eliminate use of the chroma offset
    if (decoder == NULL)
    {
        return CODEC_ERROR_INVALID_ARGUMENT;
    }
    // This routine should only be called for progressive frames
    assert(codec->progressive);
    // The decoder can decode a video sample without returning a frame
    if (output == NULL || pitch == 0)
    {
        return CODEC_ERROR_OKAY;
    }
    // Does this frame have to be reconstructed?
    if ((decoder->flags & DECODER_FLAGS_RENDER) == 0)
    {
        return CODEC_ERROR_OKAY;
    }
    // Check that the requested frame is within the limits of the group of frames
    assert(0 <= frame && frame < decoder->gop_length);
    // Check that the frame resolution is valid
    assert(IsValidFrameResolution(resolution));
    if (!IsValidFrameResolution(resolution))
    {
        return CODEC_ERROR_RESOLUTION;
    }
    // Compute the decoded width and height
    ComputeOutputDimensions(decoder, frame, &decoded_width, &decoded_height);
    assert(decoded_width > 0 && decoded_height > 0);
    // RGB24/RGB32 are stored bottom-up: start at the last row and negate the
    // pitch so the top-down decode lands in a bottom-up layout
    if (info->format == DECODED_FORMAT_RGB24 || info->format == DECODED_FORMAT_RGB32)
    {
        output += (info->height - 1) * pitch;
        pitch = -pitch;
    }
#if (0 && DEBUG)
    if (logfile)
    {
        IMAGE *wavelet = transform[0]->wavelet[frame];
        int band = 0;
        fprintf(logfile, "Luminance wavelet, frame: %d, band: %d\n", frame, band);
        DumpArray16s("Lowpass Band", wavelet->band[band], wavelet->width, wavelet->height, wavelet->pitch, logfile);
    }
#endif
    // Check that the requested frame is large enough to hold the decoded frame
#if (0 && DEBUG)
    //if (! (info->width >= decoded_width))
    {
        if (logfile)
        {
            //fprintf(logfile, "Requested frame not large enough to hold decoded frame: %d < %d\n", info->width, decoded_width);
            fprintf(logfile, "Output frame width: %d, decoded frame width: %d\n", info->width, decoded_width);
        }
    }
#endif
    assert(info->width >= decoded_width);
    if (!(info->width >= decoded_width))
    {
        return CODEC_ERROR_FRAMESIZE;
    }
    // assert((info->height+7)/8 >= (decoded_height+7)/8);
    // if (!(info->height+7)/8 >= (decoded_height+7)/8) {
    //     return CODEC_ERROR_FRAMESIZE;
    // }
    START(tk_convert);
    if (resolution == DECODED_RESOLUTION_LOWPASS_ONLY)
    {
        //int precision = codec->precision;
        int scale = 13;     // lowpass scale for group (two frame) encodes
        int channel;
        IMAGE *lowpass_images[TRANSFORM_MAX_CHANNELS];
        int chroma_offset = decoder->codec.chroma_offset;
        //DAN20081203 -- fix for 444 decodes in AE32-bit float
        decoder->frame.white_point = 16;
        //decoder->frame.signed_pixels = 0;
        // Collect the smallest lowpass wavelet from each channel
        for (channel = 0; channel < num_channels; channel++)
        {
            lowpass_images[channel] = transform_array[channel]->wavelet[5];
            if (lowpass_images[channel] == NULL) // therefore intra-frame compressed.
            {
                scale = 12;
                lowpass_images[channel] = transform_array[channel]->wavelet[2];
            }
        }
        CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, info, chroma_offset,
                               scale, decoder->codec.encoded_format, decoder->frame.white_point);
    }
    else
    // Quarter resolution
    if (resolution == DECODED_RESOLUTION_QUARTER)
    {
        // Output quarter resolution for the two frame GOP
        int precision = codec->precision;
        // Reconstruct the frame to quarter resolution
        ReconstructQuarterFrame(decoder, num_channels, frame, output, pitch,
                                info, &decoder->scratch, precision);
        // Quarter resolution one frame GOP is handled in DecodeSampleIntraFrame
    }
    else
    // Half resolution
    if (resolution == DECODED_RESOLUTION_HALF)
    {
        IMAGE *wavelet_array[TRANSFORM_MAX_CHANNELS];
        int precision = codec->precision;
        int chroma_offset = 0;
        int channel;
        if (decoder->use_active_metadata_decoder)
        {
#if _THREADED
            {
                WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
                // Lazily create the worker thread pool on first use
                if (decoder->worker_thread.pool.thread_count == 0)
                {
                    CreateLock(&decoder->worker_thread.lock);
                    // Initialize the pool of transform worker threads
                    ThreadPoolCreate(&decoder->worker_thread.pool,
                                     decoder->thread_cntrl.capabilities >> 16/*cpus*/,
                                     WorkerThreadProc,
                                     decoder);
                }
#endif
                // Post a message to the mailbox
                mailbox->output = output;
                mailbox->pitch = pitch;
                mailbox->framenum = frame;
                memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
                mailbox->jobType = JOB_TYPE_OUTPUT;
                decoder->RGBFilterBufferPhase = 1;
                // Set the work count to the number of rows to process
                ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
                // Start the transform worker threads
                ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
                // Wait for all of the worker threads to finish
                ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
                decoder->RGBFilterBufferPhase = 0;
            }
#endif
        }
        else
        {
            //DAN20081203 -- fix for 444 decodes in AE32-bit float
            decoder->frame.white_point = 16;
            //decoder->frame.signed_pixels = 0;
            // Get the first level wavelet in each channel
            for (channel = 0; channel < num_channels; channel++)
            {
                wavelet_array[channel] = transform_array[channel]->wavelet[frame];
            }
            // Pack the pixels from the lowpass band in each channel into the output buffer
            CopyLowpassRGB444ToBuffer(decoder, wavelet_array, num_channels, output, pitch,
                                      info, chroma_offset, precision);
        }
    }
    // Full resolution or half horizontal
    else
    {
        int chroma_offset = 0;
        int precision = codec->precision;
        // Reconstruct the output frame from a full resolution decode
        //assert(resolution == DECODED_RESOLUTION_FULL);
        if (decoder->use_active_metadata_decoder)
        {
            int frame_size, channels = 3;
            // Four planes when the encode carries alpha and the output wants it
            if (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
                channels = 4;
            // Intermediate buffer holds 16-bit pixels for every channel
            frame_size = info->width * info->height * channels * 2;
            // (Re)allocate the intermediate filter buffer if missing or too small
            if (decoder->RGBFilterBuffer16 == NULL || decoder->RGBFilterBufferSize < frame_size)
            {
#if _ALLOCATOR
                if (decoder->RGBFilterBuffer16)
                {
                    FreeAligned(decoder->allocator, decoder->RGBFilterBuffer16);
                    decoder->RGBFilterBuffer16 = NULL;
                }
                decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, frame_size, 16);
#else
                if (decoder->RGBFilterBuffer16)
                {
                    MEMORY_ALIGNED_FREE(decoder->RGBFilterBuffer16);
                    decoder->RGBFilterBuffer16 = NULL;
                }
                decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, 16);
#endif
                assert(decoder->RGBFilterBuffer16 != NULL);
                if (! (decoder->RGBFilterBuffer16 != NULL))
                {
                    return CODEC_ERROR_MEMORY_ALLOC;
                }
                decoder->RGBFilterBufferSize = frame_size;
            }
#if _THREADED
            // Inverse transform into the intermediate 16-bit planar buffer
            TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame, num_channels,
                (uint8_t *)decoder->RGBFilterBuffer16, info->width * channels * 2,
                info, chroma_offset, precision);
#else
            // Decode that last transform to rows of Bayer data (one row per channel)
            TransformInverseSpatialToRow16u(transform_array, frame, num_channels,
                (uint8_t *)decoder->RGBFilterBuffer16, info->width * channels * 2,
                info, &decoder->scratch, chroma_offset, precision);
#endif
#if _THREADED
            {
                WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
                // Lazily create the worker thread pool on first use
                if (decoder->worker_thread.pool.thread_count == 0)
                {
                    CreateLock(&decoder->worker_thread.lock);
                    // Initialize the pool of transform worker threads
                    ThreadPoolCreate(&decoder->worker_thread.pool,
                                     decoder->thread_cntrl.capabilities >> 16/*cpus*/,
                                     WorkerThreadProc,
                                     decoder);
                }
#endif
                // Post a message to the mailbox
                mailbox->output = output;
                mailbox->pitch = pitch;
                memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
                mailbox->jobType = JOB_TYPE_OUTPUT;
                decoder->RGBFilterBufferPhase = 1;
                // Set the work count to the number of rows to process
                ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);
                // Start the transform worker threads
                ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
                // Wait for all of the worker threads to finish
                ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
                decoder->RGBFilterBufferPhase = 0;
            }
#endif
        }
        else
        {
            //DAN20081203 -- fix for 444 decodes in AE32-bit float
            decoder->frame.white_point = 16;
            //decoder->frame.signed_pixels = 0;
            // Pick the inverse transform that emits the requested output format
            switch (info->format)
            {
                case DECODED_FORMAT_B64A:
#if _THREADED
                    TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
                        output, pitch,
                        info, chroma_offset, precision,
                        InvertHorizontalStrip16sRGB2B64A);
#else
                    TransformInverseRGB444ToB64A(transform_array, frame, num_channels, output, pitch,
                        info, &decoder->scratch, chroma_offset, precision);
#endif
                    break;
                case DECODED_FORMAT_YU64: //TODO : Threading
                    TransformInverseRGB444ToYU64(transform_array, frame, num_channels, output, pitch,
                        info, &decoder->scratch, chroma_offset, precision);
                    break;
                case DECODED_FORMAT_RGB24:
                case DECODED_FORMAT_RGB24_INVERTED:
                case DECODED_FORMAT_RGB32:
                case DECODED_FORMAT_RGB32_INVERTED://TODO, needs to be threaded. WIP
                    TransformInverseRGB444ToRGB32(transform_array, frame, num_channels, output, pitch,
                        info, &decoder->scratch, chroma_offset, precision);
                    break;
                case DECODED_FORMAT_RG48:
                case DECODED_FORMAT_RG64: //TODO, needs to be threaded. WIP
                    TransformInverseRGB444ToRGB48(transform_array, frame, num_channels, output, pitch,
                        info, &decoder->scratch, chroma_offset, precision);
                    break;
                case DECODED_FORMAT_R210:
                case DECODED_FORMAT_DPX0:
                case DECODED_FORMAT_RG30:
                case DECODED_FORMAT_AR10:
                case DECODED_FORMAT_AB10:
#if _THREADED
                    TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
                        output, pitch,
                        info, chroma_offset, precision,
                        InvertHorizontalStrip16sRGB2RG30);
#else
                    TransformInverseRGB444ToRGB48(transform_array, frame, num_channels, output, pitch,
                        info, &decoder->scratch, chroma_offset, precision);
#endif
                    break;
                case DECODED_FORMAT_YUYV:
                case DECODED_FORMAT_UYVY:
#if _THREADED
                    TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
                        output, pitch,
                        info, chroma_offset, precision,
                        InvertHorizontalStrip16sRGB2YUV);
#else
                    TransformInverseSpatialYUV422ToOutput(decoder, transform_array, frame, num_channels, output, pitch,
                        info, &decoder->scratch, chroma_offset, precision,
                        InvertHorizontalStripRGB16sToPackedYUV8u);
#endif
                    break;
                case DECODED_FORMAT_R408:
                case DECODED_FORMAT_V408:
#if _THREADED
                    TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
                        output, pitch,
                        info, chroma_offset, precision,
                        InvertHorizontalStrip16sRGBA2YUVA);
#else
                    assert(0);
#endif
                    break;
                case DECODED_FORMAT_YR16:
#if _THREADED
                    TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
                        output, pitch,
                        info, chroma_offset, precision,
                        InvertHorizontalStrip16sRGB2YR16);
#else
                    assert(0);// missing non-threaded version
#endif
                    break;
                case DECODED_FORMAT_V210:
#if _THREADED
                    TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
                        output, pitch,
                        info, chroma_offset, precision,
                        InvertHorizontalStrip16sRGB2v210);
#else
                    assert(0);// missing non-threaded version
#endif
                    break;
                case DECODED_FORMAT_CbYCrY_8bit: // DECODED_FORMAT_CT_UCHAR
#if _THREADED
                    TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
                        output, pitch,
                        info, chroma_offset, precision,
                        InvertHorizontalStrip16sRGB2YUV);
#else
                    assert(0);// missing non-threaded version
#endif
                    break;
                //TODO: Add code to handle other Avid pixel formats
                case DECODED_FORMAT_CbYCrY_16bit: // DECODED_FORMAT_CT_SHORT
                case DECODED_FORMAT_CbYCrY_10bit_2_8: // DECODED_FORMAT_CT_10Bit_2_8
                case DECODED_FORMAT_CbYCrY_16bit_2_14: // DECODED_FORMAT_CT_SHORT_2_14
                case DECODED_FORMAT_CbYCrY_16bit_10_6: // DECODED_FORMAT_CT_USHORT_10_6
                    assert(0);
                    break;
                default:
#if (DEBUG)
                    if (logfile)
                    {
                        fprintf(logfile, "Invalid decoded format: %d\n", info->format);
                    }
#endif
                    assert(0);
                    error = CODEC_ERROR_INVALID_FORMAT;
                    break;
            }
        }
    }
    STOP(tk_convert);
    return error;
}
// Convert 16-bit signed lowpass data into the requested output format
//
// decoder       - decoder state (scratch space, encoded format, frame format)
// image_array   - per-channel lowpass wavelet images (transform order: G, R, B, optional A)
// num_channels  - number of channels present in image_array
// output_buffer - destination pixel buffer
// output_pitch  - bytes per output row
// info          - output frame dimensions, pixel format, and colorspace
// chroma_offset - currently unused by this routine
// precision     - encoded bit depth, used to derive the 16-bit scaling shift
void CopyLowpassRGB444ToBuffer(DECODER *decoder, IMAGE *image_array[], int num_channels,
                               uint8_t *output_buffer, int32_t output_pitch,
                               FRAME_INFO *info, int chroma_offset,
                               int precision)
{
    bool inverted = false;
    int output_width = info->width;
    int output_height = info->height;
    int format = info->format;
    // Left shift to scale the pixels to 16 bits minus the shift already in the lowpass values
    const int shift = 16 - precision - PRESCALE_LUMA;
    START(tk_convert);
#if 0
    // Fill the output buffer with blank values
    EraseOutputBuffer(output_buffer, info->width, info->height, output_pitch, info->format);
#endif
    // Determine the type of conversion
    switch (info->format)
    {
    case DECODED_FORMAT_RGB24:
    case DECODED_FORMAT_RGB32:
        inverted = true;
        // fallthrough -- shares the RGB conversion below with the image inverted
    case DECODED_FORMAT_RGB24_INVERTED:
    case DECODED_FORMAT_RGB32_INVERTED:
    case DECODED_FORMAT_B64A:
    case DECODED_FORMAT_R210:
    case DECODED_FORMAT_DPX0:
    case DECODED_FORMAT_RG30:
    case DECODED_FORMAT_AR10:
    case DECODED_FORMAT_AB10:
    case DECODED_FORMAT_RG48:
    case DECODED_FORMAT_RG64: //WIP
        ConvertLowpassRGB444ToRGB(image_array, output_buffer, output_width, output_height,
                                  output_pitch, format, inverted, shift, num_channels);
        break;
    case DECODED_FORMAT_YUYV:
    case DECODED_FORMAT_UYVY:
    {
        // Channels are stored in transform order: G first, then R and B
        IMAGE *g_image = image_array[0];
        IMAGE *r_image = image_array[1];
        IMAGE *b_image = image_array[2];
        // NOTE(review): the comparisons below use COLOR_FORMAT_* constants while
        // the enclosing switch uses DECODED_FORMAT_* -- presumably the two
        // enumerations share values for YUYV/UYVY; verify, otherwise one of
        // these branches may never be taken.
        if (info->format == COLOR_FORMAT_YUYV)
        {
            ConvertRGB2YUV(r_image->band[0], g_image->band[0], b_image->band[0],
                           r_image->pitch, g_image->pitch, b_image->pitch,
                           output_buffer, output_pitch,
                           output_width, output_height, 14,
                           info->colorspace, info->format);
        }
        else if (info->format == COLOR_FORMAT_UYVY)
        {
            ConvertRGB2UYVY(r_image->band[0], g_image->band[0], b_image->band[0],
                            r_image->pitch, g_image->pitch, b_image->pitch,
                            output_buffer, output_pitch,
                            output_width, output_height, 14,
                            info->colorspace, info->format);
        }
    }
    break;
    default:
    {
        // Generic path: repack each row into a planar scanline in scratch
        // memory and let the active-metadata converters emit the output format
        int y;
        IMAGE *g_image = image_array[0];
        IMAGE *r_image = image_array[1];
        IMAGE *b_image = image_array[2];
        IMAGE *a_image = image_array[3];    // only dereferenced on the alpha path below
        unsigned short *scanline = (unsigned short *)decoder->scratch.free_ptr;
        //unsigned short *scanline2 = scanline + output_width*3;
        uint8_t *newline = (uint8_t *)output_buffer;
        unsigned short *Rptr, *Gptr, *Bptr, *Aptr = NULL;
        Rptr = (unsigned short *)r_image->band[0];
        Gptr = (unsigned short *)g_image->band[0];
        Bptr = (unsigned short *)b_image->band[0];
        if (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
        {
            // Four-channel path: copy the R, G, B, and A planes into one scanline
            Aptr = (unsigned short *)a_image->band[0];
            for (y = 0; y < output_height; y++)
            {
                int flags = (ACTIVEMETADATA_PLANAR);
                int whitebitdepth = 14;
                memcpy(scanline, Rptr, info->width * 2);
                memcpy(scanline + info->width, Gptr, info->width * 2);
                memcpy(scanline + info->width * 2, Bptr, info->width * 2);
                memcpy(scanline + info->width * 3, Aptr, info->width * 2);
                // Pitches are in bytes; the pointers step over 16-bit pixels
                Rptr += r_image->pitch / 2;
                Gptr += g_image->pitch / 2;
                Bptr += b_image->pitch / 2;
                Aptr += a_image->pitch / 2;
                Convert4444LinesToOutput(decoder, info->width, 1, y, scanline,
                                         newline, output_pitch, info->format, whitebitdepth, flags);
                newline += output_pitch;
            }
        }
        else
        {
            // Three-channel path: R, G, B planes only
            for (y = 0; y < output_height; y++)
            {
                int flags = (ACTIVEMETADATA_PLANAR);
                int whitebitdepth = 14;
                memcpy(scanline, Rptr, info->width * 2);
                memcpy(scanline + info->width, Gptr, info->width * 2);
                memcpy(scanline + info->width * 2, Bptr, info->width * 2);
                Rptr += r_image->pitch / 2;
                Gptr += g_image->pitch / 2;
                Bptr += b_image->pitch / 2;
                ConvertLinesToOutput(decoder, info->width, 1, y, scanline,
                                     newline, output_pitch, info->format, whitebitdepth, flags);
                newline += output_pitch;
            }
        }
    }
    //assert(0);
    break;
    }
    STOP(tk_convert);
}
#if _THREADED
// Threaded inverse transform using the new threads API
// Threaded inverse spatial transform (new threads API) that converts decoded
// YUV 4:2:2 wavelet data into the output buffer.  Posts one wavelet job to the
// worker thread pool and blocks until every middle row has been processed.
void TransformInverseSpatialThreadedYUV422ToBuffer(DECODER *decoder, int frame_index, int num_channels,
        uint8_t *output, int pitch, FRAME_INFO *info,
        int chroma_offset, int precision)
{
#if (DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    //TODO: Add support for more output formats
    int format = DECODED_FORMAT_RGB32;
    // Half the frame height rounded up to a multiple of eight: the upper and
    // lower spatial transforms only share the middle rows
    int middle_row_count = (((info->height + 7) / 8) * 8) / 2;
    // Shared mailbox used to describe the job to the worker threads
    WORKER_THREAD_DATA *message = &decoder->worker_thread.data;
    // Inverse horizontal filter that emits the desired output format
    HorizontalInverseFilterOutputProc horizontal_filter_proc;
#if _DELAY_THREAD_START
    // Lazily create the worker thread pool the first time it is needed
    if (decoder->worker_thread.pool.thread_count == 0)
    {
        CreateLock(&decoder->worker_thread.lock);
        ThreadPoolCreate(&decoder->worker_thread.pool,
                         decoder->thread_cntrl.capabilities >> 16/*cpus*/,
                         WorkerThreadProc,
                         decoder);
    }
#endif
    // Select the inverse horizontal filter for the output format
    if (format == DECODED_FORMAT_RGB32)
    {
        horizontal_filter_proc = InvertHorizontalStripYUV16sToPackedRGB32;
    }
    else
    {
        // No other output formats are implemented yet
        assert(0);
        return;
    }
    // Describe the wavelet job in the mailbox
    message->jobType = JOB_TYPE_WAVELET;
    message->horizontal_filter_proc = horizontal_filter_proc;
    message->frame = frame_index;
    message->num_channels = num_channels;
    message->output = output;
    message->pitch = pitch;
    message->chroma_offset = chroma_offset;
    message->precision = precision;
    memcpy(&message->info, info, sizeof(FRAME_INFO));
    // One unit of work per middle row; dispatch and block until the pool drains
    ThreadPoolSetWorkCount(&decoder->worker_thread.pool, middle_row_count);
    ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
    ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
#if (DEBUG)
    if (logfile)
    {
        fprintf(logfile, "All worker threads signalled done\n");
    }
#endif
}
// Threaded inverse transform using the new threads API
// Convert RGB RGBA or BAYER (4 channel) data to a 16-bit planar format
// Threaded inverse spatial transform (new threads API) that converts RGB,
// RGBA, or four-channel Bayer wavelet data into rows of 16-bit planar pixels.
// Posts one wavelet job to the worker thread pool and blocks until finished.
void TransformInverseSpatialUniversalThreadedToRow16u(DECODER *decoder, int frame_index, int num_channels,
        uint8_t *output, int pitch, FRAME_INFO *info,
        int chroma_offset, int precision)
{
#if (DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    // Half the frame height rounded up to a multiple of eight: the upper and
    // lower spatial transforms only share the middle rows
    int middle_row_count = (((info->height + 7) / 8) * 8) / 2;
    // Shared mailbox used to describe the job to the worker threads
    WORKER_THREAD_DATA *message = &decoder->worker_thread.data;
    // This routine always emits 16-bit planar rows from the horizontal filter
    HorizontalInverseFilterOutputProc row_filter = InvertHorizontalStrip16sToRow16uPlanar;
#if _DELAY_THREAD_START
    // Lazily create the worker thread pool the first time it is needed
    if (decoder->worker_thread.pool.thread_count == 0)
    {
        CreateLock(&decoder->worker_thread.lock);
        ThreadPoolCreate(&decoder->worker_thread.pool,
                         decoder->thread_cntrl.capabilities >> 16/*cpus*/,
                         WorkerThreadProc,
                         decoder);
    }
#endif
    // Describe the wavelet job in the mailbox
    message->jobType = JOB_TYPE_WAVELET;
    message->horizontal_filter_proc = row_filter;
    message->frame = frame_index;
    message->num_channels = num_channels;
    message->output = output;
    message->pitch = pitch;
    message->chroma_offset = chroma_offset;
    message->precision = precision;
    memcpy(&message->info, info, sizeof(FRAME_INFO));
    // One unit of work per middle row; dispatch and block until the pool drains
    ThreadPoolSetWorkCount(&decoder->worker_thread.pool, middle_row_count);
    ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
    ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
}
// Threaded inverse transform using the new threads API
// Convert RGB RGBA or BAYER (4 channel) data to a 16-bit planar format
// Threaded inverse spatial transform (new threads API) that writes directly to
// the output buffer using the caller-supplied horizontal filter.  Posts one
// wavelet job to the worker thread pool and blocks until all rows are done.
void TransformInverseSpatialUniversalThreadedToOutput(
        DECODER *decoder, int frame_index, int num_channels,
        uint8_t *output, int pitch, FRAME_INFO *info,
        int chroma_offset, int precision,
        HorizontalInverseFilterOutputProc horizontal_filter_proc)
{
#if (DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    // Half the frame height rounded up to a multiple of eight: the upper and
    // lower spatial transforms only share the middle rows
    int middle_row_count = (((info->height + 7) / 8) * 8) / 2;
    // Shared mailbox used to describe the job to the worker threads
    WORKER_THREAD_DATA *message = &decoder->worker_thread.data;
#if _DELAY_THREAD_START
    // Lazily create the worker thread pool the first time it is needed
    if (decoder->worker_thread.pool.thread_count == 0)
    {
        CreateLock(&decoder->worker_thread.lock);
        ThreadPoolCreate(&decoder->worker_thread.pool,
                         decoder->thread_cntrl.capabilities >> 16/*cpus*/,
                         WorkerThreadProc,
                         decoder);
    }
#endif
    // Describe the wavelet job in the mailbox
    message->jobType = JOB_TYPE_WAVELET;
    message->horizontal_filter_proc = horizontal_filter_proc;
    message->frame = frame_index;
    message->num_channels = num_channels;
    message->output = output;
    message->pitch = pitch;
    message->chroma_offset = chroma_offset;
    message->precision = precision;
    memcpy(&message->info, info, sizeof(FRAME_INFO));
    // One unit of work per middle row; dispatch and block until the pool drains
    ThreadPoolSetWorkCount(&decoder->worker_thread.pool, middle_row_count);
    ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
    ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
}
// Routines for the worker threads that use the new threads API

// Inverse spatial transform for the portion of the frame handled by one worker
// thread.  The designated top thread processes the first row with top-border
// filters, the bottom thread processes the last row with bottom-border
// filters, and every thread then pulls middle rows from the pool's work queue
// until no work remains.  Each thread carves its own slice out of the shared
// scratch buffer for per-channel row storage and intermediate results.
void TransformInverseSpatialSectionToOutput(DECODER *decoder, int thread_index,
        int frame_index, int num_channels,
        uint8_t *output_buffer, int output_pitch, FRAME_INFO *info,
        int chroma_offset, int precision,
        HorizontalInverseFilterOutputProc horizontal_filter_proc)
{
#if (DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    TRANSFORM **transform = decoder->transform;
    const SCRATCH *scratch = &decoder->scratch;
    // Wavelet band pointers and pitches gathered per channel
    PIXEL *lowlow_band[CODEC_MAX_CHANNELS];
    PIXEL *lowhigh_band[CODEC_MAX_CHANNELS];
    PIXEL *highlow_band[CODEC_MAX_CHANNELS];
    PIXEL *highhigh_band[CODEC_MAX_CHANNELS];
    int lowlow_pitch[CODEC_MAX_CHANNELS];
    int lowhigh_pitch[CODEC_MAX_CHANNELS];
    int highlow_pitch[CODEC_MAX_CHANNELS];
    int highhigh_pitch[CODEC_MAX_CHANNELS];
    int channel_width[CODEC_MAX_CHANNELS];
    uint8_t *output_row_ptr;
    uint8_t *plane_array[TRANSFORM_MAX_CHANNELS];   // per-channel output row buffers
    int plane_pitch[TRANSFORM_MAX_CHANNELS];
    int output_width = info->width;
    int output_height = info->height;
    int half_height = output_height / 2;
    int luma_band_width;    // width of the luma wavelet bands (set below; informational)
    ROI strip;
    char *bufptr;
    // NOTE(review): last_row and last_display_row are only assigned while
    // processing channel 0, so this routine assumes num_channels >= 1.
    int last_row;
    int last_display_row;
    int last_line;
    int channel;
    int row;
    int odd_display_lines = 0;
    THREAD_ERROR error;
    // Push the scratch space state to allocate a new section
    char *buffer = scratch->free_ptr;
    size_t buffer_size = scratch->free_size;
    //TODO: Replace uses of buffer variables with calls to the scratch space API
    // This version is for 16-bit pixels
    assert(sizeof(PIXEL) == 2);
    // Must have a valid inverse horizontal filter
    assert(horizontal_filter_proc != NULL);
    // Check for enough space in the local array allocations
    // assert(num_channels <= CODEC_NUM_CHANNELS);
    assert(num_channels <= TRANSFORM_MAX_CHANNELS);
    // Divide the buffer space between the four threads
    buffer_size /= decoder->worker_thread.pool.thread_count; // used to assume max of 4
    buffer += buffer_size * thread_index;
    // Round the buffer pointer up to the next cache line
    buffer_size -= (_CACHE_LINE_SIZE - ((uintptr_t)buffer & _CACHE_LINE_MASK));
    bufptr = (char *)ALIGN(buffer, _CACHE_LINE_SIZE);
    // Allocate buffer space for the output rows from each channel
    for (channel = 0; channel < num_channels; channel++)
    {
        // Get the row width for this channel
        IMAGE *wavelet = transform[channel]->wavelet[frame_index];
        int width = wavelet->width;
        int height = wavelet->height;
        //int pitch = wavelet->pitch;
        size_t channel_buffer_size;
        // Compute the width and pitch for the output rows stored in this buffer
        int buffer_width = 2 * width;
        int buffer_height = 2;
        int buffer_pitch = ALIGN16(buffer_width);
        // Compute the total allocation for this channel
        channel_buffer_size = buffer_height * buffer_pitch;
        // Check that there is enough space available
        assert(channel_buffer_size <= buffer_size);
        // Allocate the buffer for this channel
        plane_array[channel] = (uint8_t *)bufptr;
        // Remember the pitch for rows in this channel
        plane_pitch[channel] = buffer_pitch;
        // Advance the buffer pointer past the allocated space for this channel
        bufptr += channel_buffer_size;
        // Reduce the amount of space remaining in the buffer
        buffer_size -= channel_buffer_size;
        // The dimensions of the output image are the same as the luma channel
        if (channel == 0)
        {
            strip.width = buffer_width;
            strip.height = buffer_height;
            last_row = height;
            //DAN20050606 Added to fix issue with heights not divisible by 8.
            last_display_row = (info->height + 1) / 2; // DAN20090215 -- fix for odd display lines.
            odd_display_lines = info->height & 1;
            // Remember the width of the wavelet bands for luma
            luma_band_width = width;
        }
        // Save the bands per channel for routines that process all channels at once
        lowlow_band[channel] = wavelet->band[0];
        lowhigh_band[channel] = wavelet->band[1];
        highlow_band[channel] = wavelet->band[2];
        highhigh_band[channel] = wavelet->band[3];
        lowlow_pitch[channel] = wavelet->pitch;
        lowhigh_pitch[channel] = wavelet->pitch;
        highlow_pitch[channel] = wavelet->pitch;
        highhigh_pitch[channel] = wavelet->pitch;
        // Remember the width of the wavelet for this channel
        channel_width[channel] = width;
    }
    // Use the remaining buffer space for intermediate results
    buffer_size -= (_CACHE_LINE_SIZE - ((uintptr_t)bufptr & _CACHE_LINE_MASK));
    buffer = (char *)ALIGN(bufptr, _CACHE_LINE_SIZE);
    // The middle-row loop stops one row short when the wavelet height matches
    // the displayed height, because the bottom-border filter handles that row
    if (last_row == last_display_row)
    {
        last_line = half_height - 1;
    }
    else
    {
        last_line = half_height;
    }
    if (odd_display_lines)
        last_line++;
    if (thread_index == TRANSFORM_WORKER_TOP_THREAD)
    {
        // Process the first row
        row = 0;
        output_row_ptr = output_buffer;
#if (0 && DEBUG)
        if (logfile)
        {
            fprintf(logfile, "Thread: %d, processing row: %d\n", thread_index, row);
        }
#endif
        // Process the first row using special border filters for the top row
        InvertSpatialTopRow16sToOutput(decoder, thread_index, lowlow_band, lowlow_pitch,
                                       lowhigh_band, lowhigh_pitch,
                                       highlow_band, highlow_pitch,
                                       highhigh_band, highhigh_pitch,
                                       output_row_ptr, output_pitch,
                                       output_width, info->format, info->colorspace,
                                       row, channel_width,
                                       (PIXEL *)buffer, buffer_size,
                                       precision,
                                       horizontal_filter_proc);
    }
    if (thread_index == TRANSFORM_WORKER_BOTTOM_THREAD || decoder->worker_thread.pool.thread_count == 1)
    {
        if (last_row == last_display_row) //DAN20071218 -- Added as old 1080 RAW files would crash
        {
            int pitch = output_pitch;
            // Process the last row
            row = last_row - 1;
            // 3D stacked-anamorphic decodes pack two views per row, so each
            // view advances by half the pitch
            if (decoder->channel_decodes > 1 && decoder->frame.format == DECODED_FORMAT_YUYV) // 3d work
                if (decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC)
                    pitch >>= 1;
            // Begin filling the last output row with results
            output_row_ptr = output_buffer + row * 2 * pitch;
#if (0 && DEBUG)
            if (logfile)
            {
                fprintf(logfile, "Thread: %d, processing row: %d\n", thread_index, row);
            }
#endif
            // Process the last row using special border filters for the bottom row
            if (decoder->channel_decodes > 1 && decoder->frame.format == DECODED_FORMAT_YUYV)
                if (decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC || decoder->channel_blend_type == BLEND_LINE_INTERLEAVED) // 3d Work TODO Fix
                    output_row_ptr -= output_pitch;
            InvertSpatialBottomRow16sToOutput(decoder, thread_index, lowlow_band, lowlow_pitch,
                                              lowhigh_band, lowhigh_pitch,
                                              highlow_band, highlow_pitch,
                                              highhigh_band, highhigh_pitch,
                                              output_row_ptr, output_pitch,
                                              output_width, info->format, info->colorspace,
                                              row, channel_width,
                                              (PIXEL *)buffer, buffer_size,
                                              precision, odd_display_lines,
                                              horizontal_filter_proc);
        }
    }
    // Loop until all of the middle rows have been processed
    for (;;)
    {
        int work_index;
        int row;    // intentionally shadows the outer row variable
        // Wait for one row from each channel to process
        error = PoolThreadWaitForWork(&decoder->worker_thread.pool, &work_index, thread_index);
        // Is there another row to process?
        if (error == THREAD_ERROR_OKAY)
        {
            int pitch = output_pitch;
            // Compute the next row to process from the work index
            row = work_index + 1;
            if (decoder->channel_decodes > 1 && decoder->frame.format == DECODED_FORMAT_YUYV) // 3d work
                if (decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC) // stacked
                    pitch >>= 1;
            // Compute the output row corresponding to this row index
            output_row_ptr = output_buffer + row * 2 * pitch;
        }
        else
        {
            // No more work to do
            return;
        }
        // Is the row inside the top and bottom border?
        if (0 < row && row < last_line)
        {
            int outputlines = 2;
#if (0 && DEBUG)
            if (logfile)
            {
                fprintf(logfile, "Thread: %d, processing row: %d\n", thread_index, row);
            }
#endif
            // The final middle row only emits one line when the display height is odd
            if (odd_display_lines && row == last_line - 1)
            {
                outputlines = 1;
            }
            // Process the middle row using the normal wavelet filters
            InvertSpatialMiddleRow16sToOutput(decoder, thread_index,
                                              lowlow_band, lowlow_pitch,
                                              lowhigh_band, lowhigh_pitch,
                                              highlow_band, highlow_pitch,
                                              highhigh_band, highhigh_pitch,
                                              output_row_ptr, output_pitch,
                                              output_width, info->format, info->colorspace,
                                              row, channel_width,
                                              (PIXEL *)buffer, buffer_size,
                                              precision,
                                              horizontal_filter_proc,
                                              outputlines);
        }
    }
}
#endif //_THREADED
// Scan an encoded sample for the tag value pair with the specified tag.
//
// data     - pointer to the encoded sample bytes
// datasize - number of bytes available at data
// findtag  - tag to search for
// retvalue - receives the 16-bit value paired with the tag when found
//
// Returns true (and sets *retvalue) if the tag is found before a group or
// frame trailer, the stream is exhausted, or an unknown tag is encountered;
// returns false otherwise.  Sized chunks that are not the target are skipped.
bool GetTuplet(unsigned char *data, int datasize,
               unsigned short findtag, unsigned short *retvalue)
{
    bool ret = false;
    BITSTREAM myinput, *pinput;
    TAGVALUE segment;
    TAGWORD tag, value;
    int error = 0;
    //char t[100];
    InitBitstream(&myinput);
    myinput.lpCurrentWord = data;
    myinput.nWordsUsed = datasize;
    pinput = &myinput;
    do
    {
        bool optional = false;  // NOTE(review): recorded but never read afterwards
        int chunksize = 0;
        // Read the next tag value pair from the bitstream
        segment = GetSegment(pinput);
        tag = segment.tuple.tag;
        value = segment.tuple.value;
        // Is this an optional tag?  (negative tags mark optional entries)
        if (tag < 0)
        {
            tag = NEG(tag);
            optional = true;
        }
        // Decode the chunk payload size (in 32-bit words) from the tag class
        if (tag & 0x2000)
        {
            // Large chunk: 24-bit size split across the tag and value fields
            chunksize = value;
            chunksize &= 0xffff;
            chunksize += ((tag & 0xff) << 16);
        }
        else if (tag & 0x4000)
        {
            // Small chunk: 16-bit size carried in the value field
            chunksize = value;
            chunksize &= 0xffff;
        }
        else if (tag == CODEC_TAG_INDEX)
        {
            chunksize = value;
            chunksize &= 0xffff;
        }
        else
        {
            // Not a sized chunk
            chunksize = 0;
        }
        if ((int)(tag) <= ((int)CODEC_TAG_LAST_NON_SIZED) || tag & 0x6000)
        {
            int skip = 1;
            error = 0;
            // Found the requested tag: report its value and stop scanning
            if (tag == (int)findtag)
            {
                *retvalue = value;
                ret = true;
                break;
            }
            if ((tag & 0xff00) == 0x2200) //sample size
            {
                chunksize = 0; // don't test against pinput->nWordsUsed, as we might have read only enough of the sample for the metadata.
                skip = 0;
            }
            if ((tag & 0xff00) == 0x2300) //uncompressed sample size
            {
                skip = 1;
            }
            if ((tag & 0xff00) == 0x2100) //level
                skip = 0;
            if (chunksize)
            {
                // Stop if the chunk claims more data than remains in the stream
                if (chunksize * 4 > pinput->nWordsUsed || chunksize < 0)
                {
                    break;
                }
                if (skip)
                {
                    //unsigned int *iptr = (unsigned int *)pinput->lpCurrentWord;
                    // Skip over the chunk payload (chunksize 32-bit words)
                    pinput->lpCurrentWord += chunksize * 4;
                    pinput->nWordsUsed -= chunksize * 4;
                }
            }
        }
        else
        {
            // Unknown tag class: abandon the scan
            error = 1;
        }
    } while (tag != CODEC_TAG_GROUP_TRAILER &&
             tag != CODEC_TAG_FRAME_TRAILER &&
             pinput->nWordsUsed > 0 && !error);
    return ret;
}
/*
 * GetTupletAddr - like GetTuplet, but return the buffer position of the tag.
 *
 * data:     raw sample buffer to scan.
 * datasize: size of the buffer.
 * findtag:  tag to search for.
 * retvalue: receives the value of the first matching tag.
 * returns:  pointer to the current read position just after the matching
 *           tag/value segment was consumed, or NULL if not found / bad input.
 */
uint8_t *GetTupletAddr(uint8_t *data,
                       int datasize,
                       uint16_t findtag,
                       int16_t *retvalue)
{
    unsigned char *ret = NULL;
    BITSTREAM myinput, *pinput;
    TAGVALUE segment;
    TAGWORD tag, value;
    int error = 0;
    if (data == NULL || datasize == 0)
    {
        return NULL;
    }
    //InitBitstream(&myinput);
    // Manual init instead of InitBitstream(); only the fields used below are set.
    memset(&myinput, 0, sizeof(BITSTREAM));
    myinput.lpCurrentWord = data;
    myinput.nWordsUsed = datasize;
    myinput.nBitsFree = BITSTREAM_LONG_SIZE;
    pinput = &myinput;
    do
    {
        bool optional = false;
        int chunksize = 0;
        // Read the next tag value pair from the bitstream
        segment = GetSegment(pinput);
        tag = segment.tuple.tag;
        value = segment.tuple.value;
        // Is this an optional tag? (optional tags are encoded negated)
        if (tag < 0)
        {
            tag = NEG(tag);
            optional = true;
        }
        // Decode the payload size carried by sized tags (see GetTuplet).
        if (tag & 0x2000)
        {
            chunksize = value;
            chunksize &= 0xffff;
            chunksize += ((tag & 0xff) << 16);
        }
        else if (tag & 0x4000)
        {
            chunksize = value;
            chunksize &= 0xffff;
        }
        else if (tag == CODEC_TAG_INDEX)
        {
            chunksize = value;
            chunksize &= 0xffff;
        }
        else
        {
            chunksize = 0;
        }
        if ((int)(tag) <= ((int)CODEC_TAG_LAST_NON_SIZED) || tag & 0x6000)
        {
            int skip = 1;
            error = 0;
            // Found the requested tag: report value and current address, stop.
            if (tag == (int)findtag)
            {
                *retvalue = value;
                ret = pinput->lpCurrentWord;
                break;
            }
            if ((tag & 0xff00) == 0x2200) //sample size
            {
                chunksize = 0; // don't test against pinput->nWordsUsed, as we might be only reader enough for metadata only.
                skip = 0;
            }
            if ((tag & 0xff00) == 0x2300) //uncompressed sample size
            {
                skip = 1;
            }
            if ((tag & 0xff00) == 0x2100) //level
                skip = 0;
            if (chunksize)
            {
                // Bail out if the declared chunk extends past the buffer.
                if (chunksize * 4 > pinput->nWordsUsed || chunksize < 0)
                {
                    break;
                }
                if (skip)
                {
                    //unsigned int *iptr = (unsigned int *)pinput->lpCurrentWord;
                    pinput->lpCurrentWord += chunksize * 4;
                    pinput->nWordsUsed -= chunksize * 4;
                }
            }
        }
        else
        {
            error = 1;
        }
    } while (tag != CODEC_TAG_GROUP_TRAILER &&
             tag != CODEC_TAG_FRAME_TRAILER &&
             pinput->nWordsUsed > 0 && !error);
    return ret;
}
|
smod.c |
#ifndef _SMOD_H_
#include "smod.h"
#endif
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* brinv -
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
 * brinv - invert an n x n matrix in place by Gauss-Jordan elimination
 * with full pivoting.
 *
 * a: matrix stored row-major in a flat array of n*n doubles; replaced
 *    by its inverse on success.
 * n: matrix dimension.
 * returns: 1 on success; on a (near-)singular matrix it prints the rank
 *          and calls exit(0) — it never returns failure to the caller.
 */
int brinv (double *a,int n)
{
    int *is, *js, i, j, k, l, u, v, rk;
    double d, p;
    // is[k]/js[k] record the row/column pivot swaps so they can be
    // undone in reverse order after elimination.
    is = (int *)malloc (n * sizeof(int));
    js = (int *)malloc (n * sizeof(int));
    for (k = 0; k <= n - 1; k++)
    {
        // Full pivot search over the trailing (n-k) x (n-k) submatrix.
        d=0.0;
        for (i = k; i <= n - 1; i++)
        {
            for (j = k; j <= n - 1; j++)
            {
                l = i * n + j;
                p = fabs (a[l]);
                if (p > d)
                {
                    d = p;
                    is[k] = i;
                    js[k] = j;
                }
            }
        }
        // d + 1.0 == 1.0 tests for a pivot indistinguishable from zero.
        if (d + 1.0 == 1.0)
        {
            free (is);
            free (js);
            rk = brank(a, n, n);
            printf ("error: Matrix is ill-conditioned:\n");
            printf (" not inv\n");
            printf (" rank = %d\n", rk);
            exit(0);
        }
        // Swap pivot row into row k.
        if (is[k] != k)
        {
            for (j = 0; j <= n - 1; j++)
            {
                u = k * n + j;
                v = is[k] * n + j;
                p = a[u];
                a[u] = a[v];
                a[v] = p;
            }
        }
        // Swap pivot column into column k.
        if (js[k] != k)
        {
            for (i = 0; i <= n - 1; i++)
            {
                u = i * n + k;
                v = i * n + js[k];
                p = a[u];
                a[u] = a[v];
                a[v] = p;
            }
        }
        // Gauss-Jordan step: normalize pivot, update row k, then
        // eliminate column k from all other rows.
        l = k * n + k;
        a[l] = 1.0 / a[l];
        for (j = 0; j <= n - 1; j++)
        {
            if (j != k)
            {
                u = k * n + j;
                a[u] = a[u] * a[l];
            }
        }
        for (i = 0; i <= n - 1; i++)
        {
            if (i != k)
                for (j = 0; j <= n - 1; j++)
                    if (j != k)
                    {
                        u = i * n + j;
                        a[u] = a[u] - a[i * n + k] * a[k * n + j];
                    }
        }
        for (i = 0; i <= n - 1; i++)
        {
            if (i != k)
            {
                u = i * n + k;
                a[u] = - a[u] * a[l];
            }
        }
    }
    // Undo the pivot swaps in reverse order (rows/columns exchange roles
    // relative to the forward pass because the matrix is now the inverse).
    for (k = n - 1; k >= 0; k--)
    {
        if (js[k] != k)
        {
            for (j = 0; j <= n - 1; j++)
            {
                u = k * n + j;
                v = js[k] * n + j;
                p = a[u];
                a[u] = a[v];
                a[v] = p;
            }
        }
        if (is[k] != k)
        {
            for (i = 0; i <= n - 1; i++)
            {
                u = i * n + k;
                v = i * n + is[k];
                p = a[u];
                a[u] = a[v];
                a[v] = p;
            }
        }
    }
    free(is);
    free(js);
    return(1);
}
/*
 * brank - numerical rank of an m x n matrix by Gaussian elimination with
 * full pivoting.
 *
 * a: matrix stored row-major in a flat array of m*n doubles; DESTROYED
 *    (overwritten by the elimination).
 * m: number of rows.
 * n: number of columns.
 * returns: the rank, i.e. the number of pivots found before the trailing
 *          submatrix becomes exactly zero (tested via q + 1.0 == 1.0).
 */
int brank(double *a, int m, int n)
{
    int i, j, k, nn, is, js, l, ll, u, v;
    double q, d;
    nn = m;
    if (m >= n) nn = n;             /* at most min(m, n) pivots */
    k = 0;
    for (l = 0; l <= nn - 1; l++)
    {
        /* Full pivot search over the trailing submatrix. */
        q = 0.0;
        for (i = l; i <= m - 1; i++)
            for (j = l; j <= n - 1; j++)
            {
                ll = i * n + j;
                d = fabs(a[ll]);
                if (d > q) { q = d; is = i; js = j; }
            }
        if (q + 1.0 == 1.0)         /* remaining submatrix is zero: done */
            return (k);
        k = k + 1;
        /* Swap pivot row into row l. */
        if (is != l)
        {
            for (j = l; j <= n - 1; j++)
            {
                u = l * n + j;
                v = is * n + j;
                d = a[u]; a[u] = a[v]; a[v] = d;
            }
        }
        /* Swap pivot column into column l. */
        if (js != l)
        {
            for (i = l; i <= m - 1; i++)
            {
                u = i * n + js;
                v = i * n + l;
                d = a[u]; a[u] = a[v]; a[v] = d;
            }
        }
        ll = l * n + l;
        /* Eliminate the pivot column from every remaining ROW.
         * BUG FIX: this loop previously ran i <= n - 1, i.e. it used the
         * column count to bound a row index.  That read and wrote past the
         * end of the array when m < n, and skipped rows n..m-1 (over-
         * counting the rank) when m > n.  The correct bound is m - 1. */
        for (i = l + 1; i <= m - 1; i++)
        {
            d = a[i * n + l] / a[ll];
            for (j = l + 1; j <= n - 1; j++)
            {
                u = i * n + j;
                a[u] = a[u] - d * a[l * n + j];
            }
        }
    }
    return (k);
}
void choldc(double *a, int n, double p[])
/*
 * Cholesky decomposition A = L * L^T of a positive-definite symmetric
 * matrix (Numerical Recipes "choldc", adapted to 0-based, row-major flat
 * storage: element (i,j) is a[i*n+j]).
 *
 * On input, only the upper triangle of a need be given; it is not modified.
 * The Cholesky factor L is returned in the strict lower triangle of a;
 * the diagonal of L is returned separately in p[0..n-1].
 *
 * If the matrix is not positive definite the routine prints the offending
 * indices and the rank (via brank) and calls exit(0) — it does not return
 * an error to the caller.
 */
{
    int i,j,k, rk;
    double sum;
    for (i=0;i<n;i++)
    {
        for (j=i;j<n;j++)
        {
            // sum = a[i][j] - sum_{k<i} L[i][k] * L[j][k]
            sum=a[i * n + j];
            for (k=i-1;k>=0;k--)
                sum -= a[i * n + k]*a[j * n + k];
            if (i == j)
            {
                // printf("i = %d\tj = %d\tsum = %e\n", i, j, sqrt(sum));
                // A diagonal pivot <= 0 means A is not positive definite.
                if (sum <= 0.0)
                {
                    rk = brank(a, n, n);
                    printf("error: Matrix is not positive definite:\n");
                    printf(" i = %d\tj = %d\tsum = %e\n", i, j, sum);
                    printf(" rank = %d\n", rk);
                    exit(0);
                }
                p[i]=sqrt(sum);
            }
            else a[j * n + i]=sum/p[i];
        }
    }
}
void cholsl(double *a, int n, double p[], double b[], double x[])
/*
 * Solve A * x = b for a positive-definite symmetric A whose Cholesky
 * factorization was produced by choldc: the strict lower triangle of a
 * holds L off-diagonals and p[0..n-1] holds the diagonal of L.
 *
 * Only the lower subdiagonal portion of a is accessed; a, n, p are left
 * untouched, so repeated calls with different right-hand sides are fine.
 * b and x may refer to the same array.
 */
{
    int row, col;
    double acc;
    /* Forward substitution: L * y = b, storing y in x. */
    for (row = 0; row < n; row++)
    {
        acc = b[row];
        for (col = row - 1; col >= 0; col--)
            acc -= a[row * n + col] * x[col];
        x[row] = acc / p[row];
    }
    /* Back substitution: L^T * x = y, overwriting x in place. */
    for (row = n - 1; row >= 0; row--)
    {
        acc = x[row];
        for (col = row + 1; col < n; col++)
            acc -= a[col * n + row] * x[col];
        x[row] = acc / p[row];
    }
}
///////////********************////////////////
/*
 * solvels_chol - solve the normal equations a * x = y by Cholesky
 * factorization and, unless nocov == 1, replace a with the covariance
 * matrix inv(a) (upper triangle filled, strict lower triangle zeroed).
 *
 * a:     n x n positive-definite symmetric matrix (row-major, flat);
 *        destroyed — on return holds inv(a) (or the choldc factor if
 *        nocov == 1).
 * y:     right-hand side vector (length n).
 * x:     receives the solution (length n).
 * nocov: 1 = skip the covariance (inverse) computation.
 */
void solvels_chol(double *a, int n, double *y, double *x, int nocov)
{
    double *p, sum;
    int i, k, j;
    p = (double *)calloc (n, sizeof(double));
    choldc(a, n, p);
    cholsl(a, n, p, y, x);
    if (nocov == 1)
    {
        free (p);
        return;
    }
    // Invert the Cholesky factor: lower triangle of a becomes inv(L),
    // with its diagonal written explicitly from 1/p[i].
    for (i=0;i<n;i++)
    {
        a[i * n + i]=1.0/p[i];
        for (j=i+1;j<n;j++)
        {
            sum=0.0;
            for (k=i;k<j;k++)
                sum -= a[j * n + k]*a[k * n + i];
            a[j * n + i]=sum/p[j];
        }
    }
    // inv(A) = inv(L)^T * inv(L); accumulate it into the upper triangle.
    for (i = 0; i <= n - 1; i++)
    {
        for (j = i; j <= n - 1; j++)
        {
            sum = 0.0;
            for (k = j; k <= n - 1; k++)
                sum = sum + a[k * n + i] * a[k * n + j];
            a[i * n + j] = sum;
        }
    }
    // Zero the strict lower triangle; only the upper triangle of the
    // covariance is returned.
    for (i = 0; i <= n - 1; i++)
    {
        for (j = 0; j <= i - 1; j++)
        {
            a[i * n + j] = 0;
        }
    }
    free(p);
    return;
}
/*
 * solvegaus - solve a * x = y by explicit inversion: a is replaced by
 * inv(a) via brinv (which exits on a singular matrix), then x is formed
 * from inv(a) and y.
 * NOTE(review): brmul is assumed to compute the (n x n) * (n x 1)
 * product x = a * y — confirm against brmul's definition.
 */
void solvegaus(double *a, int n, double *y, double *x)
{
    brinv(a, n);
    brmul(a, y, n, n, 1, x);
}
/*
 * pt_orb - sample the interpolated orbit over [ts_orb, te_orb] at step_orb
 * (seconds) and write four product files in the current directory:
 *   forb.xyz  position/velocity, distance and speed
 *   forb.aei  Keplerian elements and orbital period
 *   forb.rtn  position/velocity rotated into the RTN frame
 *   forb.llh  spherical lat/lon and height above RCT
 *
 * dim_eph: record length for the lagrange() ephemeris interpolation.
 * NOTE(review): eph is allocated with dim_eph - 1 slots but 6 entries are
 * read below — this assumes dim_eph >= 7; confirm against lagrange().
 */
void pt_orb (double ts_orb, double te_orb, double step_orb, int dim_eph)
{
    FILE *fp_fxyz, *fp_faei, *fp_frtn, *fp_fllh;
    int i, n;
    double tt, lps, utc, xtm[6], *eph, dist, velt, tp, rtn_p[3], rtn_v[3],
        ele[6], llh[3];
    /*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
    /*--print orbit--*/
    if((fp_fxyz=fopen("forb.xyz","w"))==NULL)
    {
        printf("Cannot write fort.xyz!\n");
        getch();
        exit(0);
    }
    if((fp_faei=fopen("forb.aei","w"))==NULL)
    {
        printf("Cannot write fort.aei!\n");
        getch();
        exit(0);
    }
    if((fp_frtn=fopen("forb.rtn","w"))==NULL)
    {
        printf("Cannot write fort.rtn!\n");
        getch();
        exit(0);
    }
    if((fp_fllh=fopen("forb.llh","w"))==NULL)
    {
        printf("Cannot write fort.llh!\n");
        getch();
        exit(0);
    }
    eph = (double *) calloc (dim_eph - 1, sizeof(double));
    i = 0;
    for (utc = ts_orb; utc <= te_orb; utc = ts_orb + step_orb * i)
    {
        // Convert UTC seconds to TT seconds for the ephemeris lookup.
        lps = getlps (JD0 + ts_orb/86400.0);
        tt = utc + (lps + 32.184);
        lagrange (OR_EPH, DIM_OR, dim_eph, tt, eph);
        for (n = 0; n < 6; n++)
            xtm[n] = eph[n];
        dist = sqrt (xtm[0] * xtm[0] + xtm[1] * xtm[1] + xtm[2] * xtm[2]);
        velt = sqrt (xtm[3] * xtm[3] + xtm[4] * xtm[4] + xtm[5] * xtm[5]);
        tp = xyz2aei(ele, &xtm[0], &xtm[3]);
        xyz2rtn(&xtm[0], &xtm[3], &xtm[0], rtn_p);
        xyz2rtn(&xtm[0], &xtm[3], &xtm[3], rtn_v);
        xyz2llh(xtm, llh);
        fprintf (fp_fxyz, "%14.4f %14.6f %26.14f %26.14f %26.14f ",
                 JD0, utc, xtm[0], xtm[1], xtm[2]);
        fprintf (fp_fxyz, "%24.16f %24.16f %24.16f %16.4f %14.6f \n",
                 xtm[3], xtm[4], xtm[5], dist, velt);
        fprintf (fp_faei, "%14.4f %14.6f %26.14f %10.6f %12.6f ",
                 JD0, utc, ele[0], ele[1], ele[2]);
        fprintf (fp_faei, "%12.6f %12.6f %12.4f %12.4f \n",
                 ele[3], ele[4], ele[5], tp);
        fprintf (fp_frtn, "%14.4f %14.6f %16.4f %16.4f %16.4f ",
                 JD0, utc, rtn_p[0], rtn_p[1], rtn_p[2]);
        fprintf (fp_frtn, "%14.6f %14.6f %14.6f %16.4f %14.6f \n",
                 rtn_v[0], rtn_v[1], rtn_v[2], dist, velt);
        fprintf (fp_fllh, "%14.4f %14.6f %26.14f %26.14f %26.14f\n",
                 JD0, utc, llh[0], llh[1], llh[2] - RCT);
        i++;
    }
    fclose(fp_fxyz);
    fclose(fp_faei);
    fclose(fp_frtn);
    fclose(fp_fllh);
    free (eph);
    return;
}
/*
 * mgrn1 - draw one pseudo-random sample with mean u and spread g.
 *
 * Sums 12 uniform variates generated by a multiplicative-congruential
 * scheme (modulus 65536, multiplier 2053, increment 13849) and shifts
 * the sum by -6 (central-limit approximation of a standard normal).
 *
 * u: mean of the returned sample.
 * g: scale factor applied to the normalized sum.
 * r: generator state, updated in place; same seed => same sequence.
 */
double mgrn1(double u,double g,double *r)
{
    const double modulus = 65536.0;
    const double mult = 2053.0;
    const double incr = 13849.0;
    double acc = 0.0;
    int step;
    for (step = 0; step < 12; step++)
    {
        int quot;
        *r = (*r) * mult + incr;
        quot = (int)(*r / modulus);
        *r = *r - quot * modulus;        /* reduce state modulo 65536 */
        acc = acc + (*r) / modulus;      /* accumulate uniform in [0,1) */
    }
    return u + g * (acc - 6.0);
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* geteop - interpolation of eop
* mjd: double, input MJD
* xp, yp, ut1_utc, dx, dy: output EOP
* http://hpiers.obspm.fr/iers/eop/eopc04_05/eopc04_IAU2000.62-now
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
 * geteop - linear interpolation of Earth-orientation parameters from the
 * global EOPMT table (NEOP rows of 6: mjd, xp, yp, ut1-utc, dx, dy).
 *
 * mjd:      MJD (UTC) at which to interpolate.
 * xp, yp:   polar motion components.
 * ut1_utc:  UT1 - UTC.
 * dx, dy:   celestial pole offsets.
 *
 * Interpolates linearly between the table row whose integer MJD equals
 * (int)mjd and the following row.
 */
void geteop (double mjd, double *xp, double *yp,
             double *ut1_utc, double *dx, double *dy)
{
    int i, mjdi;
    double x1, y1, dt1, dx1, dy1, x2, y2, dt2, dx2, dy2;
    /* BUG FIX: all of these were read uninitialized when mjd fell outside
     * the table (undefined behavior); default everything to zero so the
     * outputs are at least well-defined in that case. */
    mjdi = (int)mjd;
    x1 = y1 = dt1 = dx1 = dy1 = 0.0;
    x2 = y2 = dt2 = dx2 = dy2 = 0.0;
    for (i = 0; i < NEOP; i ++)
    {
        if ((int)EOPMT[i * 6 + 0] == (int)mjd)
        {
            mjdi = (int)EOPMT[i * 6 + 0];
            x1 = EOPMT[i * 6 + 1];
            y1 = EOPMT[i * 6 + 2];
            dt1 = EOPMT[i * 6 + 3];
            dx1 = EOPMT[i * 6 + 4];
            dy1 = EOPMT[i * 6 + 5];
            /* BUG FIX: the second sample previously read row i+1 without a
             * bounds check, running past EOPMT when the match was the last
             * row.  Fall back to a constant extrapolation in that case. */
            if (i + 1 < NEOP)
            {
                x2 = EOPMT[(i + 1) * 6 + 1];
                y2 = EOPMT[(i + 1) * 6 + 2];
                dt2 = EOPMT[(i + 1) * 6 + 3];
                dx2 = EOPMT[(i + 1) * 6 + 4];
                dy2 = EOPMT[(i + 1) * 6 + 5];
            }
            else
            {
                x2 = x1; y2 = y1; dt2 = dt1; dx2 = dx1; dy2 = dy1;
            }
            break;
        }
    }
    *xp = x1 + (x2 - x1) * (mjd - mjdi);
    *yp = y1 + (y2 - y1) * (mjd - mjdi);
    *ut1_utc = dt1 + (dt2 - dt1) * (mjd - mjdi);
    *dx = dx1 + (dx2 - dx1) * (mjd - mjdi);
    *dy = dy1 + (dy2 - dy1) * (mjd - mjdi);
}
/*
 * getinfo - fill an InfStruct with time scales, EOP values and the
 * celestial<->terrestrial rotation matrices for the epoch tjd.
 *
 * tjd:  tjd[0] = base Julian date, tjd[1] = offset in days (TT).
 * info: output structure; c_ie rotates celestial->terrestrial and c_ei
 *       is its transpose.
 * returns: 1 when CT != 2 (rotation built by iau_pns only), 0 otherwise.
 */
double getinfo(double *tjd, InfStruct *info)
{
    int n;
    double gmsth, ux[3] = {1,0,0}, uy[3] = {0,1,0}, uz[3] = {0,0,1},
        tx[3], ty[3], tz[3], xp, yp, ut1_utc, dx, dy;
    info->jd0 = tjd[0];
    info->tt = tjd[1] * 86400.0;              // TT offset, seconds
    info->jdt = info->jd0 + info->tt / 86400.0;
    info->leaps = getlps (info->jdt);
    info->utc = info->tt - info->leaps - 32.184; // TT -> UTC (TAI-UTC + 32.184)
    // Non-Earth central body (CT != 2): rotation from iau_pns only.
    if (CT != 2)
    {
        iau_pns(tjd, info->c_ei, CT);
        mt(info->c_ei, 3, 3, info->c_ie);
        return 1;
    }
    info->mjd = info->jd0 - 2400000.5 + info->utc/86400.0;
    geteop (info->mjd, &xp, &yp, &ut1_utc, &dx, &dy);
    info->xp = xp;
    info->yp = yp;
    info->ut1_utc = ut1_utc;
    info->dx = dx;
    info->dy = dy;
    info->deltat = 32.184 + info->leaps - info->ut1_utc; // TT - UT1
    info->ut1 = info->utc + info->ut1_utc;
    sidereal_time (info->jd0, info->ut1/86400.0, info->deltat,0,1,1, &gmsth);
    info->gmst = gmsth / 24 * 360.0 * DEG2RAD;  // hours -> radians
    cel_pole (info->jdt, 2, info->dx * 1e3, info->dy * 1e3);
    // Build c_ie column by column by rotating the three unit axes.
    cel2ter (info->jd0, info->ut1 / 86400.0, info->deltat, 1, 1, 0,
             info->xp, info->yp, ux, tx);
    cel2ter (info->jd0, info->ut1 / 86400.0, info->deltat, 1, 1, 0,
             info->xp, info->yp, uy, ty);
    cel2ter (info->jd0, info->ut1 / 86400.0, info->deltat, 1, 1, 0,
             info->xp, info->yp, uz, tz);
    for (n = 0; n < 3; n++)
    {
        info->c_ie[n*3] = tx[n];
        info->c_ie[n*3+1] = ty[n];
        info->c_ie[n*3+2] = tz[n];
    }
    mt(info->c_ie, 3, 3, info->c_ei);
    // printf ("%d\tjd0 = %.10f\t ut1 = %.10f\t utc = %.10f\n", i,info[i].jd0, info[i].ut1, info[i].utc );
    // for (n = 0; n < 9; n++)
    // printf ("%e\n", info[i].c_ie[n]);
    return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* itrf2gcrf(icrf) - from earth fixed to earth inertial
* jd: double, integral part of JD day, unit: day
* utc: double, fractional part of JD day, unit: seconds
* @param2: description of param2
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
 * itrf2gcrf - rotate a vector from the Earth-fixed frame (ITRF) to the
 * Earth inertial frame (GCRF).
 *
 * jd:  integral part of the Julian date, days.
 * utc: fractional part of the day, seconds (UTC).
 * vt:  input terrestrial vector.
 * vc:  output celestial vector (meters).
 */
void itrf2gcrf(double jd, double utc, double *vt, double *vc)
{
    double lps, mjd, xp, yp, ut1_utc, dx, dy, delta_t, ut1, tt;
    mjd = jd - 2400000.5;
    geteop (mjd + utc/86400.0, &xp, &yp, &ut1_utc, &dx, &dy);
    lps = getlps (jd + utc/86400.0);
    delta_t = 32.184 + lps - ut1_utc;  // TT - UT1
    ut1 = utc + ut1_utc;
    tt = utc + (lps + 32.184);
    // Apply the celestial pole offsets before the frame rotation.
    cel_pole (jd + tt / 86400.0, 2, dx * 1e3, dy * 1e3);
    ter2cel (jd, ut1 / 86400.0, delta_t, 1, 1, 0,
             xp, yp, vt, vc); /*--vc unit: m--*/
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* * gcrf(icrf)2itrf - from earth fixed to earth inertial
* * jd: double, integral part of JD day, unit: day
* * utc: double, fractional part of JD day, unit: seconds
* * @param2: description of param2
*
* * version: 20 Aug 2010
* */
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
 * gcrf2itrf - rotate a vector from the Earth inertial frame (GCRF) into
 * the Earth-fixed frame (ITRF); inverse of itrf2gcrf.
 *
 * jd:  integral part of the Julian date, days.
 * utc: fractional part of the day, seconds (UTC).
 * vc:  input celestial vector (meters).
 * vt:  output terrestrial vector.
 */
void gcrf2itrf(double jd, double utc, double *vc, double *vt)
{
    double leap, mjd_utc, pole_x, pole_y, dut1, ddx, ddy, tt_minus_ut1,
        ut1_sec, tt_sec;
    mjd_utc = jd - 2400000.5;
    geteop (mjd_utc + utc / 86400.0, &pole_x, &pole_y, &dut1, &ddx, &ddy);
    leap = getlps (jd + utc / 86400.0);
    tt_minus_ut1 = 32.184 + leap - dut1;
    ut1_sec = utc + dut1;
    tt_sec = utc + (leap + 32.184);
    /* Register the celestial pole offsets, then rotate. */
    cel_pole (jd + tt_sec / 86400.0, 2, ddx * 1e3, ddy * 1e3);
    cel2ter (jd, ut1_sec / 86400.0, tt_minus_ut1, 1, 1, 0,
             pole_x, pole_y, vc, vt); /*--vc unit: m--*/
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
*
* getlps - get the leap seconds value for input JD
*
* jdutc: double, Julian Day of UTC
* return: short int, leap seconds
*
* version: Mar 2013
*
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
 * getlps - leap seconds (TAI - UTC) for a UTC Julian date.
 *
 * jdutc: Julian Day of UTC.
 * returns: TAI - UTC in whole seconds.
 *
 * GENERALIZED: the ladder previously stopped at 1981 JUL 1 and exited for
 * earlier dates even though the table below lists values back to 1972;
 * the missing 1972-1980 entries are now included, taken verbatim from
 * that table.  Behavior for jd > 2444786.5 is unchanged.
 */
int getlps (double jdutc)
{
    /*
     *
     1972 JAN  1 =JD 2441317.5  TAI-UTC=  10.0 S + (MJD - 41317.) X 0.0 S
     1972 JUL  1 =JD 2441499.5  TAI-UTC=  11.0 S + (MJD - 41317.) X 0.0 S
     1973 JAN  1 =JD 2441683.5  TAI-UTC=  12.0 S + (MJD - 41317.) X 0.0 S
     1974 JAN  1 =JD 2442048.5  TAI-UTC=  13.0 S + (MJD - 41317.) X 0.0 S
     1975 JAN  1 =JD 2442413.5  TAI-UTC=  14.0 S + (MJD - 41317.) X 0.0 S
     1976 JAN  1 =JD 2442778.5  TAI-UTC=  15.0 S + (MJD - 41317.) X 0.0 S
     1977 JAN  1 =JD 2443144.5  TAI-UTC=  16.0 S + (MJD - 41317.) X 0.0 S
     1978 JAN  1 =JD 2443509.5  TAI-UTC=  17.0 S + (MJD - 41317.) X 0.0 S
     1979 JAN  1 =JD 2443874.5  TAI-UTC=  18.0 S + (MJD - 41317.) X 0.0 S
     1980 JAN  1 =JD 2444239.5  TAI-UTC=  19.0 S + (MJD - 41317.) X 0.0 S
     1981 JUL  1 =JD 2444786.5  TAI-UTC=  20.0 S + (MJD - 41317.) X 0.0 S
     1982 JUL  1 =JD 2445151.5  TAI-UTC=  21.0 S + (MJD - 41317.) X 0.0 S
     1983 JUL  1 =JD 2445516.5  TAI-UTC=  22.0 S + (MJD - 41317.) X 0.0 S
     1985 JUL  1 =JD 2446247.5  TAI-UTC=  23.0 S + (MJD - 41317.) X 0.0 S
     1988 JAN  1 =JD 2447161.5  TAI-UTC=  24.0 S + (MJD - 41317.) X 0.0 S
     1990 JAN  1 =JD 2447892.5  TAI-UTC=  25.0 S + (MJD - 41317.) X 0.0 S
     1991 JAN  1 =JD 2448257.5  TAI-UTC=  26.0 S + (MJD - 41317.) X 0.0 S
     1992 JUL  1 =JD 2448804.5  TAI-UTC=  27.0 S + (MJD - 41317.) X 0.0 S
     1993 JUL  1 =JD 2449169.5  TAI-UTC=  28.0 S + (MJD - 41317.) X 0.0 S
     1994 JUL  1 =JD 2449534.5  TAI-UTC=  29.0 S + (MJD - 41317.) X 0.0 S
     1996 JAN  1 =JD 2450083.5  TAI-UTC=  30.0 S + (MJD - 41317.) X 0.0 S
     1997 JUL  1 =JD 2450630.5  TAI-UTC=  31.0 S + (MJD - 41317.) X 0.0 S
     1999 JAN  1 =JD 2451179.5  TAI-UTC=  32.0 S + (MJD - 41317.) X 0.0 S
     2006 JAN  1 =JD 2453736.5  TAI-UTC=  33.0 S + (MJD - 41317.) X 0.0 S
     2009 JAN  1 =JD 2454832.5  TAI-UTC=  34.0 S + (MJD - 41317.) X 0.0 S
     2012 JUL  1 =JD 2456109.5  TAI-UTC=  35.0 S + (MJD - 41317.) X 0.0 S
     *
     */
    short int lps;
    double jd = jdutc;
    if (jd > 2456109.5)
        lps = 35;
    else if (jd > 2454832.5)
        lps = 34;
    else if (jd > 2453736.5)
        lps = 33;
    else if (jd > 2451179.5)
        lps = 32;
    else if (jd > 2450630.5)
        lps = 31;
    else if (jd > 2450083.5)
        lps = 30;
    else if (jd > 2449534.5)
        lps = 29;
    else if (jd > 2449169.5)
        lps = 28;
    else if (jd > 2448804.5)
        lps = 27;
    else if (jd > 2448257.5)
        lps = 26;
    else if (jd > 2447892.5)
        lps = 25;
    else if (jd > 2447161.5)
        lps = 24;
    else if (jd > 2446247.5)
        lps = 23;
    else if (jd > 2445516.5)
        lps = 22;
    else if (jd > 2445151.5)
        lps = 21;
    else if (jd > 2444786.5)
        lps = 20;
    else if (jd > 2444239.5)
        lps = 19;
    else if (jd > 2443874.5)
        lps = 18;
    else if (jd > 2443509.5)
        lps = 17;
    else if (jd > 2443144.5)
        lps = 16;
    else if (jd > 2442778.5)
        lps = 15;
    else if (jd > 2442413.5)
        lps = 14;
    else if (jd > 2442048.5)
        lps = 13;
    else if (jd > 2441683.5)
        lps = 12;
    else if (jd > 2441499.5)
        lps = 11;
    else if (jd > 2441317.5)
        lps = 10;
    else
    {
        printf ("No leapsecond configured before 1972 JAN 1 =JD 2441317.5\n");
        exit (0);
    }
    return lps;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
 * openeop - load num rows of Earth-orientation parameters into eopmat.
 *
 * file_eop: path of an EOP C04-style text file (columns: y m d mjd
 *           xp yp ut1-utc ... dx dy).
 * mjds:     first MJD wanted; scanning starts after the line whose MJD
 *           field equals mjds - 1.
 * num:      number of rows to read.
 * eopmat:   output, num rows of 6 doubles (mjd, xp, yp, ut1-utc, dx, dy).
 *
 * BUG FIX: the loop was driven by feof() before reading, so at EOF the
 * stale last line was parsed a second time, and mjdi was read
 * uninitialized on an empty file.  The loop is now driven by fgets()
 * itself and every read/parse result is checked.
 */
void openeop (char file_eop[200], int mjds, int num, double *eopmat)
{
    FILE *fp_eop;
    int i, mjdi;
    char string[160];
    if ((fp_eop = fopen (file_eop,"r")) == NULL)
    {
        printf ("Cannot open eop file?\n");
        exit (0);
    }
    while (fgets (string, 160, fp_eop) != NULL)
    {
        /* Fourth whitespace-separated integer field is the MJD. */
        if (sscanf (string, "%*d%*d%*d%d", &mjdi) != 1)
            continue;
        if (mjdi == mjds - 1)
        {
            for (i = 0; i < num; i ++)
            {
                if (fgets (string, 160, fp_eop) == NULL)
                    break;              /* table ended before num rows */
                sscanf (string, "%*d%*d%*d%lf%lf%lf%lf%*lf%lf%lf",
                        &eopmat[i * 6 + 0], &eopmat[i * 6 + 1], &eopmat[i * 6 + 2],
                        &eopmat[i * 6 + 3], &eopmat[i * 6 + 4], &eopmat[i * 6 + 5]);
            }
            break;
        }
    }
    fclose (fp_eop);
}
/*
 * aei2xyz - convert Keplerian elements to Cartesian position/velocity.
 *
 * ele: a (semi-major axis), e (eccentricity), i, Omega (node), omega
 *      (argument of perigee), M (mean anomaly); angles in degrees.
 * pos, vel: output Cartesian state (units of a and sqrt(GMCT/a)).
 */
void aei2xyz (double ele[6], double pos[3], double vel[3])
{
    double a, e, i, omega, w, M, E,r, P[3], Q[3], n, GM, radius;
    int x;
    GM = GMCT;
    radius = RCT;
    a = ele[0];
    e = ele[1];
    i = ele[2] * DEG2RAD;
    omega = ele[3] * DEG2RAD;
    w = ele[4] * DEG2RAD;
    M = ele[5] * DEG2RAD;
    n=sqrt(GM/(a*a*a));            // mean motion
    E=kepler(M,e);                 // eccentric anomaly from Kepler's equation
    // P, Q: orthogonal unit vectors of the orbital plane (Gaussian vectors).
    P[0]=cos(omega)*cos(w)-sin(omega)*sin(w)*cos(i);
    P[1]=sin(omega)*cos(w)+cos(omega)*sin(w)*cos(i);
    P[2]=sin(w)*sin(i);
    Q[0]=-cos(omega)*sin(w)-sin(omega)*cos(w)*cos(i);
    Q[1]=-sin(omega)*sin(w)+cos(omega)*cos(w)*cos(i);
    Q[2]=cos(w)*sin(i);
    for(x=0;x<3;x++)
    {
        pos[x]=a*(cos(E)-e)*P[x]+a*sqrt(1-e*e)*sin(E)*Q[x];
    }
    r = modvect (pos);
    // Sanity check only: warn if the point is below the body's surface.
    if (r <= radius)
    {
        printf("error: r <= radius ! in aei2xyz \n");
    }
    for(x=0;x<3;x++)
    {
        vel[x]=-a*a*n/r*sin(E)*P[x]+a*a*n/r*sqrt(1-e*e)*cos(E)*Q[x];
    }
}
double kepler(double M,double e)
{
double E0,E1=M;
do
{
E0=E1;
E1=M+e*sin(E0);
}
while(fabs(E0-E1)>=1e-10);
return(E1);
}
/*
 * xyz2aei - convert a Cartesian state to Keplerian elements.
 *
 * ele: output a, e, i, Omega, omega, M (angles in degrees).
 * pos, vel: input position and velocity.
 * returns: the orbital period 2*pi/n, or 0 for a non-elliptic orbit
 *          (a <= 0), in which case only a, e, i, Omega are filled.
 */
double xyz2aei(double ele[6], double pos[3], double vel[3])
{
    double a, e, omega, i, w, E, M, r, v, h, HV[3], n,
        GM, radius, Pz, Qz;
    GM = GMCT;
    radius = RCT;
    r = modvect (pos);
    v = modvect (vel);
    // Sanity check only: warn if the point is below the body's surface.
    if (r <= radius)
    {
        printf("error: r <= radius ! in xyz2aei \n");
    }
    a = 1.0 / (2.0 / r - v * v / GM);          // vis-viva
    crsvect (pos, vel, HV);                    // angular momentum vector
    h = modvect(HV);
    e = sqrt (1.0 - h * h / GM / a);
    i = acos (HV[2] / h); //unit: rad
    omega = chosephase (HV[0] / h / sin(i), - HV[1] / h / sin(i)); //unit: rad
    // Hyperbolic/parabolic state: report what is defined and bail out.
    if(a <= 0)
    {
        ele[0]=a,ele[1]=e,ele[2]=i * RAD2DEG;
        ele[3]=omega * RAD2DEG,ele[4]=0,ele[5]=0;
        // printf("error: a <= 0 !\n");
        return 0;
    }
    if(a <= radius)
    {
        printf("warning: a <= radius !\n");
    }
    n = sqrt ( GM / (a*a*a) );                 // mean motion
    E = chosephase ( dotvect(pos, vel) / (a * a * n * e), (1.0 - r / a) / e); //unit: rad
    M = E - e * sin(E); //unit: rad
    // Pz, Qz: z-components of the Gaussian vectors, used to recover omega.
    Pz = (cos(E) / r * pos[2] - sin(E) / n / a * vel[2]);
    Qz = (sin(E) / r / sqrt(1.0-e*e) * pos[2] + (cos(E) - e) / n / a / sqrt(1.0-e*e) * vel[2]);
    w = chosephase ( Pz / sin(i), Qz /sin(i)); //unit: rad
    ele[0] = a;
    ele[1] = e;
    ele[2] = i * RAD2DEG;
    ele[3] = omega * RAD2DEG;
    ele[4] = w * RAD2DEG;
    ele[5] = M * RAD2DEG;
    return TWOPI / n;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
 * opengravfile -
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
 * opengrv - load spherical-harmonic gravity coefficients into coef.
 *
 * file_grv: [0] = path of the coefficient file; [1] = optional record
 *           label — when non-empty, only lines whose first token equals
 *           it are parsed (label followed by "n m C S"), otherwise every
 *           line is parsed as "n m C S".
 * coef:     output array; zonal Cnm (m==0) at coef[n], tesserals packed
 *           by order m from index nmax+1 onward (C block then S block).
 * nmax, mmax: maximum degree/order to keep; degrees 0 and 1 are excluded.
 * returns: 0.
 *
 * BUG FIX: n, m, c, s were left stale (first iteration: uninitialized,
 * i.e. undefined behavior) when a line did not match the label or failed
 * to parse, silently re-storing the previous coefficient.  Each line now
 * resets them and unparseable/non-matching lines are skipped.  The token
 * read into name[20] is also bounded (%19s) to prevent overflow.
 */
double opengrv (char file_grv[2][200], double *coef, int nmax, int mmax)
{
    FILE *fp_grv;
    double c,s;
    int n,m, l, ind;
    char string[200], name[20];
    if ((fp_grv = fopen (file_grv[0],"r")) == NULL)
    {
        printf ("Cannot open gravity file?\n");
        exit (0);
    }
    // coef[0] = 1; // include zero degree term
    coef[0] = 0; // exclude zero degree term
    while (fgets (string, 200, fp_grv) != NULL)
    {
        n = -1; m = 0; c = 0.0; s = 0.0;   /* reset per line (see header) */
        if (strlen(file_grv[1])==0)
        {
            if (sscanf (string, "%d%d%lf%lf", &n, &m, &c, &s) != 4)
                continue;
        }
        else
        {
            if (sscanf (string, "%19s", name) != 1)
                continue;
            if (strcmp (name,file_grv[1]) != 0)
                continue;
            if (sscanf (string, "%*s%d%d%lf%lf", &n, &m, &c, &s) != 4)
                continue;
        }
        // if (n > nmax || n < 0)
        if (n > nmax || n < 2 || m > mmax) // permanently exclude degree 1 @7/24/2012
            continue;
        else if (m == 0)
        {
            coef[n] = c;       /* zonal term */
        }
        else
        {
            /* Packed layout: for order m, C(n,m) then S(n,m) blocks of
             * length l = nmax - m + 1 each. */
            l = nmax - m + 1;
            ind = nmax + 1 + (2 * nmax - m + 2) * (m - 1);
            coef[ind + n - m] = c;
            coef[ind + n - m + l] = s;
        }
    }
    printf ("coef[2] = %e\n", coef[2]);
    // Optional permanent-tide correction to C20.
    if (PERMT == 1)
    {
        coef[2] = coef[2] - 4.201e-9; //tn32
        // coef[2] = coef[2] - 4.1736e-9; //tn36
    }
    printf ("coef[2] = %e\n", coef[2]);
    fclose(fp_grv);
    return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* xyz2llh - xyz to latitude, longitude, height
* @param1: description of param1
* @param2: description of param2
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
 * xyz2llh - convert a Cartesian vector to spherical latitude, longitude
 * and radius.
 *
 * vt:  input Cartesian vector (x, y, z).
 * llh: output — llh[0] latitude (deg), llh[1] longitude (deg, via
 *      chosephase), llh[2] radial distance (same unit as vt).
 * NOTE(review): divides by sin(colatitude), so a vector exactly along
 * the z axis produces non-finite longitude inputs — confirm callers
 * never pass a polar vector.
 */
void xyz2llh (double *vt, double *llh)
{
    double radius, colat, cos_colat, sin_lon, cos_lon;
    radius = sqrt (vt[0] * vt[0] + vt[1] * vt[1] + vt[2] * vt[2]);
    cos_colat = vt[2] / radius;
    colat = acos (cos_colat);                 /* colatitude, rad */
    cos_lon = vt[0] / radius / sin (colat);
    sin_lon = vt[1] / radius / sin (colat);
    llh[2] = radius;
    llh[1] = chosephase (sin_lon, cos_lon) * RAD2DEG;
    llh[0] = 90.0 - colat * RAD2DEG;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* fun_pointmass - abandoned
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
 * earth_pointmass - point-mass two-body derivative function (marked
 * "abandoned" in the surrounding comments).
 *
 * x: state (position 0..2, velocity 3..5) in AU / AU-per-day scaling.
 * f: output derivative; f[0..2] = velocity, f[3..5] = acceleration.
 * returns: 0.
 *
 * Note: the relativistic term fgr is computed but NEVER added to f —
 * only the Newtonian fnt is used below.
 */
double earth_pointmass (double jd, double tdbs, double *x, double *f)
{
    double GM, radius, gmde, fnt[3], fgr[3], r, s2, rrd, a, b;
    int n, gamma;
    GM = GMCT;
    radius = RCT;
    // GM rescaled to AU^3/day^2.
    gmde = GM * 86400.0 * 86400.0 / AU / AU / AU;
    gamma = 1;
    f[0] = x[3];
    f[1] = x[4];
    f[2] = x[5];
    r = sqrt (x[0]*x[0]+x[1]*x[1]+x[2]*x[2]);
    s2 = x[3] * x[3] + x[4] * x[4] + x[5] * x[5];
    rrd = x[0] * x[3] + x[1] * x[4] + x[2] * x[5];
    a = 2 * (1 + gamma) * gmde / r - gamma * s2;
    b = 2 * (1 + gamma) * rrd;
    // Schwarzschild-type relativistic correction (computed, unused).
    for (n = 0; n < 3; n++)
        fgr[n] = gmde / C_AUDAY / C_AUDAY / r / r / r
            * ( a * x[n] + b * x[n+3] );
    // Newtonian point-mass acceleration.
    fnt[0] = - gmde / (r*r*r) * x[0];
    fnt[1] = - gmde / (r*r*r) * x[1];
    fnt[2] = - gmde / (r*r*r) * x[2];
    for (n = 0; n < 3; n++)
    {
        f[3 + n] = fnt[n];
    }
    return 0;
}
/*
 * accel_point - Newtonian point-mass acceleration plus a Schwarzschild-
 * type relativistic correction, both returned separately.
 *
 * x:   state (position 0..2, velocity 3..5), AU / AU-per-day scaling.
 * fnt: output Newtonian acceleration.
 * fgr: output relativistic correction.
 * returns: 0.
 */
double accel_point (double *tjd, double *x, double *fnt, double *fgr)
{
    double GM, radius, gmde, r, s2, rrd, a, b;
    int n, gamma;
    GM = GMCT;
    radius = RCT;
    // GM rescaled to AU^3/day^2.
    gmde = GM * 86400.0 * 86400.0 / AU / AU / AU;
    gamma = 1;            // PPN parameter (GR value)
    r = sqrt (x[0]*x[0]+x[1]*x[1]+x[2]*x[2]);
    s2 = x[3] * x[3] + x[4] * x[4] + x[5] * x[5];
    rrd = x[0] * x[3] + x[1] * x[4] + x[2] * x[5];
    a = 2 * (1 + gamma) * gmde / r - gamma * s2;
    b = 2 * (1 + gamma) * rrd;
    for (n = 0; n < 3; n++)
        fgr[n] = gmde / C_AUDAY / C_AUDAY / r / r / r
            * ( a * x[n] + b * x[n+3] );
    fnt[0] = - gmde / (r*r*r) * x[0];
    fnt[1] = - gmde / (r*r*r) * x[1];
    fnt[2] = - gmde / (r*r*r) * x[2];
    return 0;
}
/*
 * accel_pmiers - point-mass Newtonian acceleration plus the IERS-style
 * relativistic correction: Schwarzschild term, Lense-Thirring (frame
 * dragging via the angular-momentum vector Jv) and geodesic (de Sitter)
 * precession driven by the Sun's state.
 *
 * x:   state (position 0..2, velocity 3..5), AU / AU-per-day scaling.
 * fnt: output Newtonian acceleration.
 * fgr: output total relativistic correction (term1+term2+term3).
 * returns: 0.
 */
double accel_pmiers (double *tjd, double *x, double *fnt, double *fgr)
{
    double GME, GMS, J, Jv[3], beta, gamma, r, v2, pv, pJ, a, b, p[3], v[3],
        pxv[3], vxJ[3], ps[3], vs[3], rs, vsxps[3], vsxpsxv[3],
        term1[3], term2[3], term3[3];
    int n;
    short int sun = 10;
    GME = GMCT; //m^3/s^2
    J = 9.8e8; //m^2/s
    gamma = 1;            // PPN parameters (GR values)
    beta = 1;
    GMS = 1.32712442076e20; //m^3/s^2
    // Rescale to AU/day units.
    GME = GME * 86400.0 * 86400.0 / AU / AU / AU;
    GMS = GMS * 86400.0 * 86400.0 / AU / AU / AU;
    J = J * 86400.0 / AU / AU;
    Jv[0] = 0; Jv[1] = 0; Jv[2] = J;
    p[0] = x[0]; p[1] = x[1]; p[2] = x[2];
    v[0] = x[3]; v[1] = x[4]; v[2] = x[5];
    r = modvect(p);
    v2 = dotvect(v, v);
    pv = dotvect(p, v);
    pJ = dotvect(p, Jv);
    crsvect(p, v, pxv);
    crsvect(v, Jv, vxJ);
    planet_ephemeris (tjd, CT, sun, ps, vs);
    rs = modvect(ps);
    crsvect(vs, ps, vsxps);
    crsvect(vsxps, v, vsxpsxv);
    a = 2 * (beta + gamma) * GME / r - gamma * v2;
    b = 2 * (1 + gamma) * pv;
    for (n = 0; n < 3; n++)
    {
        // term1: Schwarzschild; term2: Lense-Thirring; term3: de Sitter.
        term1[n] = GME / C_AUDAY / C_AUDAY / r / r / r *
            ( a * p[n] + b * v[n] );
        term2[n] = GME / C_AUDAY / C_AUDAY / r / r / r * (1 + gamma) *
            ( 3/r/r * pxv[n] * pJ + vxJ[n] );
        term3[n] = - GMS / C_AUDAY / C_AUDAY / rs / rs / rs * (1 + 2 * gamma) *
            vsxpsxv[n];
        fgr[n] = term1[n]
            + term2[n] + term3[n];
        // printf ("%15.12f\t%15.12f\t%15.12f\n", term1[n],term2[n],term2[n]);
    }
    fnt[0] = - GME / (r*r*r) * p[0];
    fnt[1] = - GME / (r*r*r) * p[1];
    fnt[2] = - GME / (r*r*r) * p[2];
    return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* fun_fullstate -transition matrix(36), orbit(6), sensitivity matrix(6*DYNPAR)
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
 * fun_accel - right-hand side of the orbit + variational equations.
 *
 * Layout of state/fstate (length dim):
 *   [0..5]        position and velocity;
 *   [6..41]       6x6 state transition matrix dX/dX0 (only if dim > 6);
 *   [42..]        6 x MDYN sensitivity matrix dX/dp (only if MDYN > 0).
 *
 * dim:   total state length; must be >= 6 (exits otherwise).  dim == 6
 *        integrates the orbit only (part = 0, no partials).
 * jd,tt: epoch as base JD plus seconds offset (packed into tjd[2]).
 * state: current state vector.
 * fstate: output time derivative of state.
 *
 * Accelerations summed: point mass (accel_pm_part), n-body
 * (accel_nb_part), solar radiation (accel_sr_part) and gravity-field /
 * tide terms (accel_gt_part); each also returns its 3x3 position partial
 * when part == 1.
 */
void fun_accel (int dim, double jd, double tt, double *state, double *fstate)
{
    int n, i,k, part;
    double tjd[2], xic[6], dfdx[36], dxdx0[36],
        acc1[3], dadr1[9],
        acc2[3], dadr2[9],
        acc3[3], dadr3[9], dadsrpb[3], dadsrpt[3],
        acc4[4], dadr4[9], dadk2[3],
        acc[3], dadr[9],
        fxic[6], fdxdx0[36];
    double ap[3], an[3], ar[3], ag[3], apgr[3], angr[3], at[3], ao[3];
    double *dadp, *dxdp, *dfdpp, *dfdp, *fdxdp;
    tjd[0] = jd; tjd[1] = tt / 86400.0;
    // tjd[0] = jd; tjd[1] = tt;
    if (dim < 6)
    {
        printf ("error: fun_accel input dim < 6!\n");
        exit (0);
    }
    else if (dim == 6)
        part = 0;
    else if (dim > 6)
    {
        part = 1;
    }
    for (n = 0; n < 6; n++)
    {
        xic[n] = state[n];
    }
    /* acc, partial to xyz: dadr, partial to parameters dadp*/
    // accel_ntrel (tjd, xic, part, acc1, dadr1, dadp1);
    // accel_nonsp (tjd, xic, part, acc2, dadr2, dadp2);
    // accel_radpr (tjd, xic, part, acc3, dadr3, dadp3);
    /*todo: air drag acc & partial to vxvyvz dadv*/
    accel_pm_part (tjd, xic, ap, part, dadr1);
    accel_nb_part (tjd, xic, an, part, dadr2);
    accel_sr_part (tjd, xic, ar, part, dadr3, dadsrpb, dadsrpt);
    accel_gt_part (tjd, xic, ag, part, dadr4, dadk2);
    // Total acceleration = sum of the four force models.
    for (n = 0; n <= 2; n++)
    {
        acc[n] = ap[n] + an[n] + ar[n] + ag[n];
    }
    // Orbit derivative: d(pos)/dt = vel, d(vel)/dt = acc.
    fxic[0] = xic[3];
    fxic[1] = xic[4];
    fxic[2] = xic[5];
    fxic[3] = acc[0];
    fxic[4] = acc[1];
    fxic[5] = acc[2];
    for (n = 0; n < 6; n++)
    {
        fstate[n] = fxic[n];
    }
    if (part == 0)
    {
        return;
    }
    for (n = 0; n < 36; n++)
    {
        dxdx0[n] = state[n + 6];
    }
    // Total 3x3 position partial of the acceleration.
    for (n = 0; n <= 8; n++)
    {
        dadr[n] = dadr1[n] + dadr2[n] + dadr3[n] + dadr4[n];
        // dadr[n] = dadr1[n];
    }
    // Assemble the 6x6 Jacobian dfdx = [[0, I], [dadr, 0]].
    for (n = 0; n < 36; n++)
    {
        dfdx[n] = 0;
    }
    dfdx[3] = 1;
    dfdx[10] = 1;
    dfdx[17] = 1;
    for (n = 0; n < 3; n++)
    {
        dfdx[n + 18] = dadr[n];
        dfdx[n + 24] = dadr[n + 3];
        dfdx[n + 30] = dadr[n + 6];
    }
    // Variational equation: d(dX/dX0)/dt = dfdx * dX/dX0.
    brmul(dfdx, dxdx0, 6, 6, 6, fdxdx0);
    for (n = 0; n < 36; n++)
    {
        fstate[n + 6] = fdxdx0[n];
    }
    if (MDYN == 0)
        return;
    // Sensitivity to MDYN dynamic parameters:
    // d(dX/dp)/dt = dfdx * dX/dp + df/dp, with df/dp = [0; dadp].
    dadp = (double *) calloc ( 3 * MDYN, sizeof(double));
    dxdp = (double *) calloc ( 6 * MDYN, sizeof(double));
    dfdpp = (double *) calloc ( 6 * MDYN, sizeof(double));
    dfdp = (double *) calloc ( 6 * MDYN, sizeof(double));
    fdxdp = (double *) calloc ( 6 * MDYN, sizeof(double));
    for (n = 0; n < 6 * MDYN; n++)
    {
        dxdp[n] = state[n + 42];
    }
    // Column i of dadp is filled per enabled parameter group, in the
    // fixed order: SRP bias, SRP trend, tide k2, gravity coefficients.
    i = 0;
    if (MSRP > 0)
    {
        for (n = 0; n < 3; n++)
        {
            dadp[n * MDYN + i] = dadsrpb[n];
        }
        i++;
    }
    if (MSRP > 1)
    {
        for (n = 0; n < 3; n++)
        {
            dadp[n * MDYN + i] = dadsrpt[n];
        }
        i++;
    }
    if (MTK2 > 0)
    {
        for (n = 0; n < 3; n++)
        {
            dadp[n * MDYN + i] = dadk2[n];
        }
        i++;
    }
    if (MGCS > 0)
    {
        for (k = 0; k < MGCS; k ++)
        {
            for (n = 0; n < 3; n++)
            {
                dadp[n * MDYN + i] = CSinfo[k].dadcs[n];
            }
            i++;
        }
    }
    brmul(dfdx, dxdp, 6, 6, MDYN, dfdpp);
    for (n = 0; n < 3 * MDYN; n++)
    {
        dfdp[n] = 0;
        dfdp[n + 3 * MDYN] = dadp[n];
    }
    for (n = 0; n < 6 * MDYN; n++)
    {
        fdxdp[n] = dfdpp[n] + dfdp[n];
    }
    /*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
    for (n = 0; n < 6 * MDYN; n++)
    {
        fstate[n + 42]= fdxdp[n];
    }
    free (dadp);
    free (dxdp);
    free (dfdpp);
    free (dfdp);
    free (fdxdp);
    return;
}
/*
 * accel_pm_part - central-body point-mass acceleration, its 3x3 position
 * partial, and (when RELTIV != 0) the relativistic correction added into
 * acc (Schwarzschild + Lense-Thirring + de Sitter, SI units with C).
 *
 * x:    state (position 0..2, velocity 3..5).
 * acc:  output acceleration.
 * part: 1 = also fill dadr with d(acc)/d(pos) (Newtonian part only).
 * dadr: output 3x3 partial, row-major.
 * returns: 0.
 */
double accel_pm_part (double *tjd, double *x, double *acc, int part, double *dadr)
{
    double GME, GMS, J, Jv[3], beta, gamma, r, v2, pv, pJ, a, b, p[3], v[3],
        pxv[3], vxJ[3], ps[3], vs[3], xsc[6], rs, vsxps[3], vsxpsxv[3],
        unit[9], ppt[9], r5, r3, fgr[3], fnt[3], term1[3], term2[3], term3[3];
    int n;
    short int sun = 10;
    GME = GMCT; //m^3/s^2
    // GME = GME * 86400.0 * 86400.0 / AU / AU / AU;
    p[0] = x[0]; p[1] = x[1]; p[2] = x[2];
    v[0] = x[3]; v[1] = x[4]; v[2] = x[5];
    r = modvect(p);
    acc[0] = - GME / (r*r*r) * p[0];
    acc[1] = - GME / (r*r*r) * p[1];
    acc[2] = - GME / (r*r*r) * p[2];
    if (part == 1)
    {
        // dadr = 3*GM*(p p^T)/r^5 - GM*I/r^3
        unit[0] = 1; unit[1] = 0; unit[2] = 0;
        unit[3] = 0; unit[4] = 1; unit[5] = 0;
        unit[6] = 0; unit[7] = 0; unit[8] = 1;
        r5 = pow (r, 5);
        r3 = pow (r, 3);
        brmul (p, p, 3,1,3, ppt);
        for (n = 0; n <= 8; n++)
        {
            dadr[n] = 3 * GME * ppt[n] / r5
                - GME * unit[n] / r3;
        }
    }
    if (RELTIV == 0)
        return 0;
    // Relativistic correction (not reflected in dadr).
    J = 9.8e8; //m^2/s
    gamma = 1;            // PPN parameters (GR values)
    beta = 1;
    GMS = 1.32712442076e20; //m^3/s^2
    // GMS = GMS * 86400.0 * 86400.0 / AU / AU / AU;
    // J = J * 86400.0 / AU / AU;
    Jv[0] = 0; Jv[1] = 0; Jv[2] = J;
    v2 = dotvect(v, v);
    pv = dotvect(p, v);
    pJ = dotvect(p, Jv);
    crsvect(p, v, pxv);
    crsvect(v, Jv, vxJ);
    // planet_ephemeris (tjd, CT, sun, ps, vs);
    get_ephemeris (tjd, CT, sun, xsc);
    for (n = 0; n < 3; n++)
    {
        ps[n] = xsc[n];
        vs[n] = xsc[n + 3];
    }
    rs = modvect(ps);
    crsvect(vs, ps, vsxps);
    crsvect(vsxps, v, vsxpsxv);
    a = 2 * (beta + gamma) * GME / r - gamma * v2;
    b = 2 * (1 + gamma) * pv;
    for (n = 0; n < 3; n++)
    {
        // term1: Schwarzschild; term2: Lense-Thirring; term3: de Sitter.
        term1[n] = GME / C / C / r / r / r *
            ( a * p[n] + b * v[n] );
        term2[n] = GME / C / C / r / r / r * (1 + gamma) *
            ( 3/r/r * pxv[n] * pJ + vxJ[n] );
        term3[n] = - GMS / C / C / rs / rs / rs * (1 + 2 * gamma) *
            vsxpsxv[n];
        fgr[n] = term1[n]
            + term2[n] + term3[n];
        // printf ("%15.12f\t%15.12f\t%15.12f\n", term1[n],term2[n],term2[n]);
    }
    acc[0] = acc[0] + fgr[0];
    acc[1] = acc[1] + fgr[1];
    acc[2] = acc[2] + fgr[2];
    return 0;
}
/*
 * get_ephemeris - state of body `to` relative to body `from` in meters
 * and meters per second.
 *
 * tjd:  two-part Julian date (TDB).
 * to, from: body indices; 0..12 are handled by planet_ephemeris (which
 *           returns AU and AU/day, converted here), I_TITAN goes through
 *           SPICE (spkezr_c returns km and km/s, converted here).
 * x:    output 6-vector (pos 0..2 in m, vel 3..5 in m/s).
 * returns: 0 (prints an error for unsupported body combinations and
 *          leaves x unchanged).
 */
int get_ephemeris (double tjd[2], int to, int from, double *x)
{
    double jd0 = 2451545.00000000, lt, tdbj2000, fromTtoS[6], pos[3], vel[3];
    int n;
    short int center, target;
    if (from <= 12 && to <= 12)
    {
        center = (short int)from;
        target = (short int)to;
        planet_ephemeris (tjd, target, center, pos, vel);
        // AU, AU/day -> m, m/s
        x[0] = pos[0] * AU;
        x[1] = pos[1] * AU;
        x[2] = pos[2] * AU;
        x[3] = vel[0] * AU / 86400.0;
        x[4] = vel[1] * AU / 86400.0;
        x[5] = vel[2] * AU / 86400.0;
    }
    else if (from == I_TITAN || to == I_TITAN) //titan
    {
        // Seconds past J2000 TDB for the SPICE call.
        tdbj2000 = ((tjd[0] - jd0) + tjd[1]) * 86400.0;
        spkezr_c ("SATURN", tdbj2000, "J2000", "NONE", "TITAN", fromTtoS, &lt);
        /*
           Procedure
           void spkezr_c ( ConstSpiceChar *targ,
           SpiceDouble et,
           ConstSpiceChar *ref,
           ConstSpiceChar *abcorr,
           ConstSpiceChar *obs,
           SpiceDouble starg[6],
           SpiceDouble *lt )
           Return the state (position and velocity) of a target body
           relative to an observing body, optionally corrected for light
           time (planetary aberration) and stellar aberration.
         */
        if (from == I_TITAN) center = (short int) to;
        else center = (short int) from;
        // Chain: other body -> Saturn (planet_ephemeris, AU units) plus
        // Saturn -> Titan (SPICE, km units).
        planet_ephemeris (tjd, center, 5, pos, vel);
        for (n = 0; n < 3; n++)
        {
            x[n] = pos[n] * AU + fromTtoS[n] * 1000.0;
            x[n + 3] = vel[n] * AU / 86400.0 + fromTtoS[n + 3] * 1000.0;
        }
        // Flip the sign when Titan is the target rather than the origin.
        if (to == I_TITAN)
        {
            for (n = 0; n < 6; n++)
            {
                x[n] = - x[n];
            }
        }
    }
    else
    {
        printf ("error in get_ephemeris: from = %d\t to = %d\n", from, to);
    }
    return 0;
}
/*
 * accel_nb_part - third-body (n-body) perturbation on a satellite of the
 * central body CT, plus its 3x3 position partial.
 *
 * Computed as the difference of barycentric accelerations (f_bcrs) of
 * the satellite and of the central body; when NBODY == 0 both outputs
 * are zeroed.
 *
 * xic:  satellite state relative to the central body.
 * acc:  output perturbing acceleration.
 * part: 1 = also fill dadr (taken from the satellite term only, since
 *       the central-body term does not depend on the satellite state).
 * returns: 0.
 */
double accel_nb_part (double *tjd, double *xic, double *acc, int part, double *dadr)
{
    int n;
    short int ssbary = 11;
    double xcb[6], xib[6], acb[3], aib[3], dadr1[9], dadrc[9], dadri[9];
    if (NBODY == 0)
    {
        if (part == 1)
        {
            for (n = 0; n <= 8; n++)
            {
                dadr[n] = 0;
            }
        }
        for (n = 0; n <= 2; n++)
        {
            acc[n] = 0;
        }
        return 0;
    }
    // planet_ephemeris (tjd, CT, ssbary, &xcb[0], &xcb[3]);
    // Central body's barycentric state and acceleration.
    get_ephemeris (tjd, CT, ssbary, xcb);
    f_bcrs (tjd, xcb, CT, acb, part, dadrc);
    // Satellite's barycentric state and acceleration.
    for (n = 0; n <= 5; n++)
    {
        xib[n] = xic[n] + xcb[n];
    }
    f_bcrs (tjd, xib, CT, aib, part, dadri);
    for (n = 0; n <= 2; n++)
    {
        acc[n] = aib[n] - acb[n];
    }
    if (part == 1)
    {
        for (n = 0; n <= 8; n++)
        {
            dadr[n] = dadri[n];
        }
    }
    return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
 * f_bcrs - barycentric point-mass acceleration on a point with state xi,
 * summed over solar-system bodies 0..10 with body `exclude` skipped.
 * Newtonian terms always; when any PERB[j]==2, relativistic (EIH-style,
 * PPN beta=gamma=1) 1/c^2 corrections are added for those bodies.
 *
 * tjd     : two-part Julian date
 * xi      : barycentric state (position+velocity) of the point
 * exclude : body index to omit from the sums
 * acc     : output acceleration (3)
 * part    : when 1, also fill dadr with the 3x3 Newtonian da/dr
 *
 * Returns 1 when relativistic terms were added, 0 otherwise.
 */
double f_bcrs (double *tjd, double *xi, int exclude,
    double *acc, int part, double *dadr)
{
    double fnt[3], fgr[3], xj[11][6], xij[11][6], rij[11], xjk[6], rjk,
        xddj[3], sumil, sumjk, sdi2, sdj2, rdirdj, rrrdr2, rjirdd,
        rij5, rij3, xijt[9], gra, grb, beta, gamma, unit[9];
    short int ssbary, l, k, j, n, flag_gr;
    ssbary = 11;
    gamma = 1.0;    /* PPN parameters fixed at their GR values */
    beta = 1.0;
    unit[0] = 1; unit[1] = 0; unit[2] = 0;
    unit[3] = 0; unit[4] = 1; unit[5] = 0;
    unit[6] = 0; unit[7] = 0; unit[8] = 1;
    /* barycentric states of all 11 bodies and separation to each */
    for (j = 0; j <= 10; j++)
    {
        // planet_ephemeris (jd, j, ssbary, &xj[j][0], &xj[j][3]);
        get_ephemeris (tjd, j, ssbary, xj[j]);
        for (n = 0; n < 6; n++)
        {
            xij[j][n] = xi[n] - xj[j][n];
        }
        rij[j] = sqrt (xij[j][0] * xij[j][0]
            + xij[j][1] * xij[j][1] + xij[j][2] * xij[j][2]);
    }
    /* Newtonian sum; PERB[j]==0 disables a body, PERB[j]==2 also
     * requests the relativistic terms further below */
    flag_gr = 0;
    for (n = 0; n < 3; n ++)
        fnt[n] = 0;
    for (j = 0; j <= 10; j++)
    {
        if (PERB[j] == 2)
            flag_gr = 1;
        if (PERB[j] == 0)
            continue;
        if (j == exclude)
            continue;
        for (n = 0; n < 3; n++)
            fnt[n] = fnt[n]
                - GMDE[j] / (rij[j] * rij[j] * rij[j]) * xij[j][n];
    }
    /* Newtonian 3x3 partial da/dr = sum_j GM_j (3 r r^T / r^5 - I / r^3) */
    if (part == 1)
    {
        for (n = 0; n <= 8; n++)
        {
            dadr[n] = 0;
        }
        for (j = 0; j <= 10; j++)
        {
            if (j == exclude)
                continue;
            /* NOTE(review): bodies with PERB[j]==0 are skipped in the
             * acceleration sum above but still contribute here --
             * confirm this asymmetry is intended */
            rij5 = pow (rij[j], 5);
            rij3 = pow (rij[j], 3);
            brmul (xij[j], xij[j], 3,1,3, xijt);
            for (n = 0; n <= 8; n++)
            {
                dadr[n] = dadr[n] + 3 * GMDE[j] * xijt[n] / rij5
                    - GMDE[j] * unit[n] / rij3;
            }
        }
    }
    /* no body requested relativistic terms: Newtonian only */
    if (flag_gr == 0)
    {
        for (n = 0; n < 3; n++)
            acc[n] = fnt[n];
        return 0;
    }
    /*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
    /* relativistic corrections: scalar invariants of the point */
    sdi2 = xi[3] * xi[3] + xi[4] * xi[4] + xi[5] * xi[5];
    sumil = 0;      /* sum of GM_l / r_il over relativistic bodies */
    for (l = 0; l < 11; l ++)
    {
        if ( l == exclude)
            continue;
        if (PERB[l] != 2)
            continue;
        sumil = sumil + GMDE[l] / rij[l];
    }
    for (n = 0; n < 3; n ++)
        fgr[n] = 0;
    for (j = 0; j < 11; j ++)
    {
        if (PERB[j] != 2)
            continue;
        if (j == exclude)
            continue;
        /* potential at body j (sumjk) and its Newtonian acceleration (xddj) */
        sumjk = 0;
        for (n = 0; n < 3; n ++)
            xddj[n] = 0;
        for (k = 0; k < 11; k ++)
        {
            if (k == j)
                continue; //k!=j
            if (PERB[k] != 2)
                continue;
            for (n = 0; n < 3; n++)
                xjk[n] = xj[j][n] - xj[k][n];
            rjk = sqrt (xjk[0] * xjk[0] + xjk[1] * xjk[1] + xjk[2] * xjk[2]);
            sumjk = sumjk + GMDE[k] / rjk;
            for (n = 0; n < 3; n ++)
                xddj[n] = xddj[n] - GMDE[k] / (rjk * rjk * rjk) * xjk[n];
        }
        /*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
        /* scalar pieces of the 1/c^2 bracket */
        sdj2 = xj[j][3] * xj[j][3] + xj[j][4] * xj[j][4]
            + xj[j][5] * xj[j][5];
        rdirdj = xi[3] * xj[j][3] + xi[4] * xj[j][4] + xi[5] * xj[j][5];
        rrrdr2 = pow( ( xij[j][0] * xj[j][3] + xij[j][1] * xj[j][4]
            + xij[j][2] * xj[j][5]) / rij[j], 2);
        rjirdd = - ( xij[j][0] * xddj[0] + xij[j][1] * xddj[1]
            + xij[j][2] * xddj[2]);
        gra = - 2 * (beta + gamma) * sumil - (2 * beta -1) * sumjk
            + gamma * sdi2 + (1 + gamma) * sdj2
            - 2 * (1 + gamma) * rdirdj - 1.5 * rrrdr2 + 0.5 * rjirdd;
        grb = xij[j][0] * ((2+2*gamma) * xi[3] - (1+2*gamma) * xj[j][3])
            + xij[j][1] * ((2+2*gamma) * xi[4] - (1+2*gamma) * xj[j][4])
            + xij[j][2] * ((2+2*gamma) * xi[5] - (1+2*gamma) * xj[j][5]);
        /* assemble the three 1/c^2 contributions for body j */
        for (n = 0; n < 3; n ++)
        {
            fgr[n] = fgr[n]
                + GMDE[j] / (rij[j] * rij[j] * rij[j])
                * ( - xij[j][n]) * gra / C / C
                + GMDE[j] / (rij[j] * rij[j] * rij[j])
                * xij[j][n + 3] * grb / C / C
                + GMDE[j] / rij[j] * (3 + 4 * gamma) * 0.5
                * xddj[n] / C / C;
        }
    }
    for (n = 0; n < 3; n++)
        acc[n] = fgr[n] + fnt[n];
    return 1;
}
/*
 * accel_sr_part - cannonball solar radiation pressure (SRP) acceleration
 * and, on request, its partials with respect to position and the SRP
 * scale parameters SRPB / SRPT.
 *
 * tjd     : two-part Julian date
 * xic     : spacecraft state relative to the central body CT
 * acc     : output SRP acceleration (3)
 * part    : when 1, also fill dadr (3x3), dadsrpb (3), dadsrpt (3)
 * dadr    : partial of acc w.r.t. position
 * dadsrpb : partial of acc w.r.t. SRPB
 * dadsrpt : partial of acc w.r.t. SRPT
 *
 * Model: f = (J/c) * AU^2 * (A/m) / r^2 along the Sun->probe unit
 * vector, scaled by (1 + SRPB + SRPT * t).
 * (Cleanup: unused locals ap, m, ssbary removed; common scale factor
 * hoisted -- no behavioral change.)
 */
double accel_sr_part (double *tjd, double *xic, double *acc, int part,
    double *dadr, double *dadsrpb, double *dadsrpt)
{
    double j, c1, rsp, usp[3], xis[6], xsc[6], f, scale,
        xist[9], unit[9], rsp3;
    short int n, sun;
    /* zero area-to-mass ratio: SRP disabled, all outputs are zero */
    if (AMR == 0)
    {
        if (part == 1)
        {
            for (n = 0; n <= 8; n++)
            {
                dadr[n] = 0;
            }
            /* NOTE(review): zeroes 4 entries of dadsrpb/dadsrpt though
             * only 3 are filled below -- kept as in the original */
            for (n = 0; n <= 3; n++)
            {
                dadsrpb[n] = 0;
                dadsrpt[n] = 0;
            }
        }
        for (n = 0; n <= 2; n++)
        {
            acc[n] = 0;
        }
        return 0;
    }
    sun = 10;
    unit[0] = 1; unit[1] = 0; unit[2] = 0;
    unit[3] = 0; unit[4] = 1; unit[5] = 0;
    unit[6] = 0; unit[7] = 0; unit[8] = 1;
    /* Sun state relative to the central body; Sun->probe vector */
    // planet_ephemeris (tjd, sun, CT, &xsc[0], &xsc[3]);
    get_ephemeris (tjd, sun, CT, xsc);
    for (n = 0; n <= 5; n++)
    {
        xis[n] = xic[n] - xsc[n];
    }
    rsp = sqrt (xis[0] * xis[0] + xis[1] * xis[1] + xis[2] * xis[2]);
    usp[0] = xis[0] / rsp;
    usp[1] = xis[1] / rsp;
    usp[2] = xis[2] / rsp;
    j = 1352.5; /* solar irradiance at 1 AU, W/m^2 */
    // j = 1359.4;
    c1 = j / C * AU * AU;       /* radiation pressure scaled by AU^2 */
    f = c1 * AMR / rsp / rsp;   /* cannonball magnitude at distance rsp */
    /* common SRP scale factor, linear in time past epoch */
    scale = 1 + SRPB + SRPT * tjd[1];
    for (n = 0; n < 3; n++)
    {
        acc[n] = f * usp[n] * scale;
    }
    if (part == 0)
        return 1;
    /* da/dr = -f * (3 r r^T / r^3 - I / r) * scale */
    rsp3 = rsp * rsp * rsp;
    brmul (xis, xis, 3,1,3, xist);
    for (n = 0; n <= 8; n++)
    {
        dadr[n] = - f * (3 * xist[n] / rsp3 - unit[n] / rsp) * scale;
    }
    for (n = 0; n <= 2; n++)
    {
        dadsrpb[n] = f * usp[n];
        dadsrpt[n] = f * usp[n] * tjd[1];
    }
    return 0;
}
/*
 * accel_gt_part - gravity-field plus tidal acceleration of the central
 * body (spherical harmonics in the body-fixed frame) and, on request,
 * its partials w.r.t. position (dadr) and the Love number k2 (dadk2).
 *
 * tjd   : two-part Julian date
 * xic   : spacecraft state relative to the central body
 * acc   : output acceleration (3), in the inertial frame
 * part  : when 1, fill dadr (3x3) and, if MTK2==1, dadk2 (3)
 *
 * Solid-tide coefficients are selected by STIDE/CT: IERS anelastic
 * model, full stidecs, or the single-k2 model with a body list that
 * depends on the central body (CT==2 Earth, 9 Moon, 20 a satellite).
 */
double accel_gt_part (double *tjd, double *xic, double *acc, int part,
    double *dadr, double *dadk2)
{
    int n,k, lps, ntide, blst[12], nb;
    double GM, radius, *tmp, pi[3], pe[3], llr[3], c_ie[9], c_ei[9], ae[3], ai[3];
    double jd0, tt, utc, te[9], tx[3], ty[3], tz[3], ao[3], as[3], ag[3], dadk2e[3],
        vx[3] = {1,0,0}, vy[3] = {0,1,0}, vz[3] = {0,0,1}, dadre[9], dadres[9], dadrei[9];
    InfStruct info;
    GM = GMCT;
    radius = RCT;
    for (n = 0; n <= 2; n++)
    {
        acc[n] = 0;
    }
    /* degree < 2 means no harmonic field configured: return zeros */
    if (NMAX < 2)
    {
        if (part == 1)
        {
            for (n = 0; n <= 8; n++)
            {
                dadr[n] = 0;
            }
            for (n = 0; n <= 3; n++)
            {
                dadk2[n] = 0;
            }
        }
        return 0;
    }
    for (n = 0; n < 3; n++)
    {
        pi[n] = xic[n];
    }
    /* rotation matrices and time quantities for this epoch */
    getinfo(tjd, &info);
    /* inertial -> body-fixed position, then lat/lon/radius */
    brmul(info.c_ie, pi, 3, 3, 1, pe);
    xyz2llh(pe, llr);
    // cs2acc (llr, COEFG, GM, radius, NMAX, ae);
    /* static field: acceleration and (if part) da/dr, body-fixed frame */
    cs2ada (llr, COEFG, NMAX, ae, part, dadre, 1);
    brmul(info.c_ei, ae, 3, 3, 1, ag);
    for (n = 0; n < 3; n++)
    {
        acc[n] = ag[n];
    }
    if (STIDE != 0)
    {
        /* build the solid-tide coefficient set COEFS for this model/body */
        if (STIDE == 3 && CT == 2)
            stidecs_Anelastic(&info, 1, COEFS);
        else if (STIDE == 2 && CT == 2)
            stidecs(tjd, info.c_ie, 1, COEFS);
        else if (CT == 2)
        {
            blst[0] = 10; blst[1] = 9; nb = 2;  /* Earth: Sun + Moon */
            stidecs_k2 (&info, K2, COEFS, blst, nb);
        }
        else if (CT == 9)
        {
            blst[0] = 10; blst[1] = 2; nb = 2;  /* Moon: Sun + Earth */
            stidecs_k2 (&info, K2, COEFS, blst, nb);
        }
        else if (CT == 20)
        {
            blst[0] = 10; blst[1] = 5; nb = 2;
            stidecs_k2 (&info, K2, COEFS, blst, nb);
        }
        else
        {
            blst[0] = 10; nb = 1;               /* default: Sun only */
            stidecs_k2 (&info, K2, COEFS, blst, nb);
        }
        // cs2acc (llr, COEFS, GM, radius, NSMAX, ae);
        cs2ada (llr, COEFS, NSMAX, ae, part, dadres, 0);
        brmul(info.c_ei, ae, 3, 3, 1, as);
        for (n = 0; n < 3; n++)
        {
            acc[n] = acc[n] + as[n];
        }
        /* accumulate the tide contribution into the da/dr partial */
        for (n = 0; n <= 8; n++)
        {
            dadre[n] = dadre[n] + dadres[n];
        }
    }
    if (OTIDE != 0) // N.A.
    {
        otidecs(info.jdt, info.gmst, NOMAX, COEFO);
        cs2acc (llr, COEFO, GM, radius, NOMAX, ae);
        brmul(info.c_ei, ae, 3, 3, 1, ao);
        for (n = 0; n < 3; n++)
        {
            acc[n] = acc[n] + ao[n];
        }
    }
    if (part == 0)
        return 1;
    /* rotate the body-fixed partial into the inertial frame:
     * dadr = C_ei * dadre * C_ie */
    brmul(dadre, info.c_ie, 3, 3, 3, dadrei);
    brmul(info.c_ei, dadrei, 3, 3, 3, dadr);
    if (MTK2 == 1)
    {
        /* NOTE(review): blst/nb are only assigned inside the STIDE!=0
         * k2 branches above; on the STIDE==0 or STIDE==2/3 paths they
         * are read here uninitialized -- confirm MTK2 is only enabled
         * together with the k2 solid-tide model. */
        stidecs_k2 (&info, 1, COEFS, blst, nb);
        cs2ada (llr, COEFS, NSMAX, dadk2e, 0, dadres, 0);
        brmul(info.c_ei, dadk2e, 3, 3, 1, dadk2);
    }
    if (MGCS > 0)
    {
        /* rotate per-coefficient partials (filled by cs2ada) to inertial */
        for (k = 0; k < MGCS; k ++)
        {
            brmul(info.c_ei, CSinfo[k].dadcse, 3, 3, 1, CSinfo[k].dadcs);
        }
    }
    return 0;
}
// nmax = 4;
// stcs = (double *) calloc ( (nmax + 1) * (nmax + 1), sizeof(double));
/*
 * stidecs_k2 - degree-2 solid-tide Stokes coefficient corrections from a
 * single Love number k2, summed over the perturbing bodies in body[].
 *
 * info  : epoch info (jd0, tt, inertial->fixed matrix c_ie)
 * k2    : degree-2 Love number
 * stcs  : output packed coefficient array (C20,C21,S21,C22,S22 slots
 *         filled, all other degree<=2 slots zeroed)
 * body  : list of perturbing-body ids (ids > 12 are skipped)
 * nbody : number of entries in body[]
 */
double stidecs_k2(InfStruct *info, double k2, double *stcs, int *body, int nbody)
{
    int ib, b;
    double xb[6], gmratio, tjd[2], pbf[3], llrb[3], pbar[4], sinlat,
        p20, p21, p22, rr, rr3, lonr,
        dc20, dc21, ds21, dc22, ds22;
    tjd[0] = info->jd0;
    tjd[1] = info->tt/86400.0;
    dc20 = 0; dc21 = 0; ds21 = 0; dc22 = 0; ds22 = 0;
    for (ib = 0; ib < nbody; ib++)
    {
        b = body[ib];
        if (b > 12)
            continue;
        /* body state relative to the central body, rotated body-fixed */
        get_ephemeris (tjd, b, CT, xb);
        // planet_ephemeris (tjd, sun, earth, ps, vs);
        brmul (info->c_ie, xb, 3, 3, 1, pbf);
        xyz2llh(pbf, llrb);
        /* normalized Legendre functions at the body's latitude */
        sinlat = sin(llrb[0] * DEG2RAD);
        lgdr(sinlat, 3, 0, pbar); p20 = pbar[2];
        lgdr(sinlat, 3, 1, pbar); p21 = pbar[1];
        lgdr(sinlat, 3, 2, pbar); p22 = pbar[0];
        lgdr(sinlat, 3, 3, pbar);
        gmratio = GMDE[b]/GMCT;
        rr = RCT / llrb[2];
        rr3 = pow(rr, 3);
        lonr = llrb[1] * DEG2RAD;
        /* standard k2 tide formulas, degree 2 only */
        dc20 += k2/5.0 * ( gmratio * rr3 * p20 );
        dc21 += k2/5.0 * ( gmratio * rr3 * p21 * cos(lonr) );
        ds21 += k2/5.0 * ( gmratio * rr3 * p21 * sin(lonr) );
        dc22 += k2/5.0 * ( gmratio * rr3 * p22 * cos(lonr * 2.0) );
        ds22 += k2/5.0 * ( gmratio * rr3 * p22 * sin(lonr * 2.0) );
    }
    stcs[0] = 0; //c00
    stcs[1] = 0; //c10
    stcs[2] = dc20;
    stcs[3] = 0;
    stcs[4] = dc21;
    stcs[5] = 0; //s11
    stcs[6] = ds21;
    stcs[7] = dc22;
    stcs[8] = ds22;
    return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
 * earth_fullaccel - assemble the full force model for an Earth orbiter
 * and return the time derivative of the state vector.
 * @jd, @tt: epoch (Julian date and day fraction past jd)
 * @xic: inertial state (position, velocity)
 * @fxic: output derivative of xic (velocity, acceleration)
 *
 * version: 20 Aug 2010
 */
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
 * earth_fullaccel - total acceleration model for an Earth orbiter.
 *
 * Sums point-mass (with optional relativity), n-body, solar radiation
 * pressure, gravity field, solid tide and ocean tide contributions as
 * enabled by the global switches NBODY, AMR, NMAX and RELTIV, then
 * returns the state derivative in fxic.
 */
double earth_fullaccel (double jd, double tt, double *xic, double *fxic)
{
    int i;
    double tjd[2], a_pm[3], a_pmgr[3], a_nb[3], a_nbgr[3], a_srp[3],
        a_grav[3], a_stide[3], a_otide[3], total[3];
    tjd[0] = jd;
    tjd[1] = tt;
    /* start every optional contribution at zero; enabled models
     * overwrite their arrays below */
    for (i = 0; i < 3; i++)
    {
        a_nb[i] = 0;
        a_nbgr[i] = 0;
        a_srp[i] = 0;
        a_grav[i] = 0;
        a_stide[i] = 0;
        a_otide[i] = 0;
    }
    /* central-body point mass + its relativistic correction */
    accel_pmiers (tjd, xic, a_pm, a_pmgr);
    if (NBODY == 1)
        accel_nbody (tjd, xic, a_nb, a_nbgr);
    if (AMR != 0 )
        accel_slrad (tjd, xic, a_srp);
    if (NMAX >= 2 )
        accel_gravtide (tjd, xic, a_grav, a_stide, a_otide);
    /* RELTIV: 0 drops all relativistic terms, 1 keeps only the
     * point-mass term, anything else keeps both */
    if (RELTIV == 0)
    {
        for (i = 0; i < 3; i++)
        {
            a_pmgr[i] = 0;
            a_nbgr[i] = 0;
        }
    }
    else if (RELTIV == 1)
    {
        for (i = 0; i < 3; i++)
            a_nbgr[i] = 0;
    }
    for (i = 0; i < 3; i++)
        total[i] = a_pm[i] + a_nb[i] + a_srp[i] + a_grav[i]
            + a_stide[i] + a_otide[i] + a_pmgr[i] + a_nbgr[i];
    /* state derivative: d(pos)/dt = vel, d(vel)/dt = acc */
    fxic[0] = xic[3];
    fxic[1] = xic[4];
    fxic[2] = xic[5];
    fxic[3] = total[0];
    fxic[4] = total[1];
    fxic[5] = total[2];
    return 0;
}
/*
 * accel_gravtide - gravity-field, solid-tide and ocean-tide
 * accelerations for the central body, converted to AU/day^2.
 *
 * tjd : two-part Julian date
 * xic : inertial position in AU (only first 3 entries used)
 * ag  : output static-field acceleration
 * as  : output solid-tide acceleration (zero when STIDE==0)
 * ao  : output ocean-tide acceleration (zero when OTIDE==0)
 *
 * NOTE(review): when STIDE==0 but OTIDE!=0, `as` is first computed from
 * whatever COEFS holds and only zeroed at the very end -- the final
 * result is correct, but the flow is convoluted.
 */
double accel_gravtide (double *tjd, double *xic, double *ag, double *as, double *ao)
{
    int n, lps, ntide;
    double GM, radius, *stcs, pi[3], pe[3], llr[3], c_ie[9], c_ei[9], ae[3], ai[3];
    double jd0, tt, utc, te[9], tx[3], ty[3], tz[3],
        vx[3] = {1,0,0}, vy[3] = {0,1,0}, vz[3] = {0,0,1};
    InfStruct info;
    GM = GMCT;
    radius = RCT;
    for (n = 0; n < 3; n++)
    {
        pi[n] = xic[n] * AU;    /* AU -> m */
    }
    getinfo(tjd, &info);
    /*
    printf ("jd0 = %.10f\t lps = %d\t gmt = %.10f\n", info.jd0, info.leaps, info.gmst );
    for (n = 0; n < 9; n++)
        printf ("%e\n", info.c_ie[n]);
    jd0 = tjd[0];
    tt = tjd[1] * 86400.0;
    lps = getlps (jd0 + tt/86400.0);
    utc = tt - (lps + 32.184);
    // printf("%f\t%f\n", jd0, utc);
    itrf2gcrf(jd0, utc, vx, tx);
    itrf2gcrf(jd0, utc, vy, ty);
    itrf2gcrf(jd0, utc, vz, tz);
    for (n = 0; n < 3; n++)
    {
        c_ei[n*3] = tx[n];
        c_ei[n*3+1] = ty[n];
        c_ei[n*3+2] = tz[n]; //c_ei mean ITRF2ICRF
    }
    mt(c_ei, 3, 3, c_ie);
    for (n = 0; n < 9; n++)
        printf ("%e\n", c_ie[n]);
    exit(0);
    */
    /* inertial -> body-fixed, then geodetic lat/lon/radius */
    brmul(info.c_ie, pi, 3, 3, 1, pe);
    xyz2llh(pe, llr);
    // xyz2llh(pi, llr);
    /* static gravity field, body-fixed frame */
    cs2acc (llr, COEFG, GM, radius, NMAX, ae);
    // cs2acc (llr, COEFG, GM, radius, NMAX, ai);
    // printf("ae = %f\t%f\t%f\n", ae[0], ae[1], ae[2]);
    // exit(0);
    brmul(info.c_ei, ae, 3, 3, 1, ai);
    for (n = 0; n < 3; n++)
    {
        ag[n] = ai[n] / AU * 86400 * 86400;     /* m/s^2 -> AU/day^2 */
    }
    if (STIDE == 0 && OTIDE == 0)
    {
        for (n = 0; n < 3; n++)
        {
            as[n] = 0;
            ao[n] = 0;
        }
        return 0;
    }
    // ntide = 4;
    // COEFS = (double *) calloc ( (ntide + 1) * (ntide + 1), sizeof(double));
    // id_perm = PERM;
    // stidecs(tjd, c_ie, 1, stcs);
    /* solid-tide coefficients (STIDE selects the model), then their
     * acceleration */
    if (STIDE == 1)
        stidecs_Anelastic(&info, 1, COEFS);
    if (STIDE == 2)
        stidecs(tjd, info.c_ie, 1, COEFS);
    cs2acc (llr, COEFS, GM, radius, NSMAX, ae);
    brmul(info.c_ei, ae, 3, 3, 1, ai);
    for (n = 0; n < 3; n++)
    {
        as[n] = ai[n] / AU * 86400 * 86400;
    }
    if (OTIDE == 0)
    {
        for (n = 0; n < 3; n++)
        {
            ao[n] = 0;
        }
        return 0;
    }
    // NOMAX = 4;
    // COEFO = (double *) calloc ( (NOMAX + 1) * (NOMAX + 1), sizeof(double));
    /* ocean-tide coefficients and their acceleration */
    otidecs(info.jdt, info.gmst, NOMAX, COEFO);
    // printf ("jdt = %e\t gmst = %f\t NOMAX = %d\t COEFO = %e\n", info.jdt, info.gmst, NOMAX, COEFO[5]);
    // for (n=0;n<25;n++)
    //     printf ("COEFO = %e\t COEFS = %e\n", COEFO[n], COEFS[n]);
    cs2acc (llr, COEFO, GM, radius, NOMAX, ae);
    brmul(info.c_ei, ae, 3, 3, 1, ai);
    for (n = 0; n < 3; n++)
    {
        ao[n] = ai[n] / AU * 86400 * 86400;
    }
    /* STIDE was off: discard the solid-tide value computed above */
    if (STIDE == 0)
    {
        for (n = 0; n < 3; n++)
        {
            as[n] = 0;
        }
        return 0;
    }
    return 0;
}
/*
 * openotcs - read the ocean-tide (FES-style) constituent file into the
 * global otfes[] table and decode each Doodson number into its six
 * integer arguments.
 *
 * Each line holds: Doodson number, constituent name, degree n, order m,
 * and the four amplitude columns cp, sp, cm, sm.
 *
 * NOTE(review): no bound is enforced on the number of lines versus the
 * size of otfes[], and the %s token is unbounded -- assumed safe for
 * the curated input files; TODO confirm and add widths.
 */
int openotcs (char *infile)
{
    FILE *fp_ot;
    int i;
    char string[100];
    if ((fp_ot = fopen (infile,"r")) == NULL)
    {
        printf ("Cannot open otide file?\n");
        exit (0);
    }
    i = 0;
    while (fgets (string, 100, fp_ot) != NULL)
    {
        /* BUG FIX: pass the name field itself (decays to char *) for
         * %s, not its address, whose type char(*)[N] does not match
         * the %s conversion. */
        sscanf (string, "%lf%s%d%d%lf%lf%lf%lf",
            &otfes[i].ds, otfes[i].name, &otfes[i].n, &otfes[i].m,
            &otfes[i].cp, &otfes[i].sp, &otfes[i].cm, &otfes[i].sm);
        /* decode the Doodson digits; fractional digits are recovered by
         * scaling the double -- beware float rounding (kept as before) */
        otfes[i].argn[0] = (int)(otfes[i].ds/100)%10;
        otfes[i].argn[1] = (int)(otfes[i].ds/10)%10 - 5;
        otfes[i].argn[2] = (int)(otfes[i].ds/1)%10 - 5;
        otfes[i].argn[3] = (int)(otfes[i].ds*10)%10 - 5;
        otfes[i].argn[4] = (int)(otfes[i].ds*100)%10 - 5;
        otfes[i].argn[5] = (int)(otfes[i].ds*1000)%10 - 5;
        i++;
    }
    fclose(fp_ot);
    return 0;
}
/*
 * otidecs - accumulate ocean-tide corrections into the packed
 * spherical-harmonic coefficient array coef[] from the constituent
 * table otfes[] loaded by openotcs.
 *
 * jdt, gmst : epoch and Greenwich mean sidereal time for the tidal
 *             argument (currently unused -- see note below)
 * nmax      : maximum degree to accumulate (higher-degree rows skipped)
 * coef      : coefficient array using the same packed (n,m) layout as
 *             cs2acc/cs2ada; entries are ADDED to -- the caller must
 *             provide a suitably initialized array.
 *
 * NOTE(review): the DOODSN call that evaluates the tidal angle is
 * commented out, so `ang` was previously read uninitialized (undefined
 * behavior).  It is now explicitly zeroed, which makes the result
 * deterministic but NOT physically meaningful until DOODSN is restored.
 */
double otidecs(double jdt, double gmst, int nmax, double *coef)
{
    double doodarg[6], ang, cp, sp, cm, sm;
    int i, ncon = 1, n,m, l, ind;
    /* BUG FIX: ang was used uninitialized below; zero it until the
     * DOODSN astronomical-argument evaluation is re-enabled */
    ang = 0;
    for (i = 0; i < NFES; i++)
    {
        if (otfes[i].n > nmax)
        {
            continue;
        }
        n = otfes[i].n;
        m = otfes[i].m;
        cp = otfes[i].cp;
        sp = otfes[i].sp;
        cm = otfes[i].cm;
        sm = otfes[i].sm;
        // DOODSN(&jdt, &gmst, otfes[i].argn, &ncon, doodarg, &ang);
        if (m == 0)
        {
            /* zonal term: only the C(n,0) column exists */
            coef[n] = coef[n] + 1e-11 * ((cp+cm) * cos(ang) + (sp+sm)*sin(ang));
        }
        else
        {
            /* packed index of C(n,m); the matching S(n,m) entry sits l
             * slots later */
            l = nmax - m + 1;
            ind = nmax + 1 + (2 * nmax - m + 2) * (m - 1);
            coef[ind + n - m] = coef[ind + n - m] + 1e-11 * ((cp+cm) * cos(ang) + (sp+sm)*sin(ang));
            coef[ind + n - m + l] = coef[ind + n - m + l] + 1e-11 * ((sp-sm) * cos(ang) - (cp-cm)*sin(ang));
        }
    }
    return 0;
}
// nmax = 4;
// stcs = (double *) calloc ( (nmax + 1) * (nmax + 1), sizeof(double));
/*
 * stidecs_Anelastic - Earth solid-tide Stokes coefficient corrections
 * (degrees 2-4) using the anelastic Love numbers (separate real and
 * imaginary parts for k21/k22), filled into the packed 5x5 array stcs.
 *
 * info    : epoch info (jd0, tt, inertial->fixed matrix c_ie)
 * id_perm : when 1, remove the permanent tide from C20 (zero-tide ->
 *           tide-free convention)
 * stcs    : output array of 25 packed coefficients
 *
 * Earth-specific: uses hard-coded Earth GM/radius and the Sun/Moon
 * ephemerides relative to the Earth.
 */
double stidecs_Anelastic(InfStruct *info, int id_perm, double *stcs)
{
    // double gms2e = 332946.048166;
    // double gmm2e = 1/81.3005690699;
    // double gms2e = 332946.0487185;
    // double gmm2e = 1/81.3005538970823;
    double GMsun = 1.32712442076e20;
    double gms2e, gmm2e = 0.0123000383;     /* mass ratios vs Earth */
    // double c20pt = -4.1736e-9;
    double c20pt = -4.201e-9;               /* permanent-tide C20 offset */
    /* nominal elastic Love numbers (unused here; the anelastic REk/IMk
     * values below are what this routine applies) */
    double k20 = 0.29525;
    double k21 = 0.29470;
    double k22 = 0.29801;
    double REk20 = 0.30190;
    double REk21 = 0.29830;
    double REk22 = 0.30102;
    double IMk21 = -0.00144;
    double IMk22 = -0.00130;
    double k20pa = -0.00089;                /* degree-4 coupling k(2m)+ */
    double k21pa = -0.00080;
    double k22pa = -0.00057;
    double k20p = -0.00087;                 /* elastic k+ set (unused) */
    double k21p = -0.00079;
    double k22p = -0.00057;
    double k30 = 0.093;
    double k31 = 0.093;
    double k32 = 0.093;
    double k33 = 0.094;
    short int moon = 9, earth = 2, sun = 10, n;
    double ps[3], vs[3], pm[3], vm[3], tjd[2],
        pse[3], pme[3], llrs[3], llrm[3], pbar[4], t,
        p20m, p30m, p21m, p31m, p22m, p32m, p33m,
        p20s, p30s, p21s, p31s, p22s, p32s, p33s,
        rerm, rers, c20, c30, c40, c21, s21, c22, s22, c31, s31,
        c32, s32, c33, s33, c41, s41, c42, s42,
        c20f, c21f, s21f, c22f, s22f;
    double GM, radius;
    GM = 398600.44180E+09;      /* Earth GM [m^3/s^2] */
    radius = 6378136.6;         /* Earth reference radius [m] */
    gms2e = GMsun/GM;
    tjd[0] = info->jd0;
    tjd[1] = info->tt/86400.0;
    // Luni-solar ephemeris
    planet_ephemeris (tjd, sun, earth, ps, vs);
    planet_ephemeris (tjd, moon, earth, pm, vm);
    for (n = 0; n < 3; n++)
    {
        ps[n] = ps[n] * AU;
        pm[n] = pm[n] * AU;
    }
    // icrf2itrf(num, ps, pse);
    // icrf2itrf(num, pm, pme);
    brmul (info->c_ie, ps, 3, 3, 1, pse); //inertial to fixed matrix gmat = rmat*tbt
    brmul (info->c_ie, pm, 3, 3, 1, pme); //inertial to fixed matrix gmat = rmat*tbt
    xyz2llh(pse, llrs);
    xyz2llh(pme, llrm);
    /* normalized Legendre functions at the Moon's latitude */
    t = sin(llrm[0] * DEG2RAD);
    lgdr(t, 3, 0, pbar); p20m = pbar[2]; p30m = pbar[3];
    lgdr(t, 3, 1, pbar); p21m = pbar[1]; p31m = pbar[2];
    lgdr(t, 3, 2, pbar); p22m = pbar[0]; p32m = pbar[1];
    lgdr(t, 3, 3, pbar); p33m = pbar[0];
    /* and at the Sun's latitude */
    t = sin(llrs[0] * DEG2RAD);
    lgdr(t, 3, 0, pbar); p20s = pbar[2]; p30s = pbar[3];
    lgdr(t, 3, 1, pbar); p21s = pbar[1]; p31s = pbar[2];
    lgdr(t, 3, 2, pbar); p22s = pbar[0]; p32s = pbar[1];
    lgdr(t, 3, 3, pbar); p33s = pbar[0];
    rerm = radius / llrm[2];
    rers = radius / llrs[2];
    // Frequency Independent Terms
    // C20
    c20 = REk20/5.0 * ( gmm2e * pow(rerm, 3) * p20m
        + gms2e * pow(rers, 3) * p20s );
    // C21/S21 (complex Love number: real + imaginary parts)
    c21 = + REk21/5.0 * ( gmm2e * pow(rerm, 3) * p21m * cos(llrm[1] * DEG2RAD)
        + gms2e * pow(rers, 3) * p21s * cos(llrs[1] * DEG2RAD) )
        + IMk21/5.0 * ( gmm2e * pow(rerm, 3) * p21m * sin(llrm[1] * DEG2RAD)
        + gms2e * pow(rers, 3) * p21s * sin(llrs[1] * DEG2RAD) );
    s21 = - IMk21/5.0 * ( gmm2e * pow(rerm, 3) * p21m * cos(llrm[1] * DEG2RAD)
        + gms2e * pow(rers, 3) * p21s * cos(llrs[1] * DEG2RAD) )
        + REk21/5.0 * ( gmm2e * pow(rerm, 3) * p21m * sin(llrm[1] * DEG2RAD)
        + gms2e * pow(rers, 3) * p21s * sin(llrs[1] * DEG2RAD) );
    // C22/S22
    c22 = + REk22/5.0 * ( gmm2e * pow(rerm, 3) * p22m * cos(llrm[1] * DEG2RAD * 2.0)
        + gms2e * pow(rers, 3) * p22s * cos(llrs[1] * DEG2RAD * 2.0) )
        + IMk22/5.0 * ( gmm2e * pow(rerm, 3) * p22m * sin(llrm[1] * DEG2RAD * 2.0)
        + gms2e * pow(rers, 3) * p22s * sin(llrs[1] * DEG2RAD * 2.0) );
    s22 = - IMk22/5.0 * ( gmm2e * pow(rerm, 3) * p22m * cos(llrm[1] * DEG2RAD * 2.0)
        + gms2e * pow(rers, 3) * p22s * cos(llrs[1] * DEG2RAD * 2.0) )
        + REk22/5.0 * ( gmm2e * pow(rerm, 3) * p22m * sin(llrm[1] * DEG2RAD * 2.0)
        + gms2e * pow(rers, 3) * p22s * sin(llrs[1] * DEG2RAD * 2.0) );
    // C30
    c30 = k30/7.0 * ( gmm2e * pow(rerm, 4) * p30m
        + gms2e * pow(rers, 4) * p30s );
    // C31/S31
    c31 = k31/7.0 * ( gmm2e * pow(rerm, 4) * p31m * cos(llrm[1] * DEG2RAD)
        + gms2e * pow(rers, 4) * p31s * cos(llrs[1] * DEG2RAD) );
    s31 = k31/7.0 * ( gmm2e * pow(rerm, 4) * p31m * sin(llrm[1] * DEG2RAD)
        + gms2e * pow(rers, 4) * p31s * sin(llrs[1] * DEG2RAD) );
    // C32/S32
    c32 = k32/7.0 * ( gmm2e * pow(rerm, 4) * p32m * cos(llrm[1] * DEG2RAD * 2.0)
        + gms2e * pow(rers, 4) * p32s * cos(llrs[1] * DEG2RAD * 2.0) );
    s32 = k32/7.0 * ( gmm2e * pow(rerm, 4) * p32m * sin(llrm[1] * DEG2RAD * 2.0)
        + gms2e * pow(rers, 4) * p32s * sin(llrs[1] * DEG2RAD * 2.0) );
    // C33/S33
    c33 = k33/7.0 * ( gmm2e * pow(rerm, 4) * p33m * cos(llrm[1] * DEG2RAD * 3.0)
        + gms2e * pow(rers, 4) * p33s * cos(llrs[1] * DEG2RAD * 3.0) );
    s33 = k33/7.0 * ( gmm2e * pow(rerm, 4) * p33m * sin(llrm[1] * DEG2RAD * 3.0)
        + gms2e * pow(rers, 4) * p33s * sin(llrs[1] * DEG2RAD * 3.0) );
    // C40 (degree-4 terms driven by the degree-2 potential via k+)
    c40 = k20pa/5.0* ( gmm2e * pow(rerm, 3) * p20m
        + gms2e * pow(rers, 3) * p20s );
    // C41/S41
    c41 = k21pa/5.0* ( gmm2e * pow(rerm, 3) * p21m * cos(llrm[1] * DEG2RAD)
        + gms2e * pow(rers, 3) * p21s * cos(llrs[1] * DEG2RAD) );
    s41 = k21pa/5.0* ( gmm2e * pow(rerm, 3) * p21m * sin(llrm[1] * DEG2RAD)
        + gms2e * pow(rers, 3) * p21s * sin(llrs[1] * DEG2RAD) );
    // C42/S42
    c42 = k22pa/5.0* ( gmm2e * pow(rerm, 3) * p22m * cos(llrm[1] * DEG2RAD * 2.0)
        + gms2e * pow(rers, 3) * p22s * cos(llrs[1] * DEG2RAD * 2.0) );
    s42 = k22pa/5.0* ( gmm2e * pow(rerm, 3) * p22m * sin(llrm[1] * DEG2RAD * 2.0)
        + gms2e * pow(rers, 3) * p22s * sin(llrs[1] * DEG2RAD * 2.0) );
    /* pack into the (n,m) coefficient layout used by cs2acc/cs2ada */
    stcs[0] = 0; //c00;
    stcs[1] = 0; //c10;
    stcs[2] = c20;
    stcs[3] = c30;
    stcs[4] = c40;
    stcs[5] = 0; //c11;
    stcs[6] = c21;
    stcs[7] = c31;
    stcs[8] = c41;
    stcs[9] = 0; //s11;
    stcs[10] = s21;
    stcs[11] = s31;
    stcs[12] = s41;
    stcs[13] = c22;
    stcs[14] = c32;
    stcs[15] = c42;
    stcs[16] = s22;
    stcs[17] = s32;
    stcs[18] = s42;
    stcs[19] = c33;
    stcs[20] = 0; //c43;
    stcs[21] = s33;
    stcs[22] = 0; //s43;
    stcs[23] = 0; //c44;
    stcs[24] = 0; //s44;
    // Frequency Dependent Terms (currently disabled; corrections are zero)
    c20f = 0; c21f = 0; s21f = 0; c22f = 0; s22f = 0;
    // stfrqdep(info->jdt, info->gmst, &c20f, &c21f, &s21f, &c22f, &s22f);
    stcs[2] = c20 + c20f;
    stcs[6] = c21 + c21f;
    stcs[10] = s21 + s21f;
    stcs[13] = c22 + c22f;
    stcs[16] = s22 + s22f;
    /* remove the permanent tide from C20 when requested */
    if(id_perm==1)
    {
        stcs[2] = c20 + c20f - c20pt;
    }
    return 0;
}
// nmax = 4;
// stcs = (double *) calloc ( (nmax + 1) * (nmax + 1), sizeof(double));
/*
 * stidecs - Earth solid-tide Stokes coefficient corrections
 * (degrees 2-4) using the nominal ELASTIC Love numbers; companion to
 * stidecs_Anelastic, which uses the complex anelastic values.
 *
 * tjd     : two-part Julian date
 * c_ie    : inertial -> Earth-fixed rotation matrix (3x3, row-major)
 * id_perm : when 1, remove the permanent tide from C20
 * stcs    : output array of 25 packed coefficients
 *
 * Earth-specific: hard-coded Earth GM/radius; Sun and Moon positions
 * taken relative to the Earth.
 */
double stidecs(double *tjd, double *c_ie, int id_perm, double *stcs)
{
    // double gms2e = 332946.048166;
    // double gmm2e = 1/81.3005690699;
    // double gms2e = 332946.0487185;
    // double gmm2e = 1/81.3005538970823;
    double GMsun = 1.32712442076e20;
    double gms2e, gmm2e = 0.0123000383;     /* mass ratios vs Earth */
    // double c20pt = -4.1736e-9;
    double c20pt = -4.201e-9;               /* permanent-tide C20 offset */
    double k20 = 0.29525;
    double k21 = 0.29470;
    double k22 = 0.29801;
    /*
    double REk20 = 0.30190;
    double REk21 = 0.29830;
    double REk22 = 0.30102;
    double IMk21 = −0.00144;
    double IMk22 = −0.00130;
    double k20pa = −0.00089;
    double k21pa = −0.00080;
    double k22pa = −0.00057;
    */
    double k20p = -0.00087;                 /* degree-4 coupling k+ */
    double k21p = -0.00079;
    double k22p = -0.00057;
    double k30 = 0.093;
    double k31 = 0.093;
    double k32 = 0.093;
    double k33 = 0.094;
    short int moon = 9, earth = 2, sun = 10, n;
    double ps[3], vs[3], pm[3], vm[3],
        pse[3], pme[3], llrs[3], llrm[3], pbar[4], t,
        p20m, p30m, p21m, p31m, p22m, p32m, p33m,
        p20s, p30s, p21s, p31s, p22s, p32s, p33s,
        rerm, rers, c20, c30, c40, c21, s21, c22, s22, c31, s31,
        c32, s32, c33, s33, c41, s41, c42, s42,
        c20f, c21f, s21f, c22f, s22f;
    double GM, radius;
    GM = 398600.44180E+09;      /* Earth GM [m^3/s^2] */
    radius = 6378136.6;         /* Earth reference radius [m] */
    gms2e = GMsun/GM;
    // tjd[0] = info[num].jd0;
    // tjd[1] = info[num].tt/86400.0;
    // Luni-solar ephemeris
    planet_ephemeris (tjd, sun, earth, ps, vs);
    planet_ephemeris (tjd, moon, earth, pm, vm);
    for (n = 0; n < 3; n++)
    {
        ps[n] = ps[n] * AU;
        pm[n] = pm[n] * AU;
    }
    // icrf2itrf(num, ps, pse);
    // icrf2itrf(num, pm, pme);
    brmul (c_ie, ps, 3, 3, 1, pse); //inertial to fixed matrix gmat = rmat*tbt
    brmul (c_ie, pm, 3, 3, 1, pme); //inertial to fixed matrix gmat = rmat*tbt
    xyz2llh(pse, llrs);
    xyz2llh(pme, llrm);
    /* normalized Legendre functions at the Moon's latitude */
    t = sin(llrm[0] * DEG2RAD);
    lgdr(t, 3, 0, pbar); p20m = pbar[2]; p30m = pbar[3];
    lgdr(t, 3, 1, pbar); p21m = pbar[1]; p31m = pbar[2];
    lgdr(t, 3, 2, pbar); p22m = pbar[0]; p32m = pbar[1];
    lgdr(t, 3, 3, pbar); p33m = pbar[0];
    /* and at the Sun's latitude */
    t = sin(llrs[0] * DEG2RAD);
    lgdr(t, 3, 0, pbar); p20s = pbar[2]; p30s = pbar[3];
    lgdr(t, 3, 1, pbar); p21s = pbar[1]; p31s = pbar[2];
    lgdr(t, 3, 2, pbar); p22s = pbar[0]; p32s = pbar[1];
    lgdr(t, 3, 3, pbar); p33s = pbar[0];
    rerm = radius / llrm[2];
    rers = radius / llrs[2];
    // Frequency Independent Terms
    // C20
    c20 = k20/5.0 * ( gmm2e * pow(rerm, 3) * p20m
        + gms2e * pow(rers, 3) * p20s );
    // C21/S21
    c21 = k21/5.0 * ( gmm2e * pow(rerm, 3) * p21m * cos(llrm[1] * DEG2RAD)
        + gms2e * pow(rers, 3) * p21s * cos(llrs[1] * DEG2RAD) );
    s21 = k21/5.0 * ( gmm2e * pow(rerm, 3) * p21m * sin(llrm[1] * DEG2RAD)
        + gms2e * pow(rers, 3) * p21s * sin(llrs[1] * DEG2RAD) );
    // C22/S22
    c22 = k22/5.0 * ( gmm2e * pow(rerm, 3) * p22m * cos(llrm[1] * DEG2RAD * 2.0)
        + gms2e * pow(rers, 3) * p22s * cos(llrs[1] * DEG2RAD * 2.0) );
    s22 = k22/5.0 * ( gmm2e * pow(rerm, 3) * p22m * sin(llrm[1] * DEG2RAD * 2.0)
        + gms2e * pow(rers, 3) * p22s * sin(llrs[1] * DEG2RAD * 2.0) );
    // C30
    c30 = k30/7.0 * ( gmm2e * pow(rerm, 4) * p30m
        + gms2e * pow(rers, 4) * p30s );
    // C31/S31
    c31 = k31/7.0 * ( gmm2e * pow(rerm, 4) * p31m * cos(llrm[1] * DEG2RAD)
        + gms2e * pow(rers, 4) * p31s * cos(llrs[1] * DEG2RAD) );
    s31 = k31/7.0 * ( gmm2e * pow(rerm, 4) * p31m * sin(llrm[1] * DEG2RAD)
        + gms2e * pow(rers, 4) * p31s * sin(llrs[1] * DEG2RAD) );
    // C32/S32
    c32 = k32/7.0 * ( gmm2e * pow(rerm, 4) * p32m * cos(llrm[1] * DEG2RAD * 2.0)
        + gms2e * pow(rers, 4) * p32s * cos(llrs[1] * DEG2RAD * 2.0) );
    s32 = k32/7.0 * ( gmm2e * pow(rerm, 4) * p32m * sin(llrm[1] * DEG2RAD * 2.0)
        + gms2e * pow(rers, 4) * p32s * sin(llrs[1] * DEG2RAD * 2.0) );
    // C33/S33
    c33 = k33/7.0 * ( gmm2e * pow(rerm, 4) * p33m * cos(llrm[1] * DEG2RAD * 3.0)
        + gms2e * pow(rers, 4) * p33s * cos(llrs[1] * DEG2RAD * 3.0) );
    s33 = k33/7.0 * ( gmm2e * pow(rerm, 4) * p33m * sin(llrm[1] * DEG2RAD * 3.0)
        + gms2e * pow(rers, 4) * p33s * sin(llrs[1] * DEG2RAD * 3.0) );
    // C40 (degree-4 terms driven by the degree-2 potential via k+)
    c40 = k20p/5.0* ( gmm2e * pow(rerm, 3) * p20m
        + gms2e * pow(rers, 3) * p20s );
    // C41/S41
    c41 = k21p/5.0* ( gmm2e * pow(rerm, 3) * p21m * cos(llrm[1] * DEG2RAD)
        + gms2e * pow(rers, 3) * p21s * cos(llrs[1] * DEG2RAD) );
    s41 = k21p/5.0* ( gmm2e * pow(rerm, 3) * p21m * sin(llrm[1] * DEG2RAD)
        + gms2e * pow(rers, 3) * p21s * sin(llrs[1] * DEG2RAD) );
    // C42/S42
    c42 = k22p/5.0* ( gmm2e * pow(rerm, 3) * p22m * cos(llrm[1] * DEG2RAD * 2.0)
        + gms2e * pow(rers, 3) * p22s * cos(llrs[1] * DEG2RAD * 2.0) );
    s42 = k22p/5.0* ( gmm2e * pow(rerm, 3) * p22m * sin(llrm[1] * DEG2RAD * 2.0)
        + gms2e * pow(rers, 3) * p22s * sin(llrs[1] * DEG2RAD * 2.0) );
    /* pack into the (n,m) coefficient layout used by cs2acc/cs2ada */
    stcs[0] = 0; //c00;
    stcs[1] = 0; //c10;
    stcs[2] = c20;
    stcs[3] = c30;
    stcs[4] = c40;
    stcs[5] = 0; //c11;
    stcs[6] = c21;
    stcs[7] = c31;
    stcs[8] = c41;
    stcs[9] = 0; //s11;
    stcs[10] = s21;
    stcs[11] = s31;
    stcs[12] = s41;
    stcs[13] = c22;
    stcs[14] = c32;
    stcs[15] = c42;
    stcs[16] = s22;
    stcs[17] = s32;
    stcs[18] = s42;
    stcs[19] = c33;
    stcs[20] = 0; //c43;
    stcs[21] = s33;
    stcs[22] = 0; //s43;
    stcs[23] = 0; //c44;
    stcs[24] = 0; //s44;
    // Frequency Dependent Terms (currently disabled; corrections are zero)
    c20f = 0; c21f = 0; s21f = 0; c22f = 0; s22f = 0;
    // stfrqdep(info.jdt, info.gmst, &c20f, &c21f, &s21f, &c22f, &s22f);
    stcs[2] = c20 + c20f;
    stcs[6] = c21 + c21f;
    stcs[10] = s21 + s21f;
    stcs[13] = c22 + c22f;
    stcs[16] = s22 + s22f;
    /* remove the permanent tide from C20 when requested */
    if(id_perm==1)
    {
        stcs[2] = c20 + c20f - c20pt;
    }
    return 0;
}
double stfrqdep(double jdt, double gmst, double *c20f, double *c21f, double *s21f, double *c22f, double *s22f)
{
double sets[71][8] = {
0,5,5,5,6,5, 16.6e-12, -6.7e-12,
0,5,5,5,7,5, -0.1e-12, 0.1e-12,
0,5,6,5,5,4, -1.2e-12, 0.8e-12,
0,5,7,5,5,5, -5.5e-12, 4.3e-12,
0,5,7,5,6,5, 0.1e-12, -0.1e-12,
0,5,8,5,5,4, -0.3e-12, 0.2e-12,
0,6,3,6,5,5, -0.3e-12, 0.7e-12,
0,6,5,4,4,5, 0.1e-12, -0.2e-12,
0,6,5,4,5,5, -1.2e-12, 3.7e-12,
0,6,5,4,6,5, 0.1e-12, -0.2e-12,
0,6,5,6,5,5, 0.1e-12, -0.2e-12,
0,7,3,5,5,5, 0.0e-12, 0.6e-12,
0,7,5,3,5,5, 0.0e-12, 0.3e-12,
0,7,5,5,5,5, 0.6e-12, 6.3e-12,
0,7,5,5,6,5, 0.2e-12, 2.6e-12,
0,7,5,5,7,5, 0.0e-12, 0.2e-12,
0,8,3,6,5,5, 0.1e-12, 0.2e-12,
0,8,5,4,5,5, 0.4e-12, 1.1e-12,
0,8,5,4,6,5, 0.2e-12, 0.5e-12,
0,9,3,5,5,5, 0.1e-12, 0.2e-12,
0,9,5,3,5,5, 0.1e-12, 0.1e-12,
1,2,5,7,5,5, -0.1e-12, 0.0e-12,
1,2,7,5,5,5, -0.1e-12, 0.0e-12,
1,3,5,6,4,5, -0.1e-12, 0.0e-12,
1,3,5,6,5,5, -0.7e-12, 0.1e-12,
1,3,7,4,5,5, -0.1e-12, 0.0e-12,
1,4,5,5,4,5, -1.3e-12, 0.1e-12,
1,4,5,5,5,5, -6.8e-12, 0.6e-12,
1,4,7,5,5,5, 0.1e-12, 0.0e-12,
1,5,3,6,5,5, 0.1e-12, 0.0e-12,
1,5,5,4,4,5, 0.1e-12, 0.0e-12,
1,5,5,4,5,5, 0.4e-12, 0.0e-12,
1,5,5,6,5,5, 1.3e-12, -0.1e-12,
1,5,5,6,6,5, 0.3e-12, 0.0e-12,
1,5,7,4,5,5, 0.3e-12, 0.0e-12,
1,5,7,4,6,5, 0.1e-12, 0.0e-12,
1,6,2,5,5,6, -1.9e-12, 0.1e-12,
1,6,3,5,4,5, 0.5e-12, 0.0e-12,
1,6,3,5,5,5, -43.4e-12, 2.9e-12,
1,6,4,5,5,4, 0.6e-12, 0.0e-12,
1,6,4,5,5,6, 1.6e-12, -0.1e-12,
1,6,5,3,4,5, 0.1e-12, 0.0e-12,
1,6,5,5,3,5, 0.1e-12, 0.0e-12,
1,6,5,5,4,5, -8.8e-12, 0.5e-12,
1,6,5,5,5,5, 470.9e-12, -30.2e-12,
1,6,5,5,6,5, 68.1e-12, -4.6e-12,
1,6,5,5,7,5, -1.6e-12, 0.1e-12,
1,6,6,4,5,5, 0.1e-12, 0.0e-12,
1,6,6,5,4,4, -0.1e-12, 0.0e-12,
1,6,6,5,5,4, -20.6e-12, -0.3e-12,
1,6,6,5,5,6, 0.3e-12, 0.0e-12,
1,6,6,5,6,4, -0.3e-12, 0.0e-12,
1,6,7,3,5,5, -0.2e-12, 0.0e-12,
1,6,7,3,6,5, -0.1e-12, 0.0e-12,
1,6,7,5,5,5, -5.0e-12, 0.3e-12,
1,6,7,5,6,5, 0.2e-12, 0.0e-12,
1,6,8,5,5,4, -0.2e-12, 0.0e-12,
1,7,3,6,5,5, -0.5e-12, 0.0e-12,
1,7,3,6,6,5, -0.1e-12, 0.0e-12,
1,7,5,4,4,5, 0.1e-12, 0.0e-12,
1,7,5,4,5,5, -2.1e-12, 0.1e-12,
1,7,5,4,6,5, -0.4e-12, 0.0e-12,
1,8,3,5,5,5, -0.2e-12, 0.0e-12,
1,8,5,3,5,5, -0.1e-12, 0.0e-12,
1,8,5,5,5,5, -0.6e-12, 0.0e-12,
1,8,5,5,6,5, -0.4e-12, 0.0e-12,
1,8,5,5,7,5, -0.1e-12, 0.0e-12,
1,9,5,4,5,5, -0.1e-12, 0.0e-12,
1,9,5,4,6,5, -0.1e-12, 0.0e-12,
2,4,5,6,5,5, -0.3e-12, 0.0e-12,
2,5,5,5,5,5, -1.2e-12, 0.0e-12
};
double doodarg[6], ang, c20 = 0, c21 = 0, s21 = 0, c22 = 0, s22 = 0;
int i, nsets = 71, argn[6], ncon = 1;
for (i=0;i<nsets;i++)
{
argn[0] = (int)sets[i][0];
argn[1] = (int)sets[i][1] - 5;
argn[2] = (int)sets[i][2] - 5;
argn[3] = (int)sets[i][3] - 5;
argn[4] = (int)sets[i][4] - 5;
argn[5] = (int)sets[i][5] - 5;
// DOODSN(&info[num].jdt, &info[num].gmst, argn, &ncon, doodarg, &ang);
// DOODSN(&jdt, &gmst, otfes[i].argn, &ncon, doodarg, &ang);
// C20 correction: Long period tidal constituent
if(argn[0]==0)
{
c20 = c20 + sets[i][6]*cos(ang) - sets[i][7]*sin(ang);
}
// C21/S21 correction: Diurnal period tidal constituent
if(argn[0]==1)
{
c21 = c21 + sets[i][6]*sin(ang) + sets[i][7]*cos(ang);
s21 = s21 + sets[i][6]*cos(ang) - sets[i][7]*sin(ang);
}
// C22/S22 correction: Semi-diurnal period tidal constituent
if(argn[0]==2)
{
c22 = c22 + sets[i][6]*cos(ang);
s22 = s22 - sets[i][6]*sin(ang);
}
}
*c20f = c20;
*c21f = c21;
*s21f = s21;
*c22f = c22;
*s22f = s22;
return 0;
}
/*
 * accel_gravt - static gravity-field acceleration for an Earth orbiter,
 * using an ITRF<->GCRF rotation built from itrf2gcrf; result converted
 * to AU/day^2.
 *
 * tjd : two-part Julian date
 * xic : inertial position in AU (only first 3 entries used)
 * a4  : output acceleration (3)
 *
 * NOTE(review): here c_ie holds the ITRF->GCRF matrix and c_ei its
 * transpose -- the REVERSE of the c_ie/c_ei naming used with
 * info.c_ie in accel_gravtide.  The routine is internally consistent,
 * but confirm before reusing these names elsewhere.
 */
double accel_gravt (double *tjd, double *xic, double *a4)
{
    int n, lps;
    double GM, radius, pi[3], pe[3], llr[3], c_ie[9], c_ei[9], ae[3], ai[3];
    double jd0, tt, utc, te[9], tx[3], ty[3], tz[3],
        vx[3] = {1,0,0}, vy[3] = {0,1,0}, vz[3] = {0,0,1};
    GM = 398600.44150E+09;      /* hard-coded Earth GM [m^3/s^2] */
    radius = 6378136.3;         /* hard-coded Earth radius [m] */
    for (n = 0; n < 3; n++)
    {
        pi[n] = xic[n] * AU;    /* AU -> m */
    }
    jd0 = tjd[0];
    tt = tjd[1] * 86400.0;
    lps = getlps (jd0 + tt/86400.0);
    utc = tt - (lps + 32.184);  /* TT -> UTC via leap seconds + 32.184 s */
    // printf("%f\t%f\n", jd0, utc);
    /* rotation matrix columns from the images of the basis vectors */
    itrf2gcrf(jd0, utc, vx, tx);
    itrf2gcrf(jd0, utc, vy, ty);
    itrf2gcrf(jd0, utc, vz, tz);
    for (n = 0; n < 3; n++)
    {
        c_ie[n*3] = tx[n];
        c_ie[n*3+1] = ty[n];
        c_ie[n*3+2] = tz[n];
    }
    mt(c_ie, 3, 3, c_ei);
    /* inertial -> Earth-fixed, then evaluate the harmonic field */
    brmul(c_ei, pi, 3, 3, 1, pe);
    xyz2llh(pe, llr);
    cs2acc (llr, COEFG, GM, radius, NMAX, ae);
    // printf("ae = %f\t%f\t%f\n", ae[0], ae[1], ae[2]);
    // exit(0);
    /* rotate back to inertial */
    brmul(c_ie, ae, 3, 3, 1, ai);
    for (n = 0; n < 3; n++)
    {
        a4[n] = ai[n] / AU * 86400 * 86400;     /* m/s^2 -> AU/day^2 */
    }
    return 0;
}
double cs2ada (double *llr, double *cs, int nmax, double *ae,
int part, double *dadre, int flagdadcs)
{
int n, m, k, l, ind, ic, is, label;
double slat, clat, slon, clon, sclt, cclt, *cosml, *sinml,
*aprn, *pbar, *pbar1, *pbar2, *pt, *pt1, *pt2, lat, lon, r, vi, t,
gm, a, an[3], c_en[9], c_ne[9], dadrn[9], dadrne[9], dadre1[9],
dadre2[9], dcdrn[27], dcdre[27], dcdxe[9], dcdye[9], dcdze[9],
dadrecx[3], dadrecy[3], dadrecz[3], dadrec[9], dadrea[9];
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
gm = GMCT;
a = RCT;
lat = llr[0];
lon = llr[1];
r = llr[2];
slat = sin(lat * DEG2RAD);
clat = cos(lat * DEG2RAD);
slon = sin(lon * DEG2RAD);
clon = cos(lon * DEG2RAD);
sclt = clat;
cclt = slat;
cosml = (double *) calloc ( nmax + 1, sizeof(double)); //cos(m*lamta)
sinml = (double *) calloc ( nmax + 1, sizeof(double)); //sin(m*lamta)
aprn = (double *) calloc ( nmax + 1, sizeof(double)); //sin(m*lamta)
pbar = (double *) calloc ( nmax + 1, sizeof(double));
pbar1 = (double *) calloc ( nmax + 1, sizeof(double));
pbar2 = (double *) calloc ( nmax + 1, sizeof(double));
pt = (double *) calloc ( (nmax + 1) * (nmax + 1), sizeof(double));
pt1 = (double *) calloc ( (nmax + 1) * (nmax + 1), sizeof(double));
pt2 = (double *) calloc ( (nmax + 1) * (nmax + 1), sizeof(double));
cosml[0] = 1; sinml[0] = 0;
for (m = 1; m <= nmax; m++)
{
cosml[m] = cos(m * lon * DEG2RAD);
sinml[m] = sin(m * lon * DEG2RAD);
}
for (n = 0; n <= nmax; n++)
{
aprn[n] = pow (a / r, n) * gm / r;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
an[0] = 0; an[1] = 0; an[2] = 0;
t = cclt; vi = 0;
for (m = 0; m <= nmax; m ++)
{
l = nmax - m + 1;
lgdr2(t, nmax, m, pbar, pbar1, pbar2);
for (k = 0; k < l; k++)
{
if (m==0)
{
n = k;
ic = n;
is = 0;
}
else
{
ind = nmax + 1 + (2 * nmax - m + 2) * (m - 1);
n = k + m;
ic = ind + n - m;
is = ind + n - m + l;
}
pt[ic] = aprn[n] * pbar[k] * cosml[m];
pt[is] = aprn[n] * pbar[k] * sinml[m];
pt1[ic] = aprn[n] * pbar1[k] * cosml[m];
pt1[is] = aprn[n] * pbar1[k] * sinml[m];
pt2[ic] = aprn[n] * pbar2[k] * cosml[m];
pt2[is] = aprn[n] * pbar2[k] * sinml[m];
vi = vi + pt[ic] * cs[ic] + pt[is] * cs[is];
an[0] = an[0] + pt1[ic] * cs[ic] + pt1[is] * cs[is];
an[1] = an[1] - m * pt[is] * cs[ic] + m * pt[ic] * cs[is];
an[2] = an[2] + (n+1) * pt[ic] * cs[ic] + (n+1) * pt[is] * cs[is];
// an[2] = an[2] + (n+1) * ( pt[ic] * cs[ic] + pt[is] * cs[is]);
}
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
free (pbar);
free (pbar1);
free (pbar2);
free (cosml);
free (sinml);
free (aprn);
an[0] = - an[0] / r;
an[1] = + an[1] / r / sclt;
an[2] = + an[2] / r;
c_ne[0] = - slat * clon; //nsys: North-East-Down
c_ne[1] = - slon;
c_ne[2] = - clat * clon;
c_ne[3] = - slat * slon;
c_ne[4] = clon;
c_ne[5] = - clat * slon;
c_ne[6] = clat;
c_ne[7] = 0;
c_ne[8] = - slat;
brmul(c_ne, an, 3, 3, 1, ae); //from n-sys to e-sys
for (n = 0; n < 9; n++)
dadre[n] = 0;
if (part == 0)
{
free (pt);
free (pt1);
free (pt2);
return vi;
}
for (n = 0; n < 9; n++)
dadrn[n] = 0;
for (m = 0; m <= nmax; m ++)
{
l = nmax - m + 1;
for (k = 0; k < l; k++)
{
if (m==0)
{
n = k;
ic = n;
is = 0;
}
else
{
ind = nmax + 1 + (2 * nmax - m + 2) * (m - 1);
n = k + m;
ic = ind + n - m;
is = ind + n - m + l;
}
dadrn[0] += pt2[ic] * cs[ic] + pt2[is] * cs[is];
dadrn[3] += m * ( - pt[is] * cs[ic] + pt[ic] * cs[is]) * cclt / sclt / sclt
- m * ( - pt1[is] * cs[ic] + pt1[ic] * cs[is]) / sclt;
dadrn[6] -= (n+1) * (pt1[ic] * cs[ic] + pt1[is] * cs[is]);
dadrn[1] -= m * ( - pt1[is] * cs[ic] + pt1[ic] * cs[is]) / sclt;
dadrn[4] -= m * m * (pt[ic] * cs[ic] + pt[is] * cs[is]) / sclt / sclt;
dadrn[7] += m * (n+1) * ( - pt[is] * cs[ic] + pt[ic] * cs[is]) / sclt;
dadrn[2] -= (n+2) * (pt1[ic] * cs[ic] + pt1[is] * cs[is]);
dadrn[5] += m * (n+2) * ( - pt[is] * cs[ic] + pt[ic] * cs[is]) / sclt;
dadrn[8] += (n+2) * (n+1)* (pt[ic] * cs[ic] + pt[is] * cs[is]);
}
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
for (n = 0; n < 9; n++)
dadrn[n] = dadrn[n] / r / r;
mt(c_ne, 3, 3, c_en);
brmul(dadrn, c_en, 3, 3, 3, dadrne);
brmul(c_ne, dadrne, 3, 3, 3, dadrea);
for (n = 0; n < 27; n++)
dcdrn[n] = 0;
dcdrn[0] = - sclt * clon; dcdrn[1] = cclt * slon / sclt;
dcdrn[3] = 0; dcdrn[4] = - clon / sclt;
dcdrn[6] = cclt * clon;/**/dcdrn[7] = slon;
dcdrn[9] = - sclt * slon; dcdrn[10] = - cclt * clon / sclt;
dcdrn[12] = 0; dcdrn[13] = - slon / sclt;
dcdrn[15] = cclt * slon; dcdrn[16] = - clon;
dcdrn[18] = - cclt; dcdrn[19] = 0;
dcdrn[21] = 0; dcdrn[22] = 0;
dcdrn[24] = - sclt; dcdrn[25] = 0;
for (n = 0; n < 27; n++)
dcdrn[n] = dcdrn[n] / r;
brmul(dcdrn, c_en, 9, 3, 3, dcdre);
for (n = 0; n < 9; n++)
{
dcdxe[n] = dcdre[n*3];
dcdye[n] = dcdre[n*3+1];
dcdze[n] = dcdre[n*3+2];
// printf ("%e\n", dcdze[n]);
}
brmul(dcdxe, an, 3, 3, 1, dadrecx);
brmul(dcdye, an, 3, 3, 1, dadrecy);
brmul(dcdze, an, 3, 3, 1, dadrecz);
for (n = 0; n < 3; n++)
{
dadrec[n*3] = dadrecx[n];
dadrec[n*3+1] = dadrecy[n];
dadrec[n*3+2] = dadrecz[n];
}
for (n = 0; n <= 8; n++)
{
dadre[n] = dadrec[n] + dadrea[n];
}
if (flagdadcs == 0)
{
free (pt);
free (pt1);
free (pt2);
return vi;
}
// for dadcs_nm
//
for (k = 0; k < MGCS; k ++)
{
n = CSinfo[k].n; m = CSinfo[k].m; label = CSinfo[k].cs;
if (m == 0)
{
ic = n;
CSinfo[k].dadcsn[0] = - pt1[ic] / r;
CSinfo[k].dadcsn[1] = 0;
CSinfo[k].dadcsn[2] = (n+1) * pt[ic] / r;
}
else
{
l = nmax - m + 1;
ind = nmax + 1 + (2 * nmax - m + 2) * (m - 1);
ic = ind + n - m;
is = ind + n - m + l;
if (label == 1)
{
CSinfo[k].dadcsn[0] = - pt1[ic] / r;
CSinfo[k].dadcsn[1] = - m * pt[is] / r / sclt;
CSinfo[k].dadcsn[2] = (n+1) * pt[ic] / r;
}
if (label == -1)
{
CSinfo[k].dadcsn[0] = - pt1[is] / r;
CSinfo[k].dadcsn[1] = m * pt[ic] / r / sclt;
CSinfo[k].dadcsn[2] = (n+1) * pt[is] / r;
}
}
brmul(c_ne, CSinfo[k].dadcsn, 3, 3, 1, CSinfo[k].dadcse);
}
free (pt);
free (pt1);
free (pt2);
return vi;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
 * cs2acc - acceleration from a spherical-harmonic gravity field, in the
 * body-fixed frame.
 *
 * llr   geocentric latitude (deg), longitude (deg), radial distance.
 * cs    normalized Stokes coefficients, packed per order m: the first
 *       nmax+1 entries are the zonals C(n,0); for each m >= 1 a block of
 *       cosine terms followed by a block of sine terms (see ind / l below).
 * gm,a  gravitational parameter and reference radius of the field model.
 * nmax  maximum degree and order of the expansion.
 * acc   output acceleration, rotated by c_en from the local frame.
 *
 * Returns 1.
 * NOTE(review): calloc results are unchecked; c_ei and c_in are declared
 * but never used in this routine.
 */
double cs2acc (double *llr, double *cs, double gm, double a, int nmax,
    double *acc)
{
    int n, m, k, l, ind;
    double sinf, cosf, sinlon, coslon, sincolat, coscolat, *cosml, *sinml,
        *aprn, *pbar, *pbar1, *pbar2, accn[3], c_ei[9], c_en[9], c_in[9],
        *pt, *ptt, lat, lon, r, vi, dvdr, dvdcolat, dvdlon, t;
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
    lat = llr[0];
    lon = llr[1];
    r = llr[2];
    sinf = sin(lat * DEG2RAD);
    cosf = cos(lat * DEG2RAD);
    sinlon = sin(lon * DEG2RAD);
    coslon = cos(lon * DEG2RAD);
    /* colatitude complements: sin(colat) = cos(lat), cos(colat) = sin(lat) */
    sincolat = cosf;
    coscolat = sinf;
// #pragma omp parallel private(cosml, sinml, aprn, pbar, pbar1, pbar2, n, m, k, l, ind, sinf, cosf)
    cosml = (double *) calloc ( nmax + 1, sizeof(double)); //cos(m*lambda)
    sinml = (double *) calloc ( nmax + 1, sizeof(double)); //sin(m*lambda)
    aprn = (double *) calloc ( nmax + 1, sizeof(double)); //(a/r)^n * gm/r
    pbar = (double *) calloc ( nmax + 1, sizeof(double));
    pbar1 = (double *) calloc ( nmax + 1, sizeof(double));
    pbar2 = (double *) calloc ( nmax + 1, sizeof(double));
    pt = (double *) calloc ( (nmax + 1) * (nmax + 1), sizeof(double));
    ptt = (double *) calloc ( (nmax + 1) * (nmax + 1), sizeof(double));
    for (m = 0; m <= nmax; m++)
    {
        cosml[m] = cos(m * lon * DEG2RAD);
        sinml[m] = sin(m * lon * DEG2RAD);
    }
    for (n = 0; n <= nmax; n++)
    {
        /* radial attenuation factor shared by every term of degree n */
        aprn[n] = pow (a / r, n) * gm / r;
    }
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
    /* accumulate the potential vi and its gradient w.r.t. (colat, lon, r) */
    t = coscolat; vi = 0; dvdlon = 0; dvdcolat = 0; dvdr = 0;
    for (m = 0; m <= nmax; m ++)
    {
        l = nmax - m + 1;      /* number of degrees carried for this order */
        lgdr2(t, nmax, m, pbar, pbar1, pbar2);
// lgdr(t, nmax, m, pbar);
        for (k = 0; k < l; k++)
        {
            if (m==0)
            {
// ind = 0;
                n = k + m;
                pt[k] = aprn[n] * pbar[k];
                ptt[k] = aprn[n] * pbar1[k];
                vi = vi + pt[k] * cs[k];
// if (n>=2)
                {
                    dvdr = dvdr + (n+1) * pt[k] * cs[k];
                    dvdcolat = dvdcolat + ptt[k] * cs[k];
                }
            }
            else
            {
                /* ind = start of the order-m block in cs/pt; cosine term at
                   ind+n-m, matching sine term l entries later */
                ind = nmax + 1 + (2 * nmax - m + 2) * (m - 1);
                n = k + m;
                pt[ind + n - m] = aprn[n] * pbar[k] * cosml[m];
                pt[ind + n - m + l] = aprn[n] * pbar[k] * sinml[m];
                ptt[ind + n - m] = aprn[n] * pbar1[k] * cosml[m];
                ptt[ind + n - m + l] = aprn[n] * pbar1[k] * sinml[m];
                vi = vi + pt[ind + n - m] * cs[ind + n - m];
                vi = vi + pt[ind + n - m + l] * cs[ind + n - m + l];
                dvdcolat = dvdcolat + ptt[ind + n - m] * cs[ind + n - m];
                dvdcolat = dvdcolat + ptt[ind + n - m + l] * cs[ind + n - m + l];
                dvdlon = dvdlon - m * pt[ind + n - m + l] * cs[ind + n - m];
                dvdlon = dvdlon + m * pt[ind + n - m] * cs[ind + n - m + l];
                dvdr = dvdr + (n+1) * pt[ind + n - m] * cs[ind + n - m];
                dvdr = dvdr + (n+1) * pt[ind + n - m + l] * cs[ind + n - m + l];
            }
        }
    }
// dvdcolat = - dvdcolat * sincolat;
    dvdcolat = dvdcolat;   /* deliberate no-op kept from the original: the
                              sin(colat) scaling above was disabled */
    dvdlon = + dvdlon;
    dvdr = - dvdr / r;
    /* gradient -> acceleration components in the local frame */
    accn[0] = - dvdcolat / r;
    accn[1] = + dvdlon / r / sincolat;
    accn[2] = - dvdr;
    c_en[0] = - sinf * coslon; //from fixed to up-east-north system: rmat
    c_en[1] = - sinlon;
    c_en[2] = - cosf * coslon;
    c_en[3] = - sinf * sinlon;
    c_en[4] = coslon;
    c_en[5] = - cosf * sinlon;
    c_en[6] = cosf;
    c_en[7] = 0;
    c_en[8] = - sinf;
// mt(info[num].c_ie, 3, 3, info[num].c_ei);
// brmul (info[num].c_ei, c_en, 3, 3, 3, c_in); //inertial to fixed matrix gmat = rmat*tbt
// brmul(c_in, accn, 3, 3, 1, acc); //from fixed acc to inertial acc
    /* rotate the local-frame acceleration; the original comment said
       "to inertial", but c_en is built purely from lat/lon, so this looks
       like the body-fixed Cartesian frame -- TODO confirm with callers */
    brmul(c_en, accn, 3, 3, 1, acc);
// *v = vi;
// *dvdt = - ANGVEL * dvdlon;
    free (pbar);
    free (pbar1);
    free (pbar2);
    free (pt);
    free (ptt);
    free (cosml);
    free (sinml);
    free (aprn);
    return 1;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double lgdr(double t, int nmax, int m, double *pbar)
/*
! THIS CALCULATES THE FULLY NORMALIZED LEGENDRE FUNCTION WITH GIVEN ORDER(M),
! MAXIMUM DEGREE (NMAX), AND GIVEN EVALUATION POINT, T (COSINES OF COLATITUDE).
! THIS RETURNS ALL Pn,m, P'n,m, AND P''n,m (m=<n<=Nmax).
! THE RECURSION FORMULAR FOR THE FUNCTION ITSELF IS GIVEN IN JEKELI(1996).
! THE RECURSION FORMULAR FOR THE 1ST DERIVATIVE IS GIVEN IN TSCHERNING, ET AL(1983).
! THE FORMULAR FOR THE 2ND DERIVATIVE IS FROM THE ASSOCIATE LEGENDRE EQUATION.
! NOTE : EQUATIONS GIVEN IN TSCHERNING, ET AL(1983) HAVE ERRATA.
!
! S.C. Han, 1/24/01 (MODIFIED FOR CRAY T94 2/13/01)
!
*/
{
int i;
//REAL*8 :: PBAR(NMAX-M+1),PBAR1(NMAX-M+1),PBAR2(NMAX-M+1),T,P00,P11,C,D
double p00, p11, c, d;
//! THE FULLY NORMALIZED ASSOCIATED LEGENDRE FUNCTION
//! Pm,m : JEKEIL (A.3c) & (A.3d) , P'm,m : TSCHERNING (7)
p00 = 1.0;
p11 = sqrt (3.0*(1.0-t*t));
if (m>=1)
{
pbar[0] = p11;
for (i = 2; i <= m; i++)
{
pbar[0] = sqrt((2.0*i+1.0)/(2.0*i)*(1.0-t*t))*pbar[0];
}
}
else
{
pbar[0]=p00;
}
if (nmax - m + 1 >= 2)
{
pbar[1] = sqrt(2.0*m +3.0) * t * pbar[0];
}
for(i = 3; i <= nmax-m+1; i++)
{
c=((2.0*m+2.0*i-3.0) * (2.0*m + 2.0*i-1.0)) / ((i-1.0)*(2.0*m+i-1.0));
d=((2.0*m+2.0*i-1.0)*(2.0*m+i-2.0)*(i-2.0))
/ ((2.0*m+2.0*i-5.0)*(i-1.0)*(2.0*m+i-1.0));
pbar[i-1] = sqrt(c)*t*pbar[i-2] - sqrt(d) * pbar[i-3];
}
return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double lgdr2(double t, int nmax, int m,
    double *pbar, double *pbar1, double *pbar2)
/*
! THIS CALCULATES THE FULLY NORMALIZED LEGENDRE FUNCTION WITH GIVEN ORDER(M),
! MAXIMUM DEGREE (NMAX), AND GIVEN EVALUATION POINT, T (COSINES OF COLATITUDE).
! THIS RETURNS ALL Pn,m, P'n,m, AND P''n,m (m=<n<=Nmax).
! THE RECURSION FORMULAR FOR THE FUNCTION ITSELF IS GIVEN IN JEKELI(1996).
! THE RECURSION FORMULAR FOR THE 1ST DERIVATIVE IS GIVEN IN TSCHERNING, ET AL(1983).
! THE FORMULAR FOR THE 2ND DERIVATIVE IS FROM THE ASSOCIATE LEGENDRE EQUATION.
! NOTE : EQUATIONS GIVEN IN TSCHERNING, ET AL(1983) HAVE ERRATA.
!
! S.C. Han, 1/24/01 (MODIFIED FOR CRAY T94 2/13/01)
!
*/
/*
 * pbar[k], pbar1[k], pbar2[k] hold Pbar, Pbar', Pbar'' of degree n = m + k.
 * NOTE(review): the derivative recursions divide by sqrt(1-t*t), so this
 * routine is singular at the poles (|t| == 1) -- callers must avoid
 * colatitude 0 or 180 deg.
 */
{
    int i;
//REAL*8 :: PBAR(NMAX-M+1),PBAR1(NMAX-M+1),PBAR2(NMAX-M+1),T,P00,P11,C,D
    double p00, p11, c, d;
//! THE FULLY NORMALIZED ASSOCIATED LEGENDRE FUNCTION
//! Pm,m : JEKEIL (A.3c) & (A.3d) , P'm,m : TSCHERNING (7)
    p00 = 1.0;
    p11 = sqrt (3.0*(1.0-t*t));
    if (m>=1)
    {
        /* sectoral seed P_{m,m} and its derivative, built up from degree 1 */
        pbar[0] = p11;
        pbar1[0] = sqrt(3.0) * t;
        for (i = 2; i <= m; i++)
        {
            pbar1[0] = sqrt((2.0*i+1.0)/(2.0*i))*(sqrt(1.0-t*t)*pbar1[0]+t*pbar[0]);
// pbar1[0] = sqrt((2.0*i+1.0)/(2.0*i))*(sqrt(1.0-t*t)*pbar1[0]+t*pbar[0]/(-sqrt(1.0-t*t)));
            pbar[0] = sqrt((2.0*i+1.0)/(2.0*i)*(1.0-t*t))*pbar[0];
        }
    }
    else
    {
        pbar[0]=p00;
        pbar1[0]=0.0;
    }
// ! Pm+1,m : JEKEIL (A.3b)
    if (nmax - m + 1 >= 2)
    {
        pbar[1] = sqrt(2.0*m +3.0) * t * pbar[0];
    }
// ! Pn,m (n>=m+2) : JEKEIL (A.3a)
    for(i = 3; i <= nmax-m+1; i++)
    {
        c=((2.0*m+2.0*i-3.0) * (2.0*m + 2.0*i-1.0)) / ((i-1.0)*(2.0*m+i-1.0));
        d=((2.0*m+2.0*i-1.0)*(2.0*m+i-2.0)*(i-2.0))/((2.0*m+2.0*i-5.0)*(i-1.0)*(2.0*m+i-1.0));
        pbar[i-1] = sqrt(c)*t*pbar[i-2] - sqrt(d) * pbar[i-3];
    }
// ! THE FULLY NORMALIZED ASSOCIATED LEGENDRE FUNCTION - 1ST DERIVATIVE
// ! P'n,m (n>=m+1) : TSCHERNING (8)
    for (i=2; i<=nmax-m+1; i++)
    {
        c = 1.0/sqrt(1.0-t*t)*t*(m+i-1);
        d = 1.0/sqrt(1.0-t*t)*sqrt((((m+i-1)*(m+i-1)-m*m)*(2.0*(m+i-1)+1.0))/(2.0*(m+i-1)-1.0));
//!! found it different from TSCHERNING (8),dcl-2010-2-14
//!! Jianbin confirms code is correct, dcl-2010-2-15
//!! D=1D0/SQRT(1D0-T**2)/SQRT((((M+I-1)**2-M**2)*(2D0*(M+I-1)+1D0))/(2D0*(M+I-1)-1D0))
        pbar1[i-1] = c * pbar[i-1] - d * pbar[i-2];
    }
//! THE FULLY NORMALIZED ASSOCIATED LEGENDRE FUNCTION - 2ND DERIVATIVE
//! P''n,m (n>=m) : ASSOCIATE LEGENDRE EQUATION (2ND ORDER DIFFERENTIAL EQN.)
    for (i=1;i<=nmax-m+1;i++)
    {
        pbar2[i-1] = (-t/sqrt(1.0-t*t)) * pbar1[i-1]
            - ((m+i-1)*(m+i)-m*m/(1.0-t*t)) * pbar[i-1];
    }
    return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double accel_slrad (double *tjd, double *xic, double *acc)
/*
 * Cannonball solar-radiation-pressure acceleration on the spacecraft.
 * tjd : two-part TDB Julian date.
 * xic : spacecraft state relative to body CT (AU, AU/day); only the
 *       position part xic[0..2] is actually used here.
 * acc : output acceleration (AU/day^2), directed from the Sun to the probe.
 * Uses the global area-to-mass ratio AMR.  Always returns 0.
 * Fix: removed the unused locals of the original (m, ap, xist, rsp3,
 * the identity matrix `unit`, and ssbary), which only obscured the code.
 */
{
    double j, c1, rsp, usp[3], xis[6], xsc[6], f;
    short int n;
    short int sun = 10;           /* ephemeris body index of the Sun */
    /* Sun -> probe vector in the CT-centered frame */
    planet_ephemeris (tjd, sun, CT, &xsc[0], &xsc[3]);
    for (n = 0; n <= 5; n++)
    {
        xis[n] = xic[n] - xsc[n];
    }
    rsp = sqrt (xis[0] * xis[0] + xis[1] * xis[1] + xis[2] * xis[2]);
    usp[0] = xis[0] / rsp;        /* unit vector Sun -> probe */
    usp[1] = xis[1] / rsp;
    usp[2] = xis[2] / rsp;
    j = 1352.5; //kg/s3
// j = 1359.4; //kg/s3
    /* c1 = (solar flux)/c kept in units so that f below is m/s^2 with
       rsp expressed in AU */
    c1 = j / C * 1 * 1; //kg/s2/m*au*au
    f = c1 * AMR / rsp / rsp;
    //kg/s2/m*au*au * m2 / kg / au / au = m/s2
    f = f / AU * 86400.0 * 86400.0;   /* m/s^2 -> AU/day^2 */
    acc[0] = f * usp[0];
    acc[1] = f * usp[1];
    acc[2] = f * usp[2];
    return 0;
}
/*
 * accel_nbody - differential n-body perturbation on the spacecraft.
 * Evaluates the Newtonian (fnt) and relativistic (fgr) accelerations in
 * the barycentric frame both at the probe and at the central body CT,
 * and returns the differences (probe minus center), i.e. the perturbing
 * acceleration as seen in the CT-centered frame.  Always returns 0.
 */
double accel_nbody (double *tjd, double *xic, double *fnt, double *fgr)
{
    short int barycenter = 11;
    double xcb[6], xib[6], fnt_probe[3], fnt_cb[3], fgr_probe[3], fgr_cb[3];
    int i;
    /* barycentric state of the central body, and the forces acting on it */
    planet_ephemeris (tjd, CT, barycenter, &xcb[0], &xcb[3]);
    force_bcrs (tjd, xcb, CT, fnt_cb, fgr_cb);
    /* barycentric state of the probe, and the forces acting on it */
    for (i = 0; i < 6; i++)
        xib[i] = xic[i] + xcb[i];
    force_bcrs (tjd, xib, CT, fnt_probe, fgr_probe);
    /* perturbation = probe force minus central-body force */
    for (i = 0; i < 3; i++)
    {
        fnt[i] = fnt_probe[i] - fnt_cb[i];
        fgr[i] = fgr_probe[i] - fgr_cb[i];
    }
    return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
 * force_bcrs - point-mass accelerations at the barycentric state xi
 * (AU, AU/day) at epoch jd (two-part JD), from the 9 planets, the Moon
 * and the Sun, skipping body `exclude`.
 * fnt : Newtonian acceleration (AU/day^2).
 * fgr : relativistic correction with PPN parameters beta = gamma = 1
 *       (general relativity), in the usual ephemeris n-body form.
 * Returns 1.
 * NOTE(review): the local GMDE shadows a file-scope GMDE used elsewhere
 * (e.g. delta_iid) -- confirm both are kept consistent; the locals rij5,
 * rij3, xijt, unit and flag_gr are declared/initialized but never used.
 */
double force_bcrs (double *jd, double *xi, short int exclude,
    double *fnt, double *fgr)
{
    double xj[11][6], xij[11][6], rij[11], xjk[6], rjk,
        xddj[3], sumil, sumjk, sdi2, sdj2, rdirdj, rrrdr2, rjirdd, gm[11], GMDE[11],
        rij5, rij3, xijt[9], gra, grb, beta, gamma, unit[9],gm2de;
    short int ssbary, l, k, j, n, flag_gr;
    /* GM values (m^3/s^2), ephemeris body order:
       Mercury..Pluto (0-8), Moon (9), Sun (10) */
    gm[0] = 2.203208082807623e+13;
    gm[1] = 3.248586038641429e+14;
    gm[2] = 398600.44150E+09;
    gm[3] = 4.28283719012840e+13;
    gm[4] = 1.267127698227696e+17;
    gm[5] = 3.794062664949063e+16;
    gm[6] = 5.794549096929744e+15;
    gm[7] = 6.836534169987595e+15;
    gm[8] = 9.816009029289940e+11;
    gm[9] = 4.902801056E+12;
    gm[10] = 1.32712442076e20;
    /* conversion m^3/s^2 -> AU^3/day^2 */
    gm2de = 86400.0 * 86400.0 / AU / AU / AU;
    for (n = 0; n <= 10; n++)
        GMDE[n] = gm[n] * gm2de;
    ssbary = 11;
// ssbary = 10;
    gamma = 1.0;
    beta = 1.0;
    unit[0] = 1; unit[1] = 0; unit[2] = 0;
    unit[3] = 0; unit[4] = 1; unit[5] = 0;
    unit[6] = 0; unit[7] = 0; unit[8] = 1;
    /* barycentric states of all bodies, and their separation from xi */
    for (j = 0; j <= 10; j++)
    {
        planet_ephemeris (jd, j, ssbary, &xj[j][0], &xj[j][3]);
        for (n = 0; n < 6; n++)
        {
            xij[j][n] = xi[n] - xj[j][n];
        }
        rij[j] = sqrt (xij[j][0] * xij[j][0]
            + xij[j][1] * xij[j][1] + xij[j][2] * xij[j][2]);
    }
    /* Newtonian point-mass sum */
    for (n = 0; n < 3; n ++)
        fnt[n] = 0;
    for (j = 0; j <= 10; j++)
    {
        if (j == exclude)
            continue;
        for (n = 0; n < 3; n++)
            fnt[n] = fnt[n]
                - GMDE[j] / (rij[j] * rij[j] * rij[j]) * xij[j][n];
    }
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
    /* relativistic terms: sdi2 = v_i^2, sumil = sum of potentials at xi */
    sdi2 = xi[3] * xi[3] + xi[4] * xi[4] + xi[5] * xi[5];
    sumil = 0;
    for (l = 0; l < 11; l ++)
    {
        if ( l == exclude)
            continue;
        sumil = sumil + GMDE[l] / rij[l];
    }
    for (n = 0; n < 3; n ++)
        fgr[n] = 0;
    for (j = 0; j < 11; j ++)
    {
        if (j == exclude)
            continue;
        /* sumjk = potential at body j from the others;
           xddj  = Newtonian acceleration of body j */
        sumjk = 0;
        for (n = 0; n < 3; n ++)
            xddj[n] = 0;
        for (k = 0; k < 11; k ++)
        {
            if (k == j)
                continue; //k!=j
            for (n = 0; n < 3; n++)
                xjk[n] = xj[j][n] - xj[k][n];
            rjk = sqrt (xjk[0] * xjk[0] + xjk[1] * xjk[1] + xjk[2] * xjk[2]);
            sumjk = sumjk + GMDE[k] / rjk;
            for (n = 0; n < 3; n ++)
                xddj[n] = xddj[n] - GMDE[k] / (rjk * rjk * rjk) * xjk[n];
        }
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
        /* velocity-dependent scalars entering the PPN bracket */
        sdj2 = xj[j][3] * xj[j][3] + xj[j][4] * xj[j][4]
            + xj[j][5] * xj[j][5];
        rdirdj = xi[3] * xj[j][3] + xi[4] * xj[j][4] + xi[5] * xj[j][5];
        rrrdr2 = pow( ( xij[j][0] * xj[j][3] + xij[j][1] * xj[j][4]
            + xij[j][2] * xj[j][5]) / rij[j], 2);
        rjirdd = - ( xij[j][0] * xddj[0] + xij[j][1] * xddj[1]
            + xij[j][2] * xddj[2]);
        gra = - 2 * (beta + gamma) * sumil - (2 * beta -1) * sumjk
            + gamma * sdi2 + (1 + gamma) * sdj2
            - 2 * (1 + gamma) * rdirdj - 1.5 * rrrdr2 + 0.5 * rjirdd;
        grb = xij[j][0] * ((2+2*gamma) * xi[3] - (1+2*gamma) * xj[j][3])
            + xij[j][1] * ((2+2*gamma) * xi[4] - (1+2*gamma) * xj[j][4])
            + xij[j][2] * ((2+2*gamma) * xi[5] - (1+2*gamma) * xj[j][5]);
        /* assemble the 1/c^2 correction (C_AUDAY = light speed, AU/day) */
        for (n = 0; n < 3; n ++)
        {
            fgr[n] = fgr[n]
                + GMDE[j] / (rij[j] * rij[j] * rij[j])
                * ( - xij[j][n]) * gra / C_AUDAY / C_AUDAY
                + GMDE[j] / (rij[j] * rij[j] * rij[j])
                * xij[j][n + 3] * grb / C_AUDAY / C_AUDAY
                + GMDE[j] / rij[j] * (3 + 4 * gamma) * 0.5
                * xddj[n] / C_AUDAY / C_AUDAY;
        }
    }
    return 1;
}
double modvect (double *v)
{
return sqrt(v[0] * v[0] + v[1] * v[1] + v[2] * v[2]);
}
/*
 * dotvect - scalar (dot) product of two 3-vectors.
 */
double dotvect (double *v1, double *v2)
{
    double acc = 0.0;
    int i;
    for (i = 0; i < 3; i++)
        acc += v1[i] * v2[i];
    return acc;
}
/*
 * crsvect - right-handed cross product v = v1 x v2 of two 3-vectors.
 */
void crsvect (double *v1, double *v2, double *v)
{
    int i, j, k;
    for (i = 0; i < 3; i++)
    {
        j = (i + 1) % 3;    /* cyclic successor indices */
        k = (i + 2) % 3;
        v[i] = v1[j] * v2[k] - v1[k] * v2[j];
    }
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* chosephase
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double chosephase (double sinvalue, double cosvalue)
/*
 * Recover an angle in [0, 2*pi) from its sine and cosine, picking the
 * quadrant consistent with both signs.
 * Fix: the second branch now accepts sv == 0 with cv < 0, so the input
 * (sin, cos) = (0, -1) yields pi; in the original that case fell through
 * to the final branch and returned 2*pi.
 */
{
    double sv = sinvalue, cv = cosvalue;
    if (sv >= 0 && cv >= 0)              /* 1st quadrant: [0, pi/2] */
        return (asin (sv));
    if (sv >= 0 && cv < 0)               /* 2nd quadrant: (pi/2, pi] */
        return (acos (cv));
    if (sv < 0 && cv < 0)                /* 3rd quadrant: (pi, 3pi/2) */
        return ( - asin (sv) + TWOPI / 2.0);
    else                                 /* 4th quadrant: [3pi/2, 2pi) */
        return (asin (sv) + TWOPI);
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/****************************************************************************/
/* */
/* Functions for Runge-Kutta integrator */
/* */
/* Version: 2009-9-8 */
/* */
/* Copyright (c) 2009 shangkun@shao.ac.cn All Right Reserved */
/* */
/****************************************************************************/
/*
Version: 2009-9-8
Version: 2009-9-13 integrate forwards & backwards
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double rkf78 (double jd, double t, double h, double *x, int dim,
    void (*fun)(int, double, double,double *,double *))
// double (*fun)(double, double,double *,double *))
/*
purpose: single fixed step of the Runge-Kutta-Fehlberg 7(8) integrator
input: double h integration step
double t integrate from t to t+h
double *x x(t)
int dim dim(x)
double err tolerance of step control
double (*fun)() right(force) function
output: double *x x(t+h)
return: h new step after adjustment
*/
/*
 * NOTE(review): `flag` is set to 0 at the end of every pass, so the
 * do-while body executes exactly once and the embedded truncation-error
 * estimate `d` is computed but never used -- automatic step-size control
 * is effectively disabled and the input h is returned unchanged.
 * `jd` is passed straight through to the force function.
 */
{
    int i, j, n, flag = 0;
    double *y, *k, *f, d = 0, tn;
    /* RKF7(8) nodes a[], 8th-order weights c[], and coupling matrix b[][]
       (Fehlberg's coefficients) */
    double a[13] = { 0, 2.0/27, 1.0/9, 1.0/6, 5.0/12, 1.0/2, 5.0/6, 1.0/6,
        2.0/3, 1.0/3, 1.0, 0, 1.0 };
    double c[13] = { 0, 0, 0, 0, 0, 34.0/105, 9.0/35, 9.0/35, 9.0/280,
        9.0/280, 0, 41.0/840, 41.0/840 };
    double b[13][12] =
    {
        {0},
        {2.0/27},
        {1.0/36,1.0/12},
        {1.0/24,0,1.0/8},
        {5.0/12,0,-25.0/16,25.0/16},
        {1.0/20,0,0,1.0/4,1.0/5},
        {-25.0/108,0,0,125.0/108,-65.0/27,125.0/54},
        {31.0/300,0,0,0,61.0/225,-2.0/9,13.0/900},
        {2.0,0,0,-53.0/6,704.0/45,-107.0/9,67.0/90,3.0},
        {-91.0/108,0,0,23.0/108,-976.0/135,311.0/54,-19.0/60,17.0/6,-1.0/12},
        {2383.0/4100,0,0,-341.0/164,4496.0/1025,-301.0/82,2133.0/4100,
            45.0/82,45.0/164,18.0/41},
        {3.0/205,0,0,0,0,-6.0/41,-3.0/205,-3.0/41,3.0/41,6.0/41},
        {-1777.0/4100,0,0,-341.0/164,4496.0/1025,-289.0/82,2193.0/4100,
            51.0/82,33.0/164,12.0/41,0,1.0}
    };
    y = (double *) calloc (dim, sizeof(double));
    k = (double *) calloc (dim*13, sizeof(double));
    f = (double *) calloc (dim, sizeof(double));
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
    do
    {
        /* evaluate the 13 stages; k[n*13+i] = f_n at stage i */
        for (i = 0; i <= 12; i++)
        {
            tn = t + a[i] * h;
            for (n = 0; n <= dim - 1; n++)
            {
                y[n] = x[n];
                for (j = 0; j <= i-1; j++)
                    y[n] = y[n] + h * b[i][j] * k[n*13+j];
            }
            fun (dim, jd, tn, y, f);
// fun (jd, tn, y, f);
            for (n = 0; n <= dim - 1; n++)
            {
                k[n*13+i] = f[n];
            }
        }
        /* embedded 7th-vs-8th order error estimate (currently unused) */
        d = 0;
        for (n = 0; n <= dim - 1; n++)
        {
            d = d + fabs (41.0 / 840 * (k[n*13+0] + k[n*13+10]
                - k[n*13+11] - k[n*13+12]) * h);
        }
        flag = 0;
    }while (flag == 1);
    /* 8th-order solution update */
    for (n = 0; n <= dim - 1; n++)
    {
        for (i = 0; i <= 12; i++)
            x[n] = x[n] + h * c[i] * k[n*13+i];
    }
    free (y);
    free (f);
    free (k);
    return h;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* mt -
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
 * mt - matrix transpose: b = a^T, where a is m x n and b is n x m,
 * both stored row-major.
 */
void mt (double *a, int m, int n, double *b)
{
    int row, col;
    for (col = 0; col < n; col++)
    {
        for (row = 0; row < m; row++)
            b[col * m + row] = a[row * n + col];
    }
    return;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* brmul -
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
 * brmul - matrix product c = a * b, with a (m x n), b (n x k) and
 * c (m x k), all stored row-major.
 */
void brmul (double *a, double *b, int m,int n, int k,double *c)
{
    int row, col, t;
    double sum;
    for (row = 0; row < m; row++)
    {
        for (col = 0; col < k; col++)
        {
            sum = 0.0;
            for (t = 0; t < n; t++)
                sum = sum + a[row * n + t] * b[t * k + col];
            c[row * k + col] = sum;
        }
    }
    return;
}
void xyz2rtn(double *x, double *v, double *xyz, double *rtn)
{
double scal_x, scal_v, vr[3], vn[3], vt[3];
scal_x=sqrt(x[0]*x[0]+x[1]*x[1]+x[2]*x[2]);
scal_v=sqrt(v[0]*v[0]+v[1]*v[1]+v[2]*v[2]);
// c...unit vector in R direction
vr[0]=x[0]/scal_x;
vr[1]=x[1]/scal_x;
vr[2]=x[2]/scal_x;
// c...unit direction in N direction
vn[0]=(vr[1]*v[2]-vr[2]*v[1])/scal_v;
vn[1]=(vr[2]*v[0]-vr[0]*v[2])/scal_v;
vn[2]=(vr[0]*v[1]-vr[1]*v[0])/scal_v;
// c...unit direction in T direction
vt[0]=(vn[1]*vr[2]-vn[2]*vr[1]);
vt[1]=(vn[2]*vr[0]-vn[0]*vr[2]);
vt[2]=(vn[0]*vr[1]-vn[1]*vr[0]);
// drtn(i,4)=dsqrt(dx[0]*dx[0]+dx[1]*dx[1]+dx[2]*dx[2])
rtn[0]=xyz[0]*vr[0]+xyz[1]*vr[1]+xyz[2]*vr[2];
rtn[1]=xyz[0]*vt[0]+xyz[1]*vt[1]+xyz[2]*vt[2];
rtn[2]=xyz[0]*vn[0]+xyz[1]*vn[1]+xyz[2]*vn[2];
return;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
 * lagrange - Lagrange interpolation using up to 2*order points
 * (order = 8 in the code below; the old "order = 6" note was stale).
 * @param1: description of param1
 * @param2: description of param2
 * todo: make order an input parameter
 * version: 20 Aug 2010
 */
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double lagrange (double *y, int dim_y, int dim_x, double t, double *z)
/*
 * Lagrange interpolation over a table y of dim_y rows, each row holding
 * dim_x doubles: [abscissa, value_1 .. value_{dim_x-1}], row-major and
 * sorted by abscissa.  All dim_x-1 value columns are interpolated at
 * abscissa t into z[0 .. dim_x-2], using a window of up to 2*order nodes
 * around t (clamped at the table ends, so t outside the table range
 * extrapolates from the boundary window).  Returns 0.
 */
{
    int i, j, k, m, dim, order = 8;
    double s;
    /* Locate the first row whose abscissa is >= t.
       Fix: test i < dim_y BEFORE reading y[i*dim_x]; the original order of
       the && operands read one row past the end of the table whenever t
       exceeded every abscissa. */
    i = 0;
    while ((i < dim_y) && (y[i * dim_x] < t))
        i = i + 1;
    k = i - order;               /* window start, clamped to the table */
    if (k < 0)
        k = 0;
    m = i + order - 1;           /* window end, clamped to the table */
    if (m > dim_y - 1)
        m = dim_y - 1;
    for (dim = 0; dim < dim_x - 1; dim++)
    {
        z[dim] = 0;
    }
    for (i = k; i <= m; i++)
    {
        /* Lagrange basis weight for node i over the window [k, m] */
        s = 1.0;
        for (j = k; j <= m; j++)
        {
            if (j != i)
            {
                s = s * (t - y[j * dim_x]) / (y[i * dim_x] - y[j * dim_x]);
            }
        }
        for (dim = 0; dim < dim_x - 1; dim++)
        {
            z[dim] = z[dim] + s * y[i * dim_x + dim + 1];
        }
    }
    return 0;
}
/*
 * obs_alt - altimetric observable (radial distance minus a reference
 * radius) and, optionally, its partial-derivative row.
 * jd, utc : epoch; utc is seconds past the global JD0.
 * obs     : output observable = |r_sc| - ref + BASB + BAST*utc.
 * part    : 0 = observable only; 1 = also fill the regression row bmat:
 *           [0..5] partials w.r.t. the initial state (via the 6x6 STM),
 *           then MOBS bias terms, then MDYN dynamic-parameter partials.
 * Returns h, the height above the reference sphere RCT.
 * NOTE(review): the orbit is interpolated from the global table OR_EPH;
 * for part == 1 the table row also carries the STM and the sensitivity
 * columns.  tjd/xsc and dodpp are unused leftovers here.
 */
double obs_alt (double jd, double utc, double *obs, int part, double *bmat)
{
    int n, lps, i;
    double r, h, ref, *eph, *dxdp, *dodpo, *dodpd, *dodpp, xc2[6], dxdx0[36], dodx[6],
        dodx0[6], tt, tjd[2], xsc[6], dx[3];
    ref = RCT; //should be topography height
    /* UTC -> TT (seconds), using the leap-second count at this epoch */
    lps = getlps (JD0 + utc/86400.0);
    tt = utc + (lps + 32.184);
// tjd[0] = JD0; tjd[1] = tt / 86400.0;
// get_ephemeris (tjd, 2, CT, xsc);
    if (part == 0)
    {
        /* state only: 6 components + abscissa column */
        lagrange (OR_EPH, DIM_OR, 7, tt, xc2);
    }
    if (part == 1)
    {
        /* state + 6x6 STM + 6*MDYN sensitivity columns */
        eph = (double *) calloc (42 + 6 * MDYN, sizeof(double));
        lagrange (OR_EPH, DIM_OR, 42 + 6 * MDYN + 1, tt, eph);
        for (n = 0; n < 6; n++)
            xc2[n] = eph[n];
        for (n = 0; n < 36; n++)
            dxdx0[n] = eph[n + 6];
        if (MDYN > 0)
        {
            dxdp = (double *) calloc (6 * MDYN, sizeof(double));
            dodpd = (double *) calloc (MDYN, sizeof(double));
            for (n = 0; n < 6 * MDYN; n++)
                dxdp[n] = eph[n + 42];
        }
        if (MOBS > 0)
            dodpo = (double *) calloc (MOBS, sizeof(double));
        free (eph);
    }
    h = modvect(xc2) - RCT;
    for (n = 0; n < 3; n++)
        dx[n] = xc2[n];
    r = modvect(dx);
    /* observable: range from the body center plus the bias model */
    *obs = r - ref + BASB + BAST * utc;
    if (part == 0)
        return h;
    /* d(obs)/d(state): position direction cosines, zero for velocity */
    for (n = 0; n < 3; n++)
    {
        dodx[n] = (xc2[n])/r;
        dodx[n + 3] = 0;
    }
    /* chain through the state transition matrix */
    brmul (dodx, dxdx0, 1, 6, 6, dodx0);
    for (n = 0; n < 6; n++)
        bmat[n] = dodx0[n];
    if (MEST == 0)
        return h;
    /* observation-model partials: bias and bias rate */
    i = 0;
    if (MOBS > 0)
    {
        dodpo[i] = 1;
        i++;
    }
    if (MOBS > 1)
    {
        dodpo[i] = utc;
        i++;
    }
    /* dynamic-parameter partials via the sensitivity matrix */
    if (MDYN > 0)
    {
        brmul (dodx, dxdp, 1, 6, MDYN, dodpd);
    }
    if (MOBS > 0)
        for (n = 0; n < MOBS; n++)
            bmat[6 + n] = dodpo[n];
    if (MDYN > 0)
        for (n = 0; n < MDYN; n++)
            bmat[6 + MOBS + n] = dodpd[n];
    if (MDYN > 0)
    {
        free (dxdp);
        free (dodpd);
    }
    if (MOBS > 0)
        free (dodpo);
    return h;
}
/*
 * obs_vel - velocity-magnitude observable relative to the tracking state
 * xsc from get_ephemeris, plus the bias model; optionally the partials
 * row.  Same table layout and bmat conventions as obs_alt:
 * [0..5] state partials, then MOBS bias terms, then MDYN dynamic terms.
 * Returns h, the height of the spacecraft above the reference sphere RCT
 * (computed from the position even though the observable is a velocity).
 * NOTE(review): dodpp, tjd after its single use, and dx are unused here.
 */
double obs_vel (double jd, double utc, double *obs, int part, double *bmat)
{
    int n, lps, i;
    double v, h, *eph, *dxdp, *dodpo, *dodpd, *dodpp, xc2[6], dxdx0[36], dodx[6],
        dodx0[6], tt, tjd[2], xsc[6], dv[3];
    /* UTC -> TT (seconds) */
    lps = getlps (JD0 + utc/86400.0);
    tt = utc + (lps + 32.184);
    tjd[0] = JD0; tjd[1] = tt / 86400.0;
    /* reference (tracking) state at the same epoch */
    get_ephemeris (tjd, 2, CT, xsc);
    if (part == 0)
    {
        lagrange (OR_EPH, DIM_OR, 7, tt, xc2);
    }
    if (part == 1)
    {
        /* state + 6x6 STM + 6*MDYN sensitivity columns */
        eph = (double *) calloc (42 + 6 * MDYN, sizeof(double));
        lagrange (OR_EPH, DIM_OR, 42 + 6 * MDYN + 1, tt, eph);
        for (n = 0; n < 6; n++)
            xc2[n] = eph[n];
        for (n = 0; n < 36; n++)
            dxdx0[n] = eph[n + 6];
        if (MDYN > 0)
        {
            dxdp = (double *) calloc (6 * MDYN, sizeof(double));
            dodpd = (double *) calloc (MDYN, sizeof(double));
            for (n = 0; n < 6 * MDYN; n++)
                dxdp[n] = eph[n + 42];
        }
        if (MOBS > 0)
            dodpo = (double *) calloc (MOBS, sizeof(double));
        free (eph);
    }
    h = modvect(xc2) - RCT;
    /* relative velocity magnitude */
    for (n = 0; n < 3; n++)
        dv[n] = xc2[n + 3] - xsc[n + 3];
    v = modvect(dv);
    *obs = v + BASB + BAST * utc;
    if (part == 0)
        return h;
    /* d(obs)/d(state): zero for position, direction cosines for velocity */
    for (n = 0; n < 3; n++)
    {
        dodx[n] = 0;
        dodx[n + 3] = (xc2[n + 3] - xsc[n + 3])/v;
    }
    brmul (dodx, dxdx0, 1, 6, 6, dodx0);
    for (n = 0; n < 6; n++)
        bmat[n] = dodx0[n];
    if (MEST == 0)
        return h;
    /* observation-model partials: bias and bias rate */
    i = 0;
    if (MOBS > 0)
    {
        dodpo[i] = 1;
        i++;
    }
    if (MOBS > 1)
    {
        dodpo[i] = utc;
        i++;
    }
    if (MDYN > 0)
    {
        brmul (dodx, dxdp, 1, 6, MDYN, dodpd);
    }
    if (MOBS > 0)
        for (n = 0; n < MOBS; n++)
            bmat[6 + n] = dodpo[n];
    if (MDYN > 0)
        for (n = 0; n < MDYN; n++)
            bmat[6 + MOBS + n] = dodpd[n];
    if (MDYN > 0)
    {
        free (dxdp);
        free (dodpd);
    }
    if (MOBS > 0)
        free (dodpo);
    return h;
}
/*
 * obs_dsn - range observable |r_sc - r_ref| to the reference state xsc
 * from get_ephemeris, plus the bias model; optionally the partials row.
 * Same table layout and bmat conventions as obs_alt/obs_vel:
 * [0..5] state partials, then MOBS bias terms, then MDYN dynamic terms.
 * Returns h, the height of the spacecraft above the reference sphere RCT.
 * NOTE(review): dodpp is an unused leftover here.
 */
double obs_dsn (double jd, double utc, double *obs, int part, double *bmat)
{
    int n, lps, i;
    double r, h, *eph, *dxdp, *dodpo, *dodpd, *dodpp, xc2[6], dxdx0[36], dodx[6],
        dodx0[6], tt, tjd[2], xsc[6], dx[3];
    /* UTC -> TT (seconds) */
    lps = getlps (JD0 + utc/86400.0);
    tt = utc + (lps + 32.184);
    tjd[0] = JD0; tjd[1] = tt / 86400.0;
    /* reference (tracking) state at the same epoch */
    get_ephemeris (tjd, 2, CT, xsc);
    if (part == 0)
    {
        lagrange (OR_EPH, DIM_OR, 7, tt, xc2);
    }
    if (part == 1)
    {
        /* state + 6x6 STM + 6*MDYN sensitivity columns */
        eph = (double *) calloc (42 + 6 * MDYN, sizeof(double));
        lagrange (OR_EPH, DIM_OR, 42 + 6 * MDYN + 1, tt, eph);
        for (n = 0; n < 6; n++)
            xc2[n] = eph[n];
        for (n = 0; n < 36; n++)
            dxdx0[n] = eph[n + 6];
        if (MDYN > 0)
        {
            dxdp = (double *) calloc (6 * MDYN, sizeof(double));
            dodpd = (double *) calloc (MDYN, sizeof(double));
            for (n = 0; n < 6 * MDYN; n++)
                dxdp[n] = eph[n + 42];
        }
        if (MOBS > 0)
            dodpo = (double *) calloc (MOBS, sizeof(double));
        free (eph);
    }
    h = modvect(xc2) - RCT;
    /* relative position and range */
    for (n = 0; n < 3; n++)
        dx[n] = xc2[n] - xsc[n];
    r = modvect(dx);
    *obs = r + BASB + BAST * utc;
    if (part == 0)
        return h;
    /* d(obs)/d(state): direction cosines for position, zero for velocity */
    for (n = 0; n < 3; n++)
    {
        dodx[n] = (xc2[n] - xsc[n])/r;
        dodx[n + 3] = 0;
    }
    brmul (dodx, dxdx0, 1, 6, 6, dodx0);
    for (n = 0; n < 6; n++)
        bmat[n] = dodx0[n];
    if (MEST == 0)
        return h;
    /* observation-model partials: bias and bias rate */
    i = 0;
    if (MOBS > 0)
    {
        dodpo[i] = 1;
        i++;
    }
    if (MOBS > 1)
    {
        dodpo[i] = utc;
        i++;
    }
    if (MDYN > 0)
    {
        brmul (dodx, dxdp, 1, 6, MDYN, dodpd);
    }
    if (MOBS > 0)
        for (n = 0; n < MOBS; n++)
            bmat[6 + n] = dodpo[n];
    if (MDYN > 0)
        for (n = 0; n < MDYN; n++)
            bmat[6 + MOBS + n] = dodpd[n];
    if (MDYN > 0)
    {
        free (dxdp);
        free (dodpd);
    }
    if (MOBS > 0)
        free (dodpo);
    return h;
}
/*
 * getsolvefor - derive the estimation-problem dimensions from the
 * per-group parameter counts (all globals):
 *   MDYN  dynamic parameters carried in the sensitivity matrix,
 *   MSOL  length of the full solve-for vector (state + obs + dynamic),
 *   MEST  parameters estimated beyond the 6 state elements,
 *   MSTA  columns propagated with the orbit (6x6 STM + 6 per dyn. param).
 */
void getsolvefor ()
{
    MDYN = MSRP + MTK2 + MGCS;
    MEST = MOBS + MDYN;
    MSOL = MOBS + MDYN + 6;
    MSTA = 42 + MDYN * 6;
    /* NOTE(review): CSinfo allocation/selection was disabled here in the
       original and is expected to be set up elsewhere. */
    return;
}
/*
 * initsolvefor - build the initial solve-for vector x.
 * xsm : a-priori 6-element state, copied to x[0..5].  The remaining slots
 * are filled in order with the active observation biases (BASB, BAST),
 * solar-pressure terms (SRPB, SRPT), the tidal Love number K2, and the
 * MGCS selected gravity coefficients from CSinfo.  Each selected
 * coefficient is offset by its initv and written back into the global
 * coefficient array COEFG.
 * The m > 0 coefficients live in COEFG at packed indices ic (cosine) /
 * is (sine) derived from NMAX -- same layout as used by cs2acc.
 */
void initsolvefor (double *xsm, double *x)
{
    int i, k, n, m, ind, l, ic, is, label;
    for (k = 0; k < 6; k ++)
    {
        x[k] = xsm[k];
    }
    i = 6;      /* next free slot in the solve-for vector */
    if (MOBS > 0)
    {
        x[i] = BASB;
        i++;
    }
    if (MOBS > 1)
    {
        x[i] = BAST;
        i++;
    }
    if (MSRP > 0)
    {
        x[i] = SRPB;
        i++;
    }
    if (MSRP > 1)
    {
        x[i] = SRPT ;
        i++;
    }
    if (MTK2 > 0)
    {
        x[i] = K2;
        i++;
    }
    if (MGCS > 0)
        for (k = 0; k < MGCS; k ++)
        {
            /* label: +1 = cosine coefficient, -1 = sine coefficient */
            n = CSinfo[k].n; m = CSinfo[k].m; label = CSinfo[k].cs;
            if (m == 0)
            {
                /* zonal: stored at plain index n */
                x[i] = COEFG[n] + CSinfo[k].initv;
                COEFG[n] = x[i];
            }
            else
            {
                /* packed order-m block: cosine at ic, sine l entries later */
                l = NMAX - m + 1;
                ind = NMAX + 1 + (2 * NMAX - m + 2) * (m - 1);
                ic = ind + n - m;
                is = ind + n - m + l;
                if (label == 1)
                {
                    x[i] = COEFG[ic] + CSinfo[k].initv;
                    COEFG[ic] = x[i];
                }
                if (label == -1)
                {
                    x[i] = COEFG[is] + CSinfo[k].initv;
                    COEFG[is] = x[i];
                }
            }
            i++;
        }
    return;
}
/*
 * updsolvefor - push an updated solve-for vector x back into the global
 * model parameters.  Mirror of initsolvefor: slots after the 6 state
 * elements are consumed in the same fixed order (observation biases,
 * solar-pressure terms, K2, then the MGCS gravity coefficients, which
 * are written into COEFG using the same packed indexing as cs2acc).
 * The state elements x[0..5] are not applied here.
 */
void updsolvefor (double *x)
{
    int i, k, n, m, ind, l, ic, is, label;
    i = 6;      /* first non-state slot */
    if (MOBS > 0)
    {
        BASB = x[i];
        i++;
    }
    if (MOBS > 1)
    {
        BAST = x[i];
        i++;
    }
    if (MSRP > 0)
    {
        SRPB = x[i];
        i++;
    }
    if (MSRP > 1)
    {
        SRPT = x[i];
        i++;
    }
    if (MTK2 > 0)
    {
        K2 = x[i];
        i++;
    }
    if (MGCS > 0)
        for (k = 0; k < MGCS; k ++)
        {
            /* label: +1 = cosine coefficient, -1 = sine coefficient */
            n = CSinfo[k].n; m = CSinfo[k].m; label = CSinfo[k].cs;
            if (m == 0)
            {
                COEFG[n] = x[i];
            }
            else
            {
                /* packed order-m block: cosine at ic, sine l entries later */
                l = NMAX - m + 1;
                ind = NMAX + 1 + (2 * NMAX - m + 2) * (m - 1);
                ic = ind + n - m;
                is = ind + n - m + l;
                if (label == 1)
                {
                    COEFG[ic] = x[i];
                }
                if (label == -1)
                {
                    COEFG[is] = x[i];
                }
            }
            i++;
        }
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* simula_phase - simulate total phase count observable
* @param1: description of param1
* @param2: description of param2
* todo:
 1 one-way doppler deltat accumulated error
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
 * simula_phase - computed value of a total phase-count observable.
 * Solves the light time at the interval start (utc0) and end (utc3); the
 * observable is the difference of the accumulated light times (lt*[2])
 * scaled by C, plus -- for one-way data, uplink == 0 -- the proper-time
 * correction from delta_tdb, plus the bias model BIAS + DBIA*utc3.
 * part == 1: bmat receives the differenced partials of the two light-time
 * solutions.
 * NOTE(review): bmat[8] = 1 and bmat[9] = utc3 assume the bias and
 * bias-rate slots sit at fixed offsets 8 and 9 of the solve-for row --
 * confirm against the SLOVEFOR layout before changing DYNPAR/MOBS.
 * Always returns 0.
 */
double simula_phase (double utc3, double utc0, double *station3,
    short int uplink, double *station1, short int genrel,
    double *calculable, double *azimuth, double *elevation,
    short int part, double *bmat)
{
    double txice[7], txics[7], *bmats, *bmate, deltat;
    int n;
    real128 lts2[3], lte2[3];
    bmats = (double *) calloc ( SLOVEFOR, sizeof(double));
    bmate = (double *) calloc ( SLOVEFOR, sizeof(double));
    /* light-time solutions at the start and end of the count interval */
    ltsolution (utc0, station3, uplink, station1, genrel, lts2,
        azimuth, elevation, part, bmats, txics);
    ltsolution (utc3, station3, uplink, station1, genrel, lte2,
        azimuth, elevation, part, bmate, txice);
    *calculable = (lte2[2] - lts2[2]) * C;
    if (uplink == 0) //one-way doppler deltat, time correction
    {
        delta_tdb (txice, txics, &deltat);
        *calculable = *calculable + deltat * (txice[0] - txics[0]) * C;
    }
    if (part == 1)
    {
        for (n = 0; n < 6 + DYNPAR; n++)
        {
            bmat[n] = bmate[n] - bmats[n];
        }
        bmat[8] = 1;
        bmat[9] = utc3;
    }
    *calculable = *calculable + BIAS + DBIA * utc3;
    free (bmats);
    free (bmate);
    return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* simula_dople - simulate doppler observable
* @param1: description of param1
* @param2: description of param2
* todo:
 1 one-way doppler deltat accumulated error
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
 * simula_dople - computed value of a Doppler observable averaged over the
 * count time tc, centered on utc3.  Two light-time solutions at
 * utc3 +/- tc/2 are differenced in extended precision (real128) and
 * scaled by C; one-way data (uplink == 0) gets the proper-time
 * correction from delta_tdb, and the bias model BIAS + DBIA*utc3 is
 * added.  part == 1 fills bmat with the differenced partials / tc.
 * NOTE(review): dop_old (the lt*[1]-based estimate) is computed but
 * unused -- superseded by the real128 dop_new path; and, as in
 * simula_phase, bmat[8]/bmat[9] assume fixed bias slots at offsets 8/9.
 * Always returns 0.
 */
double simula_dople (double utc3, double tc, double *station3,
    short int uplink, double *station1, short int genrel,
    double *calculable, double *azimuth, double *elevation,
    short int part, double *bmat)
{
    double txice[7], txics[7], *bmats, *bmate, deltat;
    double dop_old, dop_new;
    int n;
    real128 lts2[3], lte2[3], dlt;
    bmats = (double *) calloc ( SLOVEFOR, sizeof(double));
    bmate = (double *) calloc ( SLOVEFOR, sizeof(double));
    /* light-time solutions at the ends of the count interval */
    ltsolution (utc3 + tc / 2, station3, uplink, station1, genrel, lte2,
        azimuth, elevation, part, bmate, txice);
    ltsolution (utc3 - tc / 2, station3, uplink, station1, genrel, lts2,
        azimuth, elevation, part, bmats, txics);
    dop_old = (double) ((lte2[1] - lts2[1]) / (real128)tc);
    /* difference the accumulated light times in extended precision */
    dlt = lte2[2] - lts2[2];
    dop_new = (double) (dlt / (real128)tc * (real128)C);
    *calculable = dop_new;
    if (uplink == 0) //one-way doppler deltat, proper time correction
    {
        delta_tdb (txice, txics, &deltat);
        *calculable = *calculable + deltat * C;
    }
    if (part == 1)
    {
        for (n = 0; n < 6 + DYNPAR; n++)
        {
            bmat[n] = (bmate[n] - bmats[n]) / tc;
        }
        bmat[8] = 1;
        bmat[9] = utc3;
    }
    *calculable = *calculable + BIAS + DBIA * utc3;
    free (bmats);
    free (bmate);
    return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* delta_tdb - one-way doppler deltat, proper time correction
* txice: [0]: satellite TDB time(s), [1]~[7]satellite coordinates(AU, day)
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double delta_tdb (double *txice, double *txics, double *deltat)
/*
 * Proper-time correction for a one-way Doppler count interval.
 * txice/txics: [0] = TDB epoch (seconds past JD0) at the end/start of the
 * interval, [1..6] = CENTER-relative state (AU, AU/day) at that epoch.
 * Both states are translated to the solar-system barycenter, the
 * integrand and its rate (ii, id from delta_iid) are evaluated at both
 * epochs, and the integral over the interval length t is approximated by
 * trapezoid + curvature terms into *deltat.  Returns 0.
 * Fix: the START-epoch barycentric state now adds xcbs (the CENTER state
 * at the start epoch).  The original added xcbe, the END-epoch state, to
 * the start coordinates -- xcbs was computed but never used.
 */
{
    double ie, is, t, ied, isd, tjde[2], tjds[2], xcbe[6], xcbs[6],
        xibe[6], xibs[6];
    short int ssbary, n;
    ssbary = 11;
    t = txice[0] - txics[0];        /* interval length, seconds */
    tjde[0] = JD0;
    tjde[1] = txice[0] / 86400.0;
    tjds[0] = JD0;
    tjds[1] = txics[0] / 86400.0;
    /* barycentric state at the end epoch */
    planet_ephemeris (tjde, CENTER, ssbary, &xcbe[0], &xcbe[3]);
    for (n = 0; n < 6; n++)
        xibe[n] = txice[n + 1] + xcbe[n];
    /* barycentric state at the start epoch */
    planet_ephemeris (tjds, CENTER, ssbary, &xcbs[0], &xcbs[3]);
    for (n = 0; n < 6; n++)
        xibs[n] = txics[n + 1] + xcbs[n];   /* was xcbe: wrong epoch */
    delta_iid (tjde, xibe, &ie, &ied);
    delta_iid (tjds, xibs, &is, &isd);
    /* trapezoid rule plus second-order (rate) correction */
    *deltat = 1.0 / 2.0 * (ie + is) * t - 1.0 / 12.0 * (ied - isd) * t * t;
    return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* delta_iid - instantaneous proper-time rate term for the one-way light-time correction
* txice: [0]: satellite TDB time(s), [1]~[7]satellite coordinates(AU, day)
* @param2: description of param2
* todo:
1 Uobl, time correction due to non-spherical potential
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double delta_iid (double *jd, double *xi, double *ii, double *id)
{
    /*
     * Instantaneous proper-time rate term (and its time derivative) for a
     * body at barycentric state xi (position AU, velocity AU/day) at
     * epoch jd, summing the potential of every perturbing body enabled
     * in PERB.
     *   ii : output, rate term  (U + v^2/2)/c^2 - L
     *   id : output, d(ii)/dt in 1/s
     */
    double pot, potdot, lconst, vel2, vel2dot, accb[3],
        body[11][6], rel[11][6], dist[11], distdot[11];
    short int bary, p, k;
    bary = 11;                    /* solar-system barycenter index */
    lconst = 1.550520e-8;         /* constant rate offset (L-type) */
    pot = 0;
    potdot = 0;
    accb[0] = 0;
    accb[1] = 0;
    accb[2] = 0;
    for (p = 0; p <= 10; p++)
    {
        if (PERB[p] == 0)
            continue;
        planet_ephemeris (jd, p, bary, &body[p][0], &body[p][3]);
        for (k = 0; k < 6; k++)
        {
            rel[p][k] = xi[k] - body[p][k];
        }
        dist[p] = sqrt (rel[p][0] * rel[p][0] + rel[p][1] * rel[p][1]
                        + rel[p][2] * rel[p][2]);
        distdot[p] = (rel[p][0] * rel[p][3] + rel[p][1] * rel[p][4]
                      + rel[p][2] * rel[p][5]) / dist[p];
        pot = pot + GMDE[p] / dist[p];
        potdot = potdot - GMDE[p] / dist[p] / dist[p] * distdot[p];
        /* Newtonian acceleration, used for d(v^2)/dt below */
        for (k = 0; k < 3; k++)
            accb[k] = accb[k]
                - GMDE[p] / (dist[p] * dist[p] * dist[p]) * rel[p][k];
    }
    vel2 = xi[3] * xi[3] + xi[4] * xi[4] + xi[5] * xi[5];
    vel2dot = 2.0 * (xi[3] * accb[0] + xi[4] * accb[1] + xi[5] * accb[2]);
    *ii = (pot + vel2 / 2.0) / C_AUDAY / C_AUDAY - lconst;
    *id = (potdot + vel2dot / 2.0) / C_AUDAY / C_AUDAY / 86400.0;
    return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* simula_range - simulate range observable
* @param1: description of param1
* @param2: description of param2
* todo:
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double simula_range (double utc3, double *station3, short int uplink,
                double *station1, short int genrel,
                double *calculable, double *azimuth, double *elevation,
                short int part, double *bmat)
{
    /*
     * Simulate a range observable at receive epoch utc3 from a single
     * light-time solution.
     *   uplink == 0 : one-way range (near-earth satellites only)
     *   uplink == 1 : two/three-way range (deep space)
     *   part == 1   : bias partials are appended to bmat
     */
    double satstate[7];
    real128 lightt[3];
    ltsolution (utc3, station3, uplink, station1, genrel, lightt,
                azimuth, elevation, part, bmat, satstate);
    switch (uplink)
    {
        case 0:  /* one-way: geometric range */
            *calculable = (double) lightt[0];
            break;
        case 1:  /* two/three-way: round-trip light time scaled by C */
            *calculable = (double) (lightt[2] * (real128)C);
            break;
        default:
            break;
    }
    if (part == 1)
    {
        bmat[8] = 1;        /* partial w.r.t. BIAS */
        bmat[9] = utc3;     /* partial w.r.t. DBIA */
    }
    *calculable = *calculable + BIAS + DBIA * utc3;
    return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* ltsolution - light time solution
* @param:
utc3 : unit: day;
station3 : receive station;
uplink : no uplink == 0; yes uplink == 1
station1 : transmit station;
genrel : no general relativity correction == 0; yes == 1
calculable : ;
azimuth : ;
elevation : ;
partial : no partial == 0 ; yes partial == 1
bmat : (partial == 0: satellite coordinates(6), partial == 1: partial)
txic : satellite coordinates(t2)
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double ltsolution (double utc_3, double *station3, short int uplink,
double *station1, short int genrel, real128 *lt,
double *azimuth, double *elevation, short int part,
double *bmat, double *txic)
{
/*
 * Iterative light-time solution for the downlink leg (satellite t2 ->
 * receive station t3) and, when uplink == 1, the uplink leg (transmit
 * station t1 -> satellite t2).  Converges each leg to within taoerr.
 * Outputs: lt[0..2] light-time observables (AU / s forms, see the
 * uplink branches at the bottom), bmat (satellite state or observable
 * partials depending on `part`), txic (satellite epoch + state at t2),
 * azimuth/elevation of the satellite as seen from station3.
 */
double re3fi[3], re3[3], re1fi[3], re1[3], vec[3], xe3[6], re3n[3],
xe1[6], re1n[3], xc2[6],
secdiff,
ra, dec, zd, az, secdiff3, secdiff1,
utc_1, ut1_3, ut1_1, tt_3, tt_1, tdb_3, tdb_2, tdb_1,
ut1_utc, xp, yp, xp3, yp3, dx, dy, delta_t3, t, elong, u, v,
dxdx0[36], dxdp[6 * DYNPAR], dodx[6], dodp[DYNPAR],
dodx0[6], dodpp[DYNPAR], eph[42 + 6 * DYNPAR],
te[9], llh3[3], llh1[3];
real128 tao231, tao232, tao121, tao122, taoerr, r23, r12, xb3[6], xb2[6], xb1[6];
int n, flag, dim_par;
taoerr = 1.0e-12L; /* light-time convergence tolerance (s) */
// taoerr = 1.0e-8L; //1nanosec;
dim_par = 42 + 6 * DYNPAR + 1; /* columns in the variational ephemeris */
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*--light time iteration 2 -> 3 --*/
re3fi[0] = station3[0];
re3fi[1] = station3[1];
re3fi[2] = station3[2];
xyz2llh(re3fi, llh3);
elong = llh3[1] * DEG2RAD;
/* iauDtdb expects distance from rotation axis / equator in km */
u = sqrt (re3fi[0] * re3fi[0] + re3fi[1] * re3fi[1]) / 1000.0;
v = re3fi[2] / 1000.0;
/*--time scales transformation --*/
geteop (utc_3, &xp3, &yp3, &ut1_utc, &dx, &dy);
delta_t3 = 32.184 + LEAPSECS - ut1_utc;
ut1_3 = utc_3 + ut1_utc;
tt_3 = utc_3 + (LEAPSECS + 32.184);
secdiff3 = iauDtdb (JD0, tt_3 / 86400.0, ut1_3 / 86400.0, elong, u, v);
tdb_3 = tt_3 + secdiff3;
/*--station coordinate interpolation--*/
/* te: earth-fixed -> inertial rotation; velocity from 1-second finite
   difference, scaled to AU/day below */
lagrange (TE_EPH, DIM_TE, 10, utc_3, te);
brmul (te, re3fi, 3, 3, 1, re3);
lagrange (TE_EPH, DIM_TE, 10, utc_3 + 1.0, te);
brmul (te, re3fi, 3, 3, 1, re3n);
for (n = 0; n < 3; n++)
{
xe3[n] = re3[n] / AU;
xe3[n + 3] = (re3n[n] - re3[n]) / AU * 86400;
}
/*--satellite coordinate interpolation--*/
if (part == 0)
{
lagrange (OR_EPH, DIM_OR, 7, tdb_3, xc2);
}
if (part == 1)
{
/* variational ephemeris: state occupies columns 36..41 */
lagrange (OR_EPH, DIM_OR, dim_par, tdb_3, eph);
for (n = 0; n < 6; n++)
xc2[n] = eph[n + 36];
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*--iteration--*/
/* fixed-point iteration on the downlink light time tao23 */
r23 = lt_form (tdb_3, tdb_3, xe3, xc2, genrel, xb3, xb2);
tao231 = (real128)r23 * (real128)AU_SEC;
tdb_2 = tdb_3 - (double) tao231;
flag = -1;
do
{
flag++;
tao232 = tao231;
if (part == 0)
{
lagrange (OR_EPH, DIM_OR, 7, tdb_2, xc2);
}
if (part == 1)
{
lagrange (OR_EPH, DIM_OR, dim_par, tdb_2, eph);
for (n = 0; n < 6; n++)
xc2[n] = eph[n + 36];
}
r23 = lt_form (tdb_3, tdb_2, xe3, xc2, genrel, xb3, xb2);
tao231 = (real128)r23 * (real128)AU_SEC;
tdb_2 = tdb_3 - (double) tao231;
}while (fabsl (tao232-tao231) > taoerr);
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*--light time iteration 1 -> 2--*/
if (uplink == 1)
{
re1fi[0] = station1[0];
re1fi[1] = station1[1];
re1fi[2] = station1[2];
xyz2llh(re1fi, llh1);
elong = llh1[1] * DEG2RAD;
u = sqrt (re1fi[0] * re1fi[0] + re1fi[1] * re1fi[1]) / 1000.0;
v = re1fi[2] / 1000.0;
/*--time scales transformation --*/
tdb_1 = tdb_2; //unit: s
/* first pass uses tdb2tt, then refines with iauDtdb */
tdb2tt (JD0 + tdb_1 / 86400.0, &t, &secdiff);
tt_1 = tdb_1 - secdiff;
utc_1 = tt_1 - (LEAPSECS + 32.184);
geteop (utc_1, &xp, &yp, &ut1_utc, &dx, &dy);
ut1_1 = utc_1 + ut1_utc;
secdiff1 = iauDtdb (JD0, tdb_1 / 86400.0, ut1_1 / 86400.0,
elong, u, v);
tt_1 = tdb_1 - secdiff1;
utc_1 = tt_1 - (LEAPSECS + 32.184);
/*--station coordinate interpolation--*/
lagrange (TE_EPH, DIM_TE, 10, utc_1, te);
brmul (te, re1fi, 3, 3, 1, re1);
lagrange (TE_EPH, DIM_TE, 10, utc_1 + 1.0, te);
brmul (te, re1fi, 3, 3, 1, re1n);
for (n = 0; n < 3; n++)
{
xe1[n] = re1[n] / AU;
xe1[n + 3] = (re1n[n] - re1[n]) / AU * 86400;
}
r12 = lt_form (tdb_1, tdb_2, xe1, xc2, genrel, xb1, xb2);
tao121 = (real128)r12 * (real128)AU_SEC;
tdb_1 = tdb_2 - (double) tao121;
/*--iteration--*/
/* fixed-point iteration on the uplink light time tao12; the station
   position at t1 must be re-interpolated every pass */
flag = -1;
do
{
flag++;
tao122 = tao121;
/*--time scales transformation --*/
tdb2tt (JD0 + tdb_1 / 86400.0, &t,&secdiff);
tt_1 = tdb_1 - secdiff;
utc_1 = tt_1 - (LEAPSECS + 32.184);
geteop (utc_1, &xp, &yp, &ut1_utc, &dx, &dy);
ut1_1 = utc_1 + ut1_utc;
secdiff1 = iauDtdb (JD0, tdb_1 / 86400.0, ut1_1 / 86400.0,
elong, u, v);
tt_1 = tdb_1 - secdiff1;
utc_1 = tt_1 - (LEAPSECS + 32.184);
/*--station coordinate interpolation--*/
lagrange (TE_EPH, DIM_TE, 10, utc_1, te);
brmul (te, re1fi, 3, 3, 1, re1);
lagrange (TE_EPH, DIM_TE, 10, utc_1 + 1.0, te);
brmul (te, re1fi, 3, 3, 1, re1n);
for (n = 0; n < 3; n++)
{
xe1[n] = re1[n] / AU;
xe1[n + 3] = (re1n[n] - re1[n]) / AU * 86400;
}
r12 = lt_form (tdb_1, tdb_2, xe1, xc2, genrel, xb1, xb2);
tao121 = (real128)r12 * (real128)AU_SEC;
tdb_1 = tdb_2 - (double) tao121;
}while (fabsl (tao122-tao121) > taoerr);
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*-- partial time: tdb2 --*/
if (part == 0)
{
/* no partials requested: bmat receives the raw satellite state at t2 */
lagrange (OR_EPH, DIM_OR, 7, tdb_2, bmat);
}
if (part == 1)
{
/* chain rule: d(obs)/dx0 = d(obs)/dx * dx/dx0, and likewise for the
   dynamical parameters */
lagrange (OR_EPH, DIM_OR, dim_par, tdb_2, eph);
for (n = 0; n < 36; n++)
dxdx0[n] = eph[n];
for (n = 0; n < 6 * DYNPAR; n++)
dxdp[n] = eph[n + 42];
lt_part (xb3, xb2, xb1, uplink, dodx, dodp);
brmul (dodx, dxdx0, 1, 6, 6, dodx0);
brmul (dodx, dxdp, 1, 6, DYNPAR, dodpp);
for (n = 0; n < DYNPAR; n++)
dodp[n] = dodpp[n] + dodp[n];
for (n = 0; n < 3; n++)
{
bmat[n] = dodx0[n];
bmat[n + 3] = dodx0[n + 3] * 86400.0;
}
bmat[6] = dodp[0] * AU; // l/c: au,
bmat[7] = dodp[1] * AU * 86400.0; // l/(c*d-1): au*d,
}
/* satellite epoch and state at the reflection/transmit time t2 */
txic[0] = tdb_2;
for (n = 1; n < 7; n++)
{
txic[n] = xc2[n - 1];
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*--calculate light time observable --*/
if (uplink == 0)
{
lt[0] = (real128)r23 * (real128)AU;
lt[1] = ((real128)utc_3 - (real128)tdb_2) * (real128)C;
lt[2] = (real128)tao231 - (real128)secdiff3 - (real128)(LEAPSECS + 32.184);
}
if (uplink == 1)
{
lt[0] = ((real128)r12 + (real128)r23) * (real128)AU;
lt[1] = ((real128)utc_3 - (real128)utc_1) * (real128)C;
lt[2] = (real128)tao231 + (real128)tao121 + (real128)secdiff1 - (real128)secdiff3;
// lt[2] = tao231 + tao121 + secdiff1 - secdiff3;
}
/* pointing: topocentric direction station3 -> satellite */
for (n = 0; n < 3; n++)
vec[n] = xb2[n] - xb3[n];
vector2radec (vec, &ra,&dec);
azelev (ut1_3 / 86400.0 + JD0, delta_t3, ACCURACY,
xp3, yp3, llh3, ra, dec, &zd, &az);
*azimuth = az;
*elevation = 90.0 - zd;
return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* lt_part - partial of light time
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double lt_part (real128 *xb3, real128 *xb2, real128 *xb1, int uplink,
                double *dodx, double *dodp)
{
    /*
     * Partials of the light-time observable with respect to the satellite
     * barycentric state at t2 (dodx[0..5]) and the bias parameters
     * (dodp[0..1]).  xb3/xb2/xb1 are the barycentric states of the receive
     * station, satellite, and transmit station (xb1 used only when
     * uplink == 1).
     */
    double range23, range12, rr23, rr12, fac2, fac1, rr12dot, fac1d,
        d2[3], d1[3];
    int k;
    /* downlink leg 2 -> 3 */
    range23 = sqrt ((xb2[0] - xb3[0]) * (xb2[0] - xb3[0])
                  + (xb2[1] - xb3[1]) * (xb2[1] - xb3[1])
                  + (xb2[2] - xb3[2]) * (xb2[2] - xb3[2]));
    rr23 = ((xb3[0] - xb2[0]) * xb2[3] + (xb3[1] - xb2[1]) * xb2[4]
          + (xb3[2] - xb2[2]) * xb2[5]) / range23;
    fac2 = (1 - rr23 / C_AUDAY);      /* retardation factor, leg 2->3 */
    for (k = 0; k < 3; k++)
        d2[k] = (xb3[k] - xb2[k]) / range23 / fac2;
    if (uplink == 0)
    {
        for (k = 0; k < 3; k++)
            dodx[k] = - d2[k];
    }
    if (uplink == 1)
    {
        /* uplink leg 1 -> 2, chained through the downlink partial */
        range12 = sqrt ((xb2[0] - xb1[0]) * (xb2[0] - xb1[0])
                      + (xb2[1] - xb1[1]) * (xb2[1] - xb1[1])
                      + (xb2[2] - xb1[2]) * (xb2[2] - xb1[2]));
        rr12 = ((xb2[0] - xb1[0]) * xb1[3] + (xb2[1] - xb1[1]) * xb1[4]
              + (xb2[2] - xb1[2]) * xb1[5]) / range12;
        fac1 = (1 - rr12 / C_AUDAY);
        rr12dot = ((xb2[0] - xb1[0]) * (xb2[3] - xb1[3])
                 + (xb2[1] - xb1[1]) * (xb2[4] - xb1[4])
                 + (xb2[2] - xb1[2]) * (xb2[5] - xb1[5])) / range12;
        fac1d = (1 - (rr12dot + rr12) / C_AUDAY);
        for (k = 0; k < 3; k++)
            d1[k] = (d2[k] * fac1d - (xb2[k] - xb1[k]) / range12) / fac1;
        for (k = 0; k < 3; k++)
            dodx[k] = - d1[k];
    }
    /* no sensitivity to velocity components in this formulation */
    dodx[3] = 0;
    dodx[4] = 0;
    dodx[5] = 0;
    dodp[0] = 0; //
    dodp[1] = 0; //
    return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* lt_form - calculate the light time equation
* @param:
tdb3, re3[3] : station time, coordinates (unit:AU)
tdb2, rp2[3] : satellite time, coordinates (AU)
genrel :
*rs3 : output: station coordinates to SSB (AU)
*rs2 : output: satellite coordinates to SSB (AU)
return : light time solution (AU)
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
real128 lt_form (double tdb3, double tdb2, double *re3, double *rp2,
int genrel, real128 *rs3, real128 *rs2)
{
/*
 * Evaluate the light-time equation between the station at tdb3 and the
 * satellite at tdb2 (inputs relative to earth/CENTER in AU), returning
 * the Newtonian distance plus, when genrel == 1, the Shapiro
 * (gravitational) delay of the Sun and the other bodies, in AU.
 * Side outputs: rs3/rs2 receive the quad-precision barycentric-frame
 * states of station and satellite (relative to the Sun here — built by
 * adding the planet_ephemeris offsets; verify the ephemeris sign
 * convention against the callers).
 */
double gamma, tjd2[2], tjd3[2], re[3], ve[3],
rp[3], vp[3], xe[6], xp[6];
real128 rlight, rgen, ri, rj, r12, rse[3], rsp[3], r23[3], rlt[3];
short int earth = 2, sun = 10, j;
int n;
gamma = 1; /* PPN gamma (GR value) */
tjd2[0] = JD0;
tjd3[0] = JD0;
tjd2[1] = tdb2 / 86400.0;
tjd3[1] = tdb3 / 86400.0;
planet_ephemeris (tjd3, earth, sun, &xe[0], &xe[3]);
for (n = 0; n < 6; n++)
rs3[n] = (real128)re3[n] + (real128)xe[n];
ri = sqrtl (rs3[0] * rs3[0] + rs3[1] * rs3[1] + rs3[2] * rs3[2]);
planet_ephemeris (tjd2, CENTER, sun, &xp[0], &xp[3]);
for (n = 0; n < 6; n++)
rs2[n] = (real128)rp2[n] + (real128)xp[n];
rj = sqrtl (rs2[0] * rs2[0] + rs2[1] * rs2[1] + rs2[2] * rs2[2]);
for (n = 0; n < 3; n++)
r23[n] = (real128)xe[n] - (real128)xp[n];
for (n = 0; n < 3; n++)
rlt[n] = (real128)r23[n] + (real128)re3[n] - (real128)rp2[n];
// rlight = sqrtl (((real128)rs3[0] - (real128)rs2[0]) * ((real128)rs3[0] - (real128)rs2[0])
// + ((real128)rs3[1] - (real128)rs2[1]) * ((real128)rs3[1] - (real128)rs2[1])
// + ((real128)rs3[2] - (real128)rs2[2]) * ((real128)rs3[2] - (real128)rs2[2]));
/* NOTE(review): this rlt-based value is immediately overwritten by the
   next assignment, so it is dead code — the rs3-rs2 form below is the
   one that takes effect. Confirm which form was intended. */
rlight = sqrtl( rlt[0] * rlt[0] + rlt[1] * rlt[1] + rlt[2] * rlt[2]);
rlight = sqrtl ((rs3[0] - rs2[0]) * (rs3[0] - rs2[0])
+ (rs3[1] - rs2[1]) * (rs3[1] - rs2[1])
+ (rs3[2] - rs2[2]) * (rs3[2] - rs2[2]));
if (genrel == 1)
{
/* Shapiro delay of the Sun, with the (1+gamma)GM/c^2 regularization
   term in both numerator and denominator of the log argument */
rgen = (1L + (real128)gamma) * (real128)GMDE[10] / (real128)C_AUDAY / (real128)C_AUDAY
* logl ((ri + rj + rlight
+ (1L + (real128)gamma) * (real128)GMDE[10] / (real128)C_AUDAY / (real128)C_AUDAY)
/ (ri + rj - rlight
+ (1L + (real128)gamma) * (real128)GMDE[10] / (real128)C_AUDAY / (real128)C_AUDAY));
rlight = rlight + rgen;
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/* Shapiro delay of the remaining bodies (indices 0..9) */
for (j = 0; j <= 9; j++)
{
// if (j ==9) continue;
planet_ephemeris (tjd3, earth, j, re, ve);
for (n = 0; n < 3; n++)
rse[n] = (real128)re3[n] + (real128)re[n];
ri = sqrtl (rse[0] * rse[0] + rse[1] * rse[1] + rse[2] * rse[2]);
planet_ephemeris (tjd2, CENTER, j, rp, vp);
for (n = 0; n < 3; n++)
rsp[n] = (real128)rp2[n] + (real128)rp[n];
rj = sqrtl (rsp[0] * rsp[0] + rsp[1] * rsp[1] + rsp[2] * rsp[2]);
r12 = sqrtl((rse[0] - rsp[0]) * (rse[0] - rsp[0])
+ (rse[1] - rsp[1]) * (rse[1] - rsp[1])
+ (rse[2] - rsp[2]) * (rse[2] - rsp[2]));
rgen = (1L + (real128)gamma) * (real128)GMDE[j] / (real128)C_AUDAY / (real128)C_AUDAY
* logl ((ri + rj + r12 ) / (ri + rj - r12));
rlight = rlight + rgen;
}
}
return rlight;
}
void azelev (double jd_ut1, double delta_t, short int accuracy,
                double x, double y, double *llh, double ra,
                double dec, double *zd, double *az)
{
    /*
     * Transform (ra, dec) to topocentric azimuth and zenith distance for
     * the site llh (lat, lon in degrees) at jd_ut1, following the NOVAS
     * equ2hor basis-vector method.
     *   ra  : right ascension in hours (scaled by 15 to degrees below)
     *   zd  : output zenith distance (degrees)
     *   az  : output azimuth (degrees, 0..360)
     */
    double sinlat, coslat, sinlon, coslon, sindc, cosdc, sinra, cosra,
        uze[3], une[3], uwe[3], uz[3], un[3], uw[3], p[3], pz, pn, pw,
        proj;
    /*
    Preliminaries.
    */
    sinlat = sin (llh[0] * DEG2RAD);
    coslat = cos (llh[0] * DEG2RAD);
    sinlon = sin (llh[1] * DEG2RAD);
    coslon = cos (llh[1] * DEG2RAD);
    sindc = sin (dec * DEG2RAD);
    cosdc = cos (dec * DEG2RAD);
    sinra = sin (ra * 15.0 * DEG2RAD);
    cosra = cos (ra * 15.0 * DEG2RAD);
    /*
    Set up orthonormal basis vectors in local Earth-fixed system.
    Define vector toward local zenith in Earth-fixed system (z axis).
    */
    uze[0] = coslat * coslon;
    uze[1] = coslat * sinlon;
    uze[2] = sinlat;
    /*
    Define vector toward local north in Earth-fixed system (x axis).
    */
    une[0] = -sinlat * coslon;
    une[1] = -sinlat * sinlon;
    une[2] = coslat;
    /*
    Define vector toward local west in Earth-fixed system (y axis).
    */
    uwe[0] = sinlon;
    uwe[1] = -coslon;
    uwe[2] = 0.0;
    /*
    Obtain vectors in celestial system.
    Rotate Earth-fixed orthonormal basis vectors to celestial system
    (wrt equator and equinox of date).
    */
    ter2cel (jd_ut1,0.0,delta_t,1,accuracy,1,x,y,uze, uz);
    ter2cel (jd_ut1,0.0,delta_t,1,accuracy,1,x,y,une, un);
    ter2cel (jd_ut1,0.0,delta_t,1,accuracy,1,x,y,uwe, uw);
    /*
    Define unit vector 'p' toward object in celestial system
    (wrt equator and equinox of date).
    */
    p[0] = cosdc * cosra;
    p[1] = cosdc * sinra;
    p[2] = sindc;
    /*
    Compute coordinates of object wrt orthonormal basis.
    Compute components of 'p' - projections of 'p' onto rotated
    Earth-fixed basis vectors.
    */
    pz = p[0] * uz[0] + p[1] * uz[1] + p[2] * uz[2];
    pn = p[0] * un[0] + p[1] * un[1] + p[2] * un[2];
    pw = p[0] * uw[0] + p[1] * uw[1] + p[2] * uw[2];
    /*
    Compute azimuth and zenith distance.
    */
    proj = sqrt (pn * pn + pw * pw);
    /* BUG FIX: initialize *az before the range checks below — when the
       object is at the zenith (proj == 0) the old code read whatever
       garbage the caller's az held.  NOVAS equ2hor does the same. */
    *az = 0.0;
    if (proj > 0.0)
        *az = -atan2 (pw, pn) * RAD2DEG;
    if (*az < 0.0)
        *az += 360.0;
    if (*az >= 360.0)
        *az -= 360.0;
    *zd = atan2 (proj, pz) * RAD2DEG;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* fun_pointmass - abandoned
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double fun_pointmass (double tdbs, double *x, double *f)
{
    /*
     * (Abandoned) equation of motion for a single central point mass plus
     * the Schwarzschild relativistic correction.
     *   x : state (position, velocity) relative to CENTER
     *   f : output state derivative (velocity, acceleration)
     */
    double newton[3], rel[3], rad, speed2, rdotv, ca, cb;
    int k, gam;
    gam = 1;                      /* PPN gamma */
    f[0] = x[3];
    f[1] = x[4];
    f[2] = x[5];
    rad = sqrt (x[0] * x[0] + x[1] * x[1] + x[2] * x[2]);
    speed2 = x[3] * x[3] + x[4] * x[4] + x[5] * x[5];
    rdotv = x[0] * x[3] + x[1] * x[4] + x[2] * x[5];
    ca = 2 * (1 + gam) * GMDE[CENTER] / rad - gam * speed2;
    cb = 2 * (1 + gam) * rdotv;
    for (k = 0; k < 3; k++)
    {
        /* relativistic correction term */
        rel[k] = GMDE[CENTER] / C_AUDAY / C_AUDAY / rad / rad / rad
               * (ca * x[k] + cb * x[k + 3]);
        /* Newtonian two-body acceleration */
        newton[k] = - GMDE[CENTER] / (rad * rad * rad) * x[k];
        f[3 + k] = newton[k] + rel[k];
    }
    return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* fun_fullaccel -
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double fun_fullaccel (double tdbs, double *xic, double *fxic)
{
    /*
     * Full-force equation of motion (no variational equations): sums the
     * point-mass/relativistic, non-spherical, and radiation-pressure
     * accelerations at epoch tdbs for state xic -> derivative fxic.
     */
    int k;
    short int part = 0;
    double tjd[2], a_grav[3], a_nonsp[3], a_srp[3], dum1[1], dum2[1];
    tjd[0] = JD0;
    tjd[1] = tdbs;
    accel_ntrel (tjd, xic, part, a_grav, dum1, dum2);
    accel_nonsp (tjd, xic, part, a_nonsp, dum1, dum2);
    accel_radpr (tjd, xic, part, a_srp, dum1, dum2);
    fxic[0] = xic[3];
    fxic[1] = xic[4];
    fxic[2] = xic[5];
    for (k = 0; k <= 2; k++)
    {
        fxic[3 + k] = a_grav[k] + a_nonsp[k] + a_srp[k];
    }
    return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* fun_fullstate -transition matrix(36), orbit(6), sensitivity matrix(6*DYNPAR)
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double fun_fullstate (double tdbs, double *state, double *fstate)
{
/*
 * Derivative of the augmented state for the variational equations.
 * state layout (and fstate, identically):
 *   [0..35]            transition matrix dx/dx0 (6x6, row-major)
 *   [36..41]           orbit state xic
 *   [42..42+6*DYNPAR)  sensitivity matrix dx/dp
 */
int n;
short int ssbary = 11, part = 1;
double tjd[2], xic[6], dfdx[36], dxdx0[36], dfdp[6 * DYNPAR],
dfdpp[6 * DYNPAR], dxdp[6 * DYNPAR],
acc1[3], dadr1[9], dadp1[3 * DYNPAR],
acc2[3], dadr2[9], dadp2[3 * DYNPAR],
acc3[3], dadr3[9], dadp3[3 * DYNPAR],
acc[3], dadr[9], dadp[3 * DYNPAR],
fxic[6], fdxdx0[36], fdxdp[6 * DYNPAR];
tjd[0] = JD0;
tjd[1] = tdbs;
/* unpack the augmented state */
for (n = 0; n < 36; n++)
{
dxdx0[n] = state[n];
}
for (n = 0; n < 6; n++)
{
xic[n] = state[n + 36];
}
for (n = 0; n < 6 * DYNPAR; n++)
{
dxdp[n] = state[n + 42];
}
/* acc, partial to xyz: dadr, partial to parameters dadp*/
accel_ntrel (tjd, xic, part, acc1, dadr1, dadp1);
accel_nonsp (tjd, xic, part, acc2, dadr2, dadp2);
accel_radpr (tjd, xic, part, acc3, dadr3, dadp3);
/*todo: air drag acc & partial to vxvyvz dadv*/
for (n = 0; n <= 2; n++)
{
acc[n] = acc1[n] + acc2[n] + acc3[n];
}
for (n = 0; n <= 8; n++)
{
dadr[n] = dadr1[n] + dadr2[n] + dadr3[n];
}
for (n = 0; n < 3 * DYNPAR; n++)
{
dadp[n] = dadp1[n] + dadp2[n] + dadp3[n];
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/* assemble F = df/dx = [[0, I], [da/dr, 0]] (6x6, row-major) */
for (n = 0; n < 36; n++)
{
dfdx[n] = 0;
}
dfdx[3] = 1;
dfdx[10] = 1;
dfdx[17] = 1;
for (n = 0; n < 3; n++)
{
dfdx[n + 18] = dadr[n];
dfdx[n + 24] = dadr[n + 3];
dfdx[n + 30] = dadr[n + 6];
}
/* d/dt (dx/dx0) = F * dx/dx0 */
brmul(dfdx, dxdx0, 6, 6, 6, fdxdx0);
fxic[0] = xic[3];
fxic[1] = xic[4];
fxic[2] = xic[5];
fxic[3] = acc[0];
fxic[4] = acc[1];
fxic[5] = acc[2];
/* d/dt (dx/dp) = F * dx/dp + df/dp */
brmul(dfdx, dxdp, 6, 6, DYNPAR, dfdpp);
for (n = 0; n < 3 * DYNPAR; n++)
{
dfdp[n] = 0;
dfdp[n + 3 * DYNPAR] = dadp[n];
}
for (n = 0; n < 6 * DYNPAR; n++)
{
fdxdp[n] = dfdpp[n] + dfdp[n];
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/* repack the derivative in the same layout */
for (n = 0; n < 36; n++)
{
fstate[n] = fdxdx0[n];
}
for (n = 0; n < 6; n++)
{
fstate[n + 36] = fxic[n];
}
for (n = 0; n < 6 * DYNPAR; n++)
{
fstate[n + 42]= fdxdp[n];
}
return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* accel_ntrel - Newtonian + Relativistic acceleration
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double accel_ntrel (double *tjd, double *xic, short int part,
                double *acc, double *dadr, double *dadp)
{
    /*
     * Newtonian + relativistic acceleration of the satellite relative to
     * CENTER, computed as the difference of two barycentric accelerations
     * (satellite minus central body).  With part == 1 the position and
     * parameter partials are differenced the same way.
     */
    int k;
    short int bary = 11;
    double cstate[6], cacc[3], bstate[6], bacc[3], jac[9], scratch[9],
        dp_center[3*DYNPAR], dp_body[3*DYNPAR];
    /* barycentric acceleration of the central body itself */
    planet_ephemeris (tjd, CENTER, bary, &cstate[0], &cstate[3]);
    accel_bcrs (tjd, cstate, part, CENTER, cacc, scratch, dp_center);
    /* barycentric satellite state = center-relative state + center state */
    for (k = 0; k <= 5; k++)
    {
        bstate[k] = xic[k] + cstate[k];
    }
    accel_bcrs (tjd, bstate, part, 99, bacc, jac, dp_body);
    for (k = 0; k <= 2; k++)
    {
        acc[k] = bacc[k] - cacc[k];
    }
    if (part == 1)
    {
        for (k = 0; k <= 8; k++)
        {
            dadr[k] = jac[k];
        }
        for (k = 0; k <= 3 * DYNPAR - 1; k++)
        {
            dadp[k] = dp_body[k] - dp_center[k];
        }
    }
    return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* accel_bcrs - Newtonian + Relativistic acceleration
* @param1: description of param1
* @param2: description of param2
* todo:
1 partial to parameters
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double accel_bcrs (double *jd, double *xi, short int part, short int exclude,
double *acc, double *dadr, double *dadp)
{
/*
 * Barycentric acceleration of a body at state xi: Newtonian n-body sum
 * plus (when any PERB entry equals 2) the PPN relativistic point-mass
 * terms.  `exclude` removes one body index from the sums (pass an
 * out-of-range value such as 99 to keep all).  part == 1 also fills
 * dadr (3x3 Newtonian Jacobian) and zeroes dadp.
 * NOTE(review): returns 0 on the Newtonian-only early exit but 1 after
 * the relativistic path — callers appear to ignore the return value.
 */
double fnt[3], fgr[3], xj[11][6], xij[11][6], rij[11], xjk[6], rjk,
xddj[3], sumil, sumjk, sdi2, sdj2, rdirdj, rrrdr2, rjirdd,
rij5, rij3, xijt[9], gra, grb, beta, gamma, unit[9];
short int ssbary, l, k, j, n, flag_gr;
ssbary = 11;
gamma = 1.0; /* PPN gamma (GR) */
beta = 1.0;  /* PPN beta (GR) */
unit[0] = 1; unit[1] = 0; unit[2] = 0;
unit[3] = 0; unit[4] = 1; unit[5] = 0;
unit[6] = 0; unit[7] = 0; unit[8] = 1;
/* relative state and distance to every body */
for (j = 0; j <= 10; j++)
{
planet_ephemeris (jd, j, ssbary, &xj[j][0], &xj[j][3]);
for (n = 0; n < 6; n++)
{
xij[j][n] = xi[n] - xj[j][n];
}
rij[j] = sqrt (xij[j][0] * xij[j][0]
+ xij[j][1] * xij[j][1] + xij[j][2] * xij[j][2]);
}
/* Newtonian n-body acceleration; flag_gr records whether any body is
   marked for the relativistic treatment (PERB == 2) */
flag_gr = 0;
for (n = 0; n < 3; n ++)
fnt[n] = 0;
for (j = 0; j <= 10; j++)
{
if (PERB[j] == 2)
flag_gr = 1;
if (PERB[j] == 0)
continue;
if (j == exclude)
continue;
for (n = 0; n < 3; n++)
fnt[n] = fnt[n]
- GMDE[j] / (rij[j] * rij[j] * rij[j]) * xij[j][n];
}
if (part == 1)
{
for (n = 0; n <= 3 * DYNPAR - 1; n++)
{
dadp[n] = 0;
}
for (n = 0; n <= 8; n++)
{
dadr[n] = 0;
}
/* Newtonian Jacobian da/dr = sum_j GM_j (3 rr^T/r^5 - I/r^3)
   NOTE(review): unlike the force loop above, this loop does not skip
   bodies with PERB[j] == 0 — confirm whether that is intended. */
for (j = 0; j <= 10; j++)
{
if (j == exclude)
continue;
rij5 = pow (rij[j], 5);
rij3 = pow (rij[j], 3);
brmul (xij[j], xij[j], 3,1,3, xijt);
for (n = 0; n <= 8; n++)
{
dadr[n] = dadr[n] + 3 * GMDE[j] * xijt[n] / rij5
- GMDE[j] * unit[n] / rij3;
}
}
}
if (flag_gr == 0)
{
for (n = 0; n < 3; n++)
acc[n] = fnt[n];
return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/* PPN relativistic point-mass terms (EIH-style), only over bodies with
   PERB == 2 */
sdi2 = xi[3] * xi[3] + xi[4] * xi[4] + xi[5] * xi[5];
sumil = 0;
for (l = 0; l < 11; l ++)
{
if ( l == exclude)
continue;
if (PERB[l] != 2)
continue;
sumil = sumil + GMDE[l] / rij[l];
}
for (n = 0; n < 3; n ++)
fgr[n] = 0;
for (j = 0; j < 11; j ++)
{
if (PERB[j] != 2)
continue;
if (j == exclude)
continue;
/* potential at body j from the other bodies, and its Newtonian
   acceleration xddj */
sumjk = 0;
for (n = 0; n < 3; n ++)
xddj[n] = 0;
for (k = 0; k < 11; k ++)
{
if (k == j)
continue; //k!=j
if (PERB[k] != 2)
continue;
for (n = 0; n < 3; n++)
xjk[n] = xj[j][n] - xj[k][n];
rjk = sqrt (xjk[0] * xjk[0] + xjk[1] * xjk[1] + xjk[2] * xjk[2]);
sumjk = sumjk + GMDE[k] / rjk;
for (n = 0; n < 3; n ++)
xddj[n] = xddj[n] - GMDE[k] / (rjk * rjk * rjk) * xjk[n];
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
sdj2 = xj[j][3] * xj[j][3] + xj[j][4] * xj[j][4]
+ xj[j][5] * xj[j][5];
rdirdj = xi[3] * xj[j][3] + xi[4] * xj[j][4] + xi[5] * xj[j][5];
rrrdr2 = pow( ( xij[j][0] * xj[j][3] + xij[j][1] * xj[j][4]
+ xij[j][2] * xj[j][5]) / rij[j], 2);
rjirdd = - ( xij[j][0] * xddj[0] + xij[j][1] * xddj[1]
+ xij[j][2] * xddj[2]);
/* scalar multiplier of the Newtonian direction term */
gra = - 2 * (beta + gamma) * sumil - (2 * beta -1) * sumjk
+ gamma * sdi2 + (1 + gamma) * sdj2
- 2 * (1 + gamma) * rdirdj - 1.5 * rrrdr2 + 0.5 * rjirdd;
/* scalar multiplier of the velocity term */
grb = xij[j][0] * ((2+2*gamma) * xi[3] - (1+2*gamma) * xj[j][3])
+ xij[j][1] * ((2+2*gamma) * xi[4] - (1+2*gamma) * xj[j][4])
+ xij[j][2] * ((2+2*gamma) * xi[5] - (1+2*gamma) * xj[j][5]);
for (n = 0; n < 3; n ++)
{
fgr[n] = fgr[n]
+ GMDE[j] / (rij[j] * rij[j] * rij[j])
* ( - xij[j][n]) * gra / C_AUDAY / C_AUDAY
+ GMDE[j] / (rij[j] * rij[j] * rij[j])
* xij[j][n + 3] * grb / C_AUDAY / C_AUDAY
+ GMDE[j] / rij[j] * (3 + 4 * gamma) * 0.5
* xddj[n] / C_AUDAY / C_AUDAY;
}
}
for (n = 0; n < 3; n++)
acc[n] = fgr[n] + fnt[n];
return 1;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* accel_radpr - solar radiation press & partial to srp coefficients
* @param1: description of param1
* @param2: description of param2
* todo:
1 earth shadow
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double accel_radpr (double *tjd, double *xic, short int part,
                double *acc, double *dadr, double *dadp)
{
    /*
     * Solar radiation pressure acceleration (cannonball model, no earth
     * shadow yet) and, with part == 1, its position Jacobian dadr; the
     * parameter partials dadp are currently zeroed.
     */
    double flux, scale, area, mass, dist, dir[3], relss[6], sunstate[6],
        mag, outer[9], eye[9], dist3;
    short int k, sunidx;
    sunidx = 10;
    eye[0] = 1; eye[1] = 0; eye[2] = 0;
    eye[3] = 0; eye[4] = 1; eye[5] = 0;
    eye[6] = 0; eye[7] = 0; eye[8] = 1;
    /* sun -> satellite vector and unit direction */
    planet_ephemeris (tjd, sunidx, CENTER, &sunstate[0], &sunstate[3]);
    for (k = 0; k <= 5; k++)
    {
        relss[k] = xic[k] - sunstate[k];
    }
    dist = sqrt (relss[0] * relss[0] + relss[1] * relss[1]
                 + relss[2] * relss[2]);
    for (k = 0; k <= 2; k++)
    {
        dir[k] = relss[k] / dist;
    }
    flux = 1352.5;          /* solar constant, kg/s3 */
    mass = SATMASS;         /* kg */
    area = SATAREA;         /* m2 */
    scale = flux / C * 1 * 1;       /* kg/s2/m*au*au */
    mag = scale * area / mass / dist / dist;
    /* kg/s2/m*au*au * m2 / kg / au / au = m/s2, then convert to AU/day2 */
    mag = mag / AU * 86400.0 * 86400.0;
    for (k = 0; k <= 2; k++)
    {
        acc[k] = mag * dir[k];
    }
    if (part == 0)
        return 1;
    /* dadr = -f (3 rr^T/r^3 - I/r) */
    dist3 = dist * dist * dist;
    brmul (relss, relss, 3, 1, 3, outer);
    for (k = 0; k <= 8; k++)
    {
        dadr[k] = - mag * (3 * outer[k] / dist3 - eye[k] / dist);
    }
    for (k = 0; k <= 3 * DYNPAR - 1; k++)
    {
        dadp[k] = 0;
    }
    return 0;
}
// nmax = 4;
// stcs = (double *) calloc ( (nmax + 1) * (nmax + 1), sizeof(double));
double stidecs_old(double *tjd, double gma1, double k2,
double *c20, double *c21, double *s21, double *c22, double *s22)
{
/*
 * Frequency-independent solid-earth-tide corrections to the degree-2
 * Stokes coefficients from the luni-solar potential (Love number k2).
 *   gma1 : reference radius used in the (R/r)^3 factors
 * NOTE(review): moon = 2 / earth = 9 here, the reverse of the
 * commented-out line above — and gmm2e is forced to 0 below, so the
 * lunar contribution is currently disabled.  Confirm the intended
 * ephemeris body indices before re-enabling it.
 */
double gms2e;
double gmm2e;
// short int moon = 9, earth = 2, sun = 10;
short int moon = 2, earth = 9, sun = 10;
double ps[3], vs[3], pm[3], vm[3],
pse[3], pme[3], llrs[3], llrm[3], pbar[4], t,
p20m, p30m, p21m, p31m, p22m, p32m, p33m,
p20s, p30s, p21s, p31s, p22s, p32s, p33s,
rerm, rers, tb[9], tbt[9];
// Luni-solar ephemeris
planet_ephemeris (tjd, sun, CENTER, ps, vs);
planet_ephemeris (tjd, moon, CENTER, pm, vm);
/* rotate sun/moon positions into the body-fixed frame */
iau_pns (tjd, tb, CENTER);
mt (tb, 3, 3, tbt);
brmul (tbt,ps,3,3,1,pse);
brmul (tbt,pm,3,3,1,pme);
xyz2llh(pse, llrs);
xyz2llh(pme, llrm);
/* associated Legendre values at the moon's latitude */
t = sin(llrm[0] * DEG2RAD);
lgdr(t, 3, 0, pbar); p20m = pbar[2]; p30m = pbar[3];
lgdr(t, 3, 1, pbar); p21m = pbar[1]; p31m = pbar[2];
lgdr(t, 3, 2, pbar); p22m = pbar[0]; p32m = pbar[1];
lgdr(t, 3, 3, pbar); p33m = pbar[0];
/* associated Legendre values at the sun's latitude */
t = sin(llrs[0] * DEG2RAD);
lgdr(t, 3, 0, pbar); p20s = pbar[2]; p30s = pbar[3];
lgdr(t, 3, 1, pbar); p21s = pbar[1]; p31s = pbar[2];
lgdr(t, 3, 2, pbar); p22s = pbar[0]; p32s = pbar[1];
lgdr(t, 3, 3, pbar); p33s = pbar[0];
gms2e = GMDE[sun]/GMDE[CENTER];
// gmm2e = GMDE[moon]/GMDE[CENTER];
gmm2e = 0; /* lunar term disabled — see NOTE(review) above */
rerm = gma1 / llrm[2];
rers = gma1 / llrs[2];
// Frequency Independent Terms
// C20
*c20 = k2/5.0 * ( gmm2e * pow(rerm, 3) * p20m
+ gms2e * pow(rers, 3) * p20s );
// C21/S21
*c21 = k2/5.0 * ( gmm2e * pow(rerm, 3) * p21m * cos(llrm[1] * DEG2RAD)
+ gms2e * pow(rers, 3) * p21s * cos(llrs[1] * DEG2RAD) );
*s21 = k2/5.0 * ( gmm2e * pow(rerm, 3) * p21m * sin(llrm[1] * DEG2RAD)
+ gms2e * pow(rers, 3) * p21s * sin(llrs[1] * DEG2RAD) );
// C22/S22
*c22 = k2/5.0 * ( gmm2e * pow(rerm, 3) * p22m * cos(llrm[1] * DEG2RAD * 2.0)
+ gms2e * pow(rers, 3) * p22s * cos(llrs[1] * DEG2RAD * 2.0) );
*s22 = k2/5.0 * ( gmm2e * pow(rerm, 3) * p22m * sin(llrm[1] * DEG2RAD * 2.0)
+ gms2e * pow(rers, 3) * p22s * sin(llrs[1] * DEG2RAD * 2.0) );
return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* accel_nonsp - non-spherical force
* @param1: description of param1
* @param2: description of param2
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double accel_nonsp (double *tjd, double *xic, short int part,
double *acc, double *dadr, double *dadp)
{
int n, m, i;
static int flag = 0;
// static double *cn0, *cnm, *snm, gma[2];
double xfc[3] = {0}, tb[9] = {0}, tbt[9] ={0}, rmat[9] = {0},
gmat[9] = {0}, gmatt[9] = {0}, r, rxy, sinf, cosf, sinl, cosl,
lamta, *pn, *pnm, *pnp, *pnmp, *pnpp, *pnmpp,
frj[3] = {0}, frcs[3] = {0}, fr[3] = {0}, *cosml, *sinml, *aprn,
peprp[27], pepr[27], prtpx[9], prtpy[9], prtpz[9], pgtpx[9],
pgtpy[9], pgtpz[9], pgx[3], pgy[3], pgz[3], part1[9], prjpx[3],
prjpy[3], prjpz[3], prcspx[3], prcspy[3], prcspz[3], prpr[9],
gtpr[9], part2[9], prtpxx[9], prtpyy[9], prtpzz[9];
double cunit, dfd2r[3], dfd2[3], dfdkr[3], dfdk[3], k2, c20, c21, s21, c22, s22,
unit, nup, ndown;
if (part == 1)
{
for (n = 0; n <= 3 * DYNPAR - 1; n++)
{
dadp[n] = 0;
}
for (n = 0; n <= 8; n++)
{
dadr[n] = 0;
}
}
if (GRAVDEGREE < 2)
{
for (n = 0; n <= 2; n++)
{
acc[n] = 0;
}
return 1;
}
if (flag != 9) //
{
cn0 = (double *) calloc (GRAVDEGREE, sizeof(double));
cnm = (double *) calloc (GRAVDEGREE * GRAVDEGREE, sizeof(double));
snm = (double *) calloc (GRAVDEGREE * GRAVDEGREE, sizeof(double));
opengravfile (cn0, cnm, snm, gma);
flag = 9;
}
k2 = CONS;
stidecs_old(tjd, gma[1], k2, &c20, &c21, &s21, &c22, &s22);
// cn0[1] = (-8.745054708184200e-04 + c20 + DCON * 1.0e-16 * tjd[1]) * sqrt(5);
n = 2;
cn0[n-1] = (j2 + c20 + DCON * 1.0e-8) * sqrt(2*n+1);
n = 3;
cn0[n-1] = (j3 + BIAS * 1.0e-8) * sqrt(2*n+1);
n = 4;
cn0[n-1] = (j4 + DBIA * 1.0e-8) * sqrt(2*n+1);
n = 2; m = 1;
{
nup = 1.0;
ndown = 1.0;
for (i = 1; i <= n - m; i++)
nup = nup * i;
for (i = 1; i <= n + m; i++)
ndown = ndown * i;
unit = sqrt (2 * (2*n+1.0)*nup/ndown);
cnm[(n-1)*GRAVDEGREE + (m-1)] = (jc21 + c21)* unit;
snm[(n-1)*GRAVDEGREE + (m-1)] = (js21 + s21)* unit;
}
n = 2; m = 2;
{
nup = 1.0;
ndown = 1.0;
for (i = 1; i <= n - m; i++)
nup = nup * i;
for (i = 1; i <= n + m; i++)
ndown = ndown * i;
unit = sqrt (2 * (2*n+1.0)*nup/ndown);
cnm[(n-1)*GRAVDEGREE + (m-1)] = (jc22 + c22)* unit;
snm[(n-1)*GRAVDEGREE + (m-1)] = (js22 + s22)* unit;
}
// cn0[2] = -1.188691064601560e-05 * sqrt(7) * (1 + DCON);
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*****************************rotation matrix ********************************/
iau_pns (tjd, tb, CENTER); //tb
mt (tb, 3, 3, tbt); //tbt
brmul (tbt,xic,3,3,1,xfc); //rb
r = sqrt (xfc[0] * xfc[0] + xfc[1] * xfc[1] + xfc[2] * xfc[2]);
//define up-east-north system
rxy = sqrt (xfc[0] * xfc[0] + xfc[1] * xfc[1]);
sinf = xfc[2] / r;
cosf = rxy / r;
sinl = xfc[1] / rxy;
cosl = xfc[0] / rxy;
rmat[0] = cosf * cosl; //from fixed to up-east-north system: rmat
rmat[1] = cosf * sinl;
rmat[2] = sinf;
rmat[3] = -sinl;
rmat[4] = cosl;
rmat[5] = 0;
rmat[6] = -sinf * cosl;
rmat[7] = -sinf * sinl;
rmat[8] = cosf;
brmul (rmat,tbt,3,3,3,gmat); //inertial to fixed matrix gmat = rmat*tbt
mt (gmat, 3, 3, gmatt); //fixed to inertial matrix gmatt
lamta = chosephase (sinl, cosl); //rad
cosml = (double *) calloc ( GRAVDEGREE, sizeof(double)); //cos(m*lamta)
sinml = (double *) calloc ( GRAVDEGREE, sizeof(double)); //sin(m*lamta)
aprn = (double *) calloc ( GRAVDEGREE, sizeof(double)); //sin(m*lamta)
for (m = 1; m <= GRAVDEGREE; m++)
{
cosml[m-1] = cos(m*lamta);
sinml[m-1] = sin(m*lamta);
}
for (n = 1; n <= GRAVDEGREE; n++)
{
aprn[n-1] = pow (gma[1] / r, n);
}
/******************************************************************/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/****************Legendre Polynomial**********************************/
pn = (double *) calloc ( GRAVDEGREE, sizeof(double));
//Pn
pnp = (double *) calloc ( GRAVDEGREE, sizeof(double));
//Pn'
pnm = (double *) calloc ( GRAVDEGREE * GRAVDEGREE, sizeof(double));
//secf*Pmn
pnmp = (double *) calloc ( GRAVDEGREE * GRAVDEGREE, sizeof(double));
//cosf*Pmn'
pnpp = (double *) calloc ( GRAVDEGREE, sizeof(double));
//Pn''
pnmpp = (double *) calloc ( GRAVDEGREE * GRAVDEGREE, sizeof(double));
//cos2fPmn''
pn[0] = sinf;
pnp[0] = 1;
pnpp[0] = 0;
pn[1] = 3.0/2.0*sinf*sinf - 1.0/2.0;
pnp[1] = sinf + 2 * sinf;
pnpp[1] = 3;
for (n = 3; n <= GRAVDEGREE; n++)
{
pn[n-1] = (2 * n - 1.0) / n * sinf * pn[n-2]
- (n - 1.0) / n * pn[n-3]; //tmd!!!
pnp[n-1] = sinf * pnp[n-2] + n * pn[n-2];
pnpp[n-1] = sinf * pnpp[n-2] + (n+1) * pnp[n-2];
}
pnm[0] = 1; //secfP11 = 1
for (n = 2; n <= GRAVDEGREE; n++)
{
pnm[(n-1) * GRAVDEGREE + n - 1]
= (2 * n - 1.0) * cosf * pnm[(n - 2) * GRAVDEGREE + (n - 2)];
}
pnm[GRAVDEGREE] = (2 * 2.0 - 1.0) / (2 - 1.0) * sinf * pnm[0];
//secfP21 = pnm[GRAVDEGREE]
for (n = 3; n <= GRAVDEGREE; n++)
{
for (m = 1; m < n; m++)
{
pnm[(n-1) * GRAVDEGREE + (m-1)] = (2 * n - 1.0) / (n-m)
* sinf * pnm[(n-2) * GRAVDEGREE + (m-1)]
- (n + m - 1.0) / (n - m)
* pnm[(n - 3) * GRAVDEGREE + (m - 1)];
// printf ("%d\t%d\t%f\n", n, m, pnm[(n-1)*n2 + (m-1)]);
}
}
pnmp[0] = -sinf * pnm[0]; //cosfP11'
for (n = 2; n <= GRAVDEGREE; n++)
{
for (m = 1; m <= n; m++)
{
pnmp[(n - 1) * GRAVDEGREE + (m - 1)] =
- n * sinf * pnm[(n - 1) * GRAVDEGREE + (m - 1)]
+ (n + m) * pnm[(n - 2) * GRAVDEGREE + (m - 1)];
}
}
pnmpp[0] = sinf * pnmp[0] / cosf - pnm[0] * cosf;
//cos2fP11''
pnmpp[GRAVDEGREE] = sinf * pnmp[GRAVDEGREE] / cosf
- pnm[GRAVDEGREE] * cosf - 3 * sinf * pnm[GRAVDEGREE + 1];
//cos2fP21'' = pnmpp[GRAVDEGREE]
pnmpp[GRAVDEGREE + 1] = - 2 * pnm[GRAVDEGREE + 1] * cosf;
//cos2fP22'' = pnmpp[GRAVDEGREE+1]
for (n = 3; n <= GRAVDEGREE; n++)
{
for (m = 1; m <= n; m++)
{
if (m == 1)
{
pnmpp[(n - 1) * GRAVDEGREE + (m - 1)] =
+ sinf * pnmp[(n - 1) * GRAVDEGREE + (m - 1)] / cosf
- pnm[(n - 1) * GRAVDEGREE + (m - 1)] * cosf
- 3 * sinf * pnm[(n - 1) * GRAVDEGREE + (m - 1) + 1]
+ pnm[(n - 1) * GRAVDEGREE + (m - 1) + 2] * cosf;
}
else
{
pnmpp[(n-1)*GRAVDEGREE + (m-1)] = - (n - 2) * sinf
* pnmp[(n - 1) * GRAVDEGREE + (m-1)] / cosf
+ (n + m) * pnmp[(n - 2) * GRAVDEGREE + (m-1)] / cosf
- n * pnm[(n - 1) * GRAVDEGREE + (m - 1)] * cosf;
}
}
}
/*****************Legendre Polynomial**********************************/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/**********************************************************/
for (n = 1; n <= GRAVDEGREE; n++)
{
frj[0] = frj[0] + (-cn0[n - 1]) * aprn[n - 1] * (n + 1) * pn[n - 1];
frj[1] = frj[1] + 0;
frj[2] = frj[2] + (-cn0[n - 1]) * aprn[n - 1] * (-cosf) * pnp[n - 1];
}
for (n = 1; n <= GRAVDEGREE; n++)
{
for (m = 1; m <= n; m++)
{
if ( n == GRAVDEGREE && m > GRAVORDER)
{
// printf ("%d\t%d\n",n,m);
break;
}
frcs[0] = frcs[0] + aprn[n - 1]
* ( - (n + 1)) * pnm[(n - 1) * GRAVDEGREE + (m - 1)] * cosf
* (cnm[(n - 1) * GRAVDEGREE + (m - 1)] * cosml[m - 1]
+ snm[(n - 1) * GRAVDEGREE + (m - 1)] * sinml[m - 1]);
frcs[1] = frcs[1] + aprn[n - 1]
* m * pnm[(n - 1) * GRAVDEGREE + (m - 1)]
* (-cnm[(n - 1) * GRAVDEGREE + (m - 1)] * sinml[m - 1]
+ snm[(n - 1) * GRAVDEGREE + (m - 1)] * cosml[m - 1]);
frcs[2] = frcs[2] + aprn[n-1]
* pnmp[(n - 1) * GRAVDEGREE + (m - 1)]
* (cnm[(n - 1) * GRAVDEGREE + (m - 1)] * cosml[m - 1]
+ snm[(n - 1) * GRAVDEGREE + (m - 1)] * sinml[m - 1]);
}
}
for (n = 0; n < 3; n++)
{
fr[n] = (frj[n] + frcs[n]) * gma[0] / r / r;
}
brmul(gmatt,fr,3,3,1,acc); //from fixed acc to inertial acc
if (part == 0)
{
free (pn);
free (pnp);
free (pnm);
free (pnmp);
free (pnpp);
free (pnmpp);
free (cosml);
free (sinml);
free (aprn);
return 1;
}
/*************************************************************/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/************************partial part1***********************************/
peprp[0] = 0;
peprp[1] = - sinl / r;
peprp[2] = - sinf * cosl / r; //(573)
peprp[3] = 0;
peprp[4] = cosl / r;
peprp[5] = - sinf * sinl / r; //(574) //11.10change
peprp[6] = 0;
peprp[7] = 0;
peprp[8] = cosf / r; //(575)
peprp[9] = 0;
peprp[10] = - cosl / r / cosf;
peprp[11] = 0; //(576)
peprp[12] = 0;
peprp[13] = - sinl / r / cosf;
peprp[14] = 0; //(577)
peprp[15] = 0;
peprp[16] = 0;
peprp[17] = 0; //(578)
peprp[18] = 0;
peprp[19] = sinf * sinl / r / cosf;
peprp[20] = - cosf * cosl / r; //(579)
peprp[21] = 0;
peprp[22] = - sinf * cosl / r / cosf;
peprp[23] = - cosf * sinl / r; //(580)
peprp[24] = 0;
peprp[25] = 0;
peprp[26] = - sinf / r; //(581)
brmul(peprp, gmat, 9, 3, 3, pepr); //571
for (n = 0; n < 9; n++) //570
{
prtpx[n] = pepr[n*3];
prtpy[n] = pepr[n*3+1];
prtpz[n] = pepr[n*3+2];
}
mt (prtpx, 3, 3, prtpxx); //OH MY GOD!!!11.11
mt (prtpy, 3, 3, prtpyy); //OH MY GOD!!!11.11
mt (prtpz, 3, 3, prtpzz); //OH MY GOD!!!11.11
brmul(tb, prtpxx, 3, 3, 3, pgtpx); //568
brmul(tb, prtpyy, 3, 3, 3, pgtpy);
brmul(tb, prtpzz, 3, 3, 3, pgtpz);
brmul(pgtpx, fr, 3, 3, 1, pgx); //558 first term //11.10 change
brmul(pgtpy, fr, 3, 3, 1, pgy);
brmul(pgtpz, fr, 3, 3, 1, pgz);
for (n = 0; n < 3; n++)
{
part1[n*3] = pgx[n];
part1[n*3+1] = pgy[n];
part1[n*3+2] = pgz[n];
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/************************partial part2*************************************/
for (n = 0; n < 3; n++)
{
prjpx[n] = 0;
prjpy[n] = 0;
prjpz[n] = 0;
prcspx[n] = 0;
prcspy[n] = 0;
prcspz[n] = 0;
}
for (n = 1; n <= GRAVDEGREE; n++)
{
prjpx[0] = prjpx[0] - (-cn0[n - 1]) * aprn[n - 1]
* (n + 1) * pn[n - 1] * (n + 2); //561
prjpx[2] = prjpx[2] - (-cn0[n - 1]) * aprn[n - 1]
* (-cosf) * pnp[n - 1] * (n + 2); //561
prjpz[0] = prjpz[0] + (-cn0[n - 1]) * aprn[n - 1]
* (n + 1) * cosf * pnp[n - 1]; //563
prjpz[2] = prjpz[2] + (-cn0[n - 1]) * aprn[n - 1]
* ( sinf * pnp[n - 1] - cosf * cosf * pnpp[n - 1] );//563
}
for (n = 1; n <= GRAVDEGREE; n++)
{
for (m = 1; m <= n; m++)
{
if ( n == GRAVDEGREE && m > GRAVORDER)
{
// printf ("%d\t%d\n",n,m);
break;
}
//from 564 to 566
prcspx[0] = prcspx[0] - aprn[n - 1]
* ( - (n + 1)) * pnm[(n - 1) * GRAVDEGREE + (m - 1)] * cosf
* (cnm[(n - 1) * GRAVDEGREE + (m - 1)] * cosml[m - 1]
+ snm[(n - 1) * GRAVDEGREE + (m - 1)] * sinml[m - 1])
* (n + 2);
prcspx[1] = prcspx[1] - aprn[n - 1]
* m * pnm[(n - 1) * GRAVDEGREE + (m - 1)]
* ( - cnm[(n - 1) * GRAVDEGREE + (m - 1)] * sinml[m - 1]
+ snm[(n - 1) * GRAVDEGREE + (m - 1)] * cosml[m - 1])
* (n + 2);
prcspx[2] = prcspx[2] - aprn[n - 1]
* pnmp[(n - 1) * GRAVDEGREE + (m - 1)]
* (cnm[(n - 1) * GRAVDEGREE + (m - 1)] * cosml[m - 1]
+ snm[(n - 1) * GRAVDEGREE + (m - 1)] * sinml[m - 1])
* (n + 2);
prcspy[0] = prcspy[0] + m * aprn[n - 1]
* (n + 1) * pnm[(n - 1) * GRAVDEGREE + (m - 1)]
* (cnm[(n - 1) * GRAVDEGREE + (m - 1)] * sinml[m - 1]
- snm[(n - 1) * GRAVDEGREE + (m - 1)] * cosml[m - 1]);
prcspy[1] = prcspy[1] + m * aprn[n - 1]
* m * pnm[(n - 1) * GRAVDEGREE + (m - 1)] / cosf
* ( - cnm[(n - 1) * GRAVDEGREE + (m - 1)] * cosml[m - 1]
- snm[(n - 1) * GRAVDEGREE + (m - 1)] * sinml[m - 1]);
prcspy[2] = prcspy[2] + m * aprn[n - 1]
* pnmp[(n - 1) * GRAVDEGREE + (m - 1)] / cosf
* ( - cnm[(n - 1) * GRAVDEGREE + (m - 1)] * sinml[m - 1]
+ snm[(n - 1) * GRAVDEGREE + (m - 1)] * cosml[m - 1]);
prcspz[0] = prcspz[0] + aprn[n - 1]
* ( - (n + 1)) * pnmp[(n - 1) * GRAVDEGREE + (m - 1)]
* (cnm[(n - 1) * GRAVDEGREE + (m - 1)] * cosml[m - 1]
+ snm[(n - 1) * GRAVDEGREE + (m - 1)] * sinml[m - 1]);
prcspz[1] = prcspz[1] + aprn[n - 1]
* m * (sinf * pnm[(n - 1) * GRAVDEGREE + (m - 1)]
+ pnmp[(n - 1) * GRAVDEGREE + (m - 1)]) / cosf
* ( - cnm[(n - 1) * GRAVDEGREE + (m - 1)] * sinml[m - 1]
+ snm[(n - 1) * GRAVDEGREE + (m - 1)] * cosml[m - 1]);
prcspz[2] = prcspz[2] + aprn[n - 1]
* ( pnmpp[(n - 1) * GRAVDEGREE + (m - 1)]
- pnmp[(n - 1) * GRAVDEGREE + (m - 1)] * sinf / cosf)
* ( cnm[(n - 1) * GRAVDEGREE + (m - 1)] * cosml[m - 1]
+ snm[(n - 1) * GRAVDEGREE + (m - 1)] * sinml[m - 1]);
}
}
for (n = 0; n < 3; n++)
{
prpr[n*3] = (prjpx[n] + prcspx[n]) * gma[0] / r / r / r;
prpr[n*3+1] = (prjpy[n] + prcspy[n]) * gma[0] / r / r / r;
prpr[n*3+2] = (prjpz[n] + prcspz[n]) * gma[0] / r / r / r;
}
brmul(prpr, gmat, 3, 3, 3, gtpr);
brmul(gmatt, gtpr, 3, 3, 3, part2);
for (n = 0; n <= 8; n++)
{
dadr[n] = part1[n] + part2[n];
}
/*****************************************************************************/
n = 2;
dfd2r[0] = (- 1.0e-8 * sqrt(2*n+1)) * aprn[n - 1] * (n + 1) * pn[n - 1];
dfd2r[1] = 0;
dfd2r[2] = (- 1.0e-8 * sqrt(2*n+1)) * aprn[n - 1] * (-cosf) * pnp[n - 1];
for (n = 0; n < 3; n++)
{
dfd2r[n] = dfd2r[n] * gma[0] / r / r;
}
brmul(gmatt,dfd2r,3,3,1,dfd2);
for (n = 0; n <= 2; n++)
{
// dadp[n * DYNPAR] = dfd2[n];
// dadp[n * DYNPAR + 1] = dfd3[n];
dadp[n * DYNPAR + 1] = dfd2[n];
}
n = 3;
dfd2r[0] = (- 1.0e-8 * sqrt(2*n+1)) * aprn[n - 1] * (n + 1) * pn[n - 1];
dfd2r[1] = 0;
dfd2r[2] = (- 1.0e-8 * sqrt(2*n+1)) * aprn[n - 1] * (-cosf) * pnp[n - 1];
for (n = 0; n < 3; n++)
{
dfd2r[n] = dfd2r[n] * gma[0] / r / r;
}
brmul(gmatt,dfd2r,3,3,1,dfd2);
for (n = 0; n <= 2; n++)
{
// dadp[n * DYNPAR] = dfd2[n];
// dadp[n * DYNPAR + 1] = dfd3[n];
dadp[n * DYNPAR + 2] = dfd2[n];
}
n = 4;
dfd2r[0] = (- 1.0e-8 * sqrt(2*n+1)) * aprn[n - 1] * (n + 1) * pn[n - 1];
dfd2r[1] = 0;
dfd2r[2] = (- 1.0e-8 * sqrt(2*n+1)) * aprn[n - 1] * (-cosf) * pnp[n - 1];
for (n = 0; n < 3; n++)
{
dfd2r[n] = dfd2r[n] * gma[0] / r / r;
}
brmul(gmatt,dfd2r,3,3,1,dfd2);
for (n = 0; n <= 2; n++)
{
// dadp[n * DYNPAR] = dfd2[n];
// dadp[n * DYNPAR + 1] = dfd3[n];
dadp[n * DYNPAR + 3] = dfd2[n];
}
k2 = 1;
stidecs_old(tjd, gma[1], k2, &c20, &c21, &s21, &c22, &s22);
n = 2;
cn0[n-1] = (c20) * sqrt(2*n+1);
dfdkr[0] = (-cn0[n - 1]) * aprn[n - 1] * (n + 1) * pn[n - 1];
dfdkr[1] = 0;
dfdkr[2] = (-cn0[n - 1]) * aprn[n - 1] * (-cosf) * pnp[n - 1];
n = 2; m = 1;
{
nup = 1.0;
ndown = 1.0;
for (i = 1; i <= n - m; i++)
nup = nup * i;
for (i = 1; i <= n + m; i++)
ndown = ndown * i;
unit = sqrt (2 * (2*n+1.0)*nup/ndown);
cnm[(n-1)*GRAVDEGREE + (m-1)] = ( c21)* unit;
snm[(n-1)*GRAVDEGREE + (m-1)] = ( s21)* unit;
dfdkr[0] = dfdkr[0] + aprn[n - 1]
* ( - (n + 1)) * pnm[(n - 1) * GRAVDEGREE + (m - 1)] * cosf
* (cnm[(n - 1) * GRAVDEGREE + (m - 1)] * cosml[m - 1]
+ snm[(n - 1) * GRAVDEGREE + (m - 1)] * sinml[m - 1]);
dfdkr[1] = dfdkr[1] + aprn[n - 1]
* m * pnm[(n - 1) * GRAVDEGREE + (m - 1)]
* (-cnm[(n - 1) * GRAVDEGREE + (m - 1)] * sinml[m - 1]
+ snm[(n - 1) * GRAVDEGREE + (m - 1)] * cosml[m - 1]);
dfdkr[2] = dfdkr[2] + aprn[n-1]
* pnmp[(n - 1) * GRAVDEGREE + (m - 1)]
* (cnm[(n - 1) * GRAVDEGREE + (m - 1)] * cosml[m - 1]
+ snm[(n - 1) * GRAVDEGREE + (m - 1)] * sinml[m - 1]);
}
n = 2; m = 2;
{
nup = 1.0;
ndown = 1.0;
for (i = 1; i <= n - m; i++)
nup = nup * i;
for (i = 1; i <= n + m; i++)
ndown = ndown * i;
unit = sqrt (2 * (2*n+1.0)*nup/ndown);
cnm[(n-1)*GRAVDEGREE + (m-1)] = ( c22)* unit;
snm[(n-1)*GRAVDEGREE + (m-1)] = ( s22)* unit;
dfdkr[0] = dfdkr[0] + aprn[n - 1]
* ( - (n + 1)) * pnm[(n - 1) * GRAVDEGREE + (m - 1)] * cosf
* (cnm[(n - 1) * GRAVDEGREE + (m - 1)] * cosml[m - 1]
+ snm[(n - 1) * GRAVDEGREE + (m - 1)] * sinml[m - 1]);
dfdkr[1] = dfdkr[1] + aprn[n - 1]
* m * pnm[(n - 1) * GRAVDEGREE + (m - 1)]
* (-cnm[(n - 1) * GRAVDEGREE + (m - 1)] * sinml[m - 1]
+ snm[(n - 1) * GRAVDEGREE + (m - 1)] * cosml[m - 1]);
dfdkr[2] = dfdkr[2] + aprn[n-1]
* pnmp[(n - 1) * GRAVDEGREE + (m - 1)]
* (cnm[(n - 1) * GRAVDEGREE + (m - 1)] * cosml[m - 1]
+ snm[(n - 1) * GRAVDEGREE + (m - 1)] * sinml[m - 1]);
}
for (n = 0; n < 3; n++)
{
dfdkr[n] = dfdkr[n] * gma[0] / r / r;
}
brmul(gmatt,dfdkr,3,3,1,dfdk);
for (n = 0; n <= 2; n++)
{
dadp[n * DYNPAR] = dfdk[n];
// dadp[n * DYNPAR + 1] = dfd3[n];
// dadp[n * DYNPAR + 1] = dfd2[n];
}
/*****************************************************************************/
free (pn);
free (pnp);
free (pnm);
free (pnmp);
free (pnpp);
free (pnmpp);
free (cosml);
free (sinml);
free (aprn);
return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* opengravfile - open gravity field
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double opengravfile (double *cn0, double *cnm, double *snm, double *gma)
{
    /*
     * opengravfile - read the gravity-field coefficient file FILE_GRV.
     * Outputs:
     *   gma[0]  GM, converted to AU^3/day^2
     *   gma[1]  reference radius, converted to AU
     *   cn0[n-1]                      normalized zonal Cn0 (m == 0 rows)
     *   cnm/snm[(n-1)*GRAVDEGREE+m-1] fully-normalized Cnm/Snm (m > 0 rows)
     * Side effects: stores raw j2/j3/j4 and the raw degree-2 tesserals
     * (jc21/js21/jc22/js22) in file-scope globals for the tide code.
     * Exits the program if the file cannot be opened.
     */
    FILE *fp_gra;
    double value, c, s, nup, ndown, unit;
    int n, m, i;
    char string[100], name[20];

    if ((fp_gra = fopen (FILE_GRV, "r")) == NULL)
    {
        printf ("Cannot open gravity file?\n");
        getch ();
        exit (0);
    }
    /* Header section: pick up Gm and RefDistance until the BEGIN marker.
       Loop on fgets' return value instead of feof(): the old feof() test
       re-processed the stale buffer once after the last line was read. */
    while (fgets (string, 100, fp_gra) != NULL)
    {
        if (sscanf (string, "%s%lf", name, &value) < 1)
            continue;                       /* skip blank/unparsable lines */
        if (strcmp (name, "Gm") == 0)
        {
            gma[0] = value / AU / AU / AU * 86400.0 * 86400.0;
        }
        if (strcmp (name, "RefDistance") == 0)
        {
            gma[1] = value / AU;
        }
        if (strcmp (name, "BEGIN") == 0)
            break;
    }
    /* Coefficient section: one "n m C S" record per line. */
    while (fgets (string, 100, fp_gra) != NULL)
    {
        n = 999;                /* sentinel: unparsed lines fail n check */
        m = 999;
        sscanf (string, "%d%d%lf%lf", &n, &m, &c, &s);
        if (n > GRAVDEGREE)
            continue;
        else if (m == 0)
        {
            unit = sqrt (2 * n + 1.0);      /* zonal normalization factor */
            cn0[n-1] = c * unit;
            if (n == 2) j2 = c;
            if (n == 3) j3 = c;
            if (n == 4) j4 = c;
        }
        else
        {
            if (n == 2 && m == 1) {jc21 = c; js21 = s; }
            if (n == 2 && m == 2) {jc22 = c; js22 = s; }
            nup = 1.0;
            ndown = 1.0;
            for (i = 1; i <= n - m; i++)    /* (n-m)! */
                nup = nup * i;
            for (i = 1; i <= n + m; i++)    /* (n+m)! */
                ndown = ndown * i;
            /* full normalization: sqrt(2(2n+1)(n-m)!/(n+m)!) */
            unit = sqrt (2 * (2*n+1.0)*nup/ndown);
            cnm[(n-1)*GRAVDEGREE + (m-1)] = c * unit;
            snm[(n-1)*GRAVDEGREE + (m-1)] = s * unit;
        }
    }
    fclose(fp_gra);
    return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* itrf2icrf - from earth fixed to earth inertial
* @param1: description of param1
* @param2: description of param2
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double itrf2icrf(double jd, double utc, double *vt, double *vc)
{
    /* Rotate vector vt from the Earth-fixed (ITRF) frame into the
       celestial (ICRF) frame, result in vc (unit: m).  jd is the
       Julian date of the day, utc the seconds of day. */
    double xp = 0, yp = 0, ut1_utc = 0, dx = 0, dy = 0;
    double delta_t, ut1_frac, tt_sec;

    geteop (utc, &xp, &yp, &ut1_utc, &dx, &dy);  /* Earth orientation params */
    delta_t  = 32.184 + LEAPSECS - ut1_utc;      /* TT - UT1, seconds        */
    ut1_frac = (utc + ut1_utc) / 86400.0;        /* UT1 as fraction of day   */
    tt_sec   = utc + (LEAPSECS + 32.184);        /* TT, seconds of day       */
    cel_pole (jd + tt_sec / 86400.0, 2, dx * 1e3, dy * 1e3);
    ter2cel (jd, ut1_frac, delta_t, 1, ACCURACY, 0,
             xp, yp, vt, vc);                    /*--vc unit: m--*/
    return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* iau_pns - planet fixed to J2000 inertial (for gravity field)
Report of the IAU/IAGWorking Group on cartographic
coordinates and rotational elements: 2006
* @param1: description of param1
* @param2: description of param2
* todo:
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
void iau_pns (double *jd, double *te, int cent)
{
    /*
     * iau_pns - rotation matrix te[9] from a body's fixed frame to the
     * J2000 inertial frame (for gravity-field evaluation).
     *   cent == 2 : Earth — interpolate the precomputed TE_EPH table.
     *   cent == 9 : Moon  — use the IAU body-fixed -> celestial chain.
     *   otherwise : IAU-series planet; index is shifted by one below to
     *               match the sun0..pluto9 numbering of iau_s/iau_pn.
     * Reference: Report of the IAU/IAG Working Group on cartographic
     * coordinates and rotational elements: 2006.
     * (Removed unused locals tb, vx, vy, vz and the dead commented-out
     * itrf2icrf path that referenced them.)
     */
    double tes[9] = {0}, tepn[9] = {0}, te2[9], utc;
    int i;

    if (cent == 2)
    {
        /* jd[1] holds TT (days of epoch); convert to UTC seconds. */
        utc = jd[1] * 86400 - (LEAPSECS + 32.184);
        lagrange (TE_EPH, DIM_TE, 10, utc, te2);  /* interpolate stored matrix */
        for (i = 0; i < 9; i++)
            te[i] = te2[i];
    }
    else if (cent == 9)
    {
        mbf2cel (jd, te);            /* Moon body-fixed -> celestial */
    }
    else
    {
        cent = cent + 1;             /* shift to iau_s/iau_pn planet index */
        iau_s (jd, tes, cent);       /* IAU fixed -> IAU inertial          */
        iau_pn (jd, tepn, cent);     /* IAU inertial -> J2000 inertial     */
        brmul (tepn, tes, 3, 3, 3, te);
    }
    return;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* iau_s - from IAU fixed to IAU inertial, true-of-date equator and equinox
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
void iau_s (double *jd, double *tes, int cent)
{
    /*
     * iau_s - rotation matrix tes[9] from IAU body-fixed axes to the
     * body's true-of-date inertial frame (rotation about the pole by
     * the IAU prime-meridian angle W, in degrees).
     * d = days past J2000.0 (jd[0] high part + jd[1] low part).
     * Fix: the original switch had no default, so an unmatched cent
     * left str uninitialized (undefined behavior).
     */
    double d, str, cosst, sinst;
    d = jd[0] - 2451545.0;
    d = d + jd[1];
    switch (cent) //sun0, mercury1, ..., pluto9
    {
        case 0 : str = 84.176 + 14.1844000 * d; break;
        case 1 : str = 329.548 + 6.1385025 * d; break;
        case 2 : str = 160.20 - 1.4813688 * d; break;
        case 3 : str = 190.147 + 360.9856235 * d; break;
        case 4 : str = 176.63 + 350.89198226 * d; break;
        case 5 : str = 284.95 + 870.5366420 * d; break;
        case 6 : str = 38.90 + 810.7939024 * d; break;
        case 7 : str = 203.81 - 501.1600928 * d; break;
        case 8 : str = 253.18 + 536.3128492 * d -
                 0.48 * sin ((357.85 + 52.316 * d / 36525.0 ) * DEG2RAD);
                 break;
        case 9 : str = 237.305 - 56.3625225 * d; break;
        case I_TITAN : str = 186.5855 + 22.5769768 * d; break;
        default :
            str = 0.0;   /* unknown body: zero spin angle (identity z-rotation)
                            instead of reading an uninitialized value */
            break;
    }
    cosst = cos (str * DEG2RAD);
    sinst = sin (str * DEG2RAD);
    /* z-rotation by W (column convention matches iau_pn below) */
    tes[0] = cosst;
    tes[1] = -sinst;
    tes[2] = 0;
    tes[3] = sinst;
    tes[4] = cosst;
    tes[5] = 0;
    tes[6] = 0;
    tes[7] = 0;
    tes[8] = 1;
    return;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* iau_pn - from IAU inertial (for all planets) to J2000 inertial
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
void iau_pn (double *jd, double *tes, int cent)
{
    /*
     * iau_pn - rotation matrix tes[9] from a planet's IAU inertial frame
     * to the J2000 inertial frame, built from the IAU pole direction
     * (ra0, dec0 in degrees; jcent = Julian centuries past J2000.0).
     * Fix: the original switch had no default, so an unmatched cent left
     * ra0/dec0 uninitialized (undefined behavior).
     */
    double ra0, dec0, jcent, cr, sr, cd, sd;
    jcent = jd[0] - 2451545.0;
    jcent = (jcent + jd[1]) / 36525.0;
    switch (cent) //sun0, mercury1, ..., pluto9
    {
        case 0 :
            ra0 = 286.13;
            dec0 = 63.87;
            break;
        case 1 :
            ra0 = 281.01 - 0.033 * jcent;
            dec0 = 61.45 - 0.005 * jcent;
            break;
        case 2 :
            ra0 = 272.76;
            dec0 = 67.16;
            break;
        case 3 :
            ra0 = 0.00 - 0.641 * jcent;
            dec0 = 90.0 - 0.557 * jcent;
            break;
        case 4 :
            ra0 = 317.68143 - 0.1061 * jcent;
            dec0 = 52.88650 - 0.0609 * jcent;
            break;
        case 5 :
            ra0 = 268.05 - 0.009 * jcent;
            dec0 = 64.49 + 0.003 * jcent;
            break;
        case 6 :
            ra0 = 40.589 - 0.036 * jcent;
            dec0 = 83.537 - 0.004 * jcent;
            break;
        case 7 :
            ra0 = 257.311;
            dec0 = -15.175;
            break;
        case 8 :
            ra0 = 299.36 + 0.70 * sin ((357.85 + 52.316 * jcent) * DEG2RAD);
            dec0 = 43.46 - 0.51 * cos ((357.85 + 52.316 * jcent) * DEG2RAD);
            break;
        case 9 :
            ra0 = 313.02;
            dec0 = 9.09;
            break;
        case I_TITAN :
            ra0 = 39.4827;
            dec0 = 83.4279;
            break;
        default :
            ra0 = 0.0;    /* unknown body: pole aligned with the J2000 pole */
            dec0 = 90.0;  /* instead of reading uninitialized values        */
            break;
    }
    cr = cos (ra0 * DEG2RAD);
    sr = sin (ra0 * DEG2RAD);
    cd = cos (dec0 * DEG2RAD);
    sd = sin (dec0 * DEG2RAD);
    /* columns: node, pole-orthogonal, pole direction */
    tes[0] = -sr;
    tes[1] = -cr * sd;
    tes[2] = cr * cd;
    tes[3] = cr;
    tes[4] = -sr * sd;
    tes[5] = sr * cd;
    tes[6] = 0;
    tes[7] = cd;
    tes[8] = sd;
    return;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* in2pa - from inertial to moon fixed (PA)
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
void in2pa(double *jd, double *te)
{
double lib[6] = {0}, tb1[9], tb2[9], tb3[9], tb32[9];
int target, center;
target = 15;
center = 0;
// DPLEPH(jd, &target, ¢er, lib);
rotmatz (lib[0], tb1, 0);
rotmatx (lib[1], tb2, 0);
rotmatz (lib[2], tb3, 0);
brmul(tb3, tb2, 3, 3, 3, tb32);
brmul(tb32, tb1, 3, 3, 3, te);
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* mbf2cel - rotation matrix from moon body-fixed (ME) to celestial (ICRF)
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
short int mbf2cel (double *jd_tdb, double *te)
/*
------------------------------------------------------------------------
   PURPOSE:
      Build the 3x3 rotation matrix te[9] taking vectors from the Moon
      body-fixed Mean Earth/polar axis (ME) system to the celestial
      system (ICRF), using the IAU report formulae.
   REFERENCES:
      P. Kenneth Seidelmann et al. (2007). Report of the IAU/IAG Working
      Group on cartographic coordinates and rotational elements: 2006
   INPUT
   ARGUMENTS:
      jd_tdb[2] (double)
         TDB Julian date.
         High-order part (jd_tdb[0]) & low-order part (jd_tdb[1]).
   OUTPUT
   ARGUMENTS:
      te[9] (double)
         Rotation matrix (row-major 3x3), ME body-fixed -> ICRF.
   RETURNED
   VALUE:
      = 0 ... everything is ok (no failure path at present).
   NOTES:
      te = transpose(in2me) * transpose(me2pa); the in-code comment
      labels the product "ME2ICRF".
      (Earlier header text described parameters — method, ref_sys,
      derivation, vecm, vecc — that this function does not take.)
   VER./DATE/
   PROGRAMMER:
      V1.0/03-10/ (SHAO).
------------------------------------------------------------------------
*/
{
short int error = 0;
double tb1[9], tb2[9], tbt1[9], tbt2[9];
/*
IAU report formulae
*/
me2pa(tb1);
mt(tb1, 3, 3, tbt1);
in2me(jd_tdb, tb2, 0);
mt(tb2, 3, 3, tbt2);
brmul(tbt2,tbt1,3,3,3,te); //ME2ICRF
return error;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* in2me - from inertial to moon fixed (ME)
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
void in2me (double *jd, double *te, short int derivation)
{
/* Rotation matrix te[9] from inertial (celestial) axes to the lunar
   Mean Earth/polar axis (ME) frame, via the IAU WG 2006 report series
   for the Moon's pole (ra, dec) and prime meridian (w), composed as a
   z-x-z Euler chain.
     derivation = 0 : plain rotation matrix
     derivation = 1..3 : replace the 1st/2nd/3rd Euler factor by its
        angle-derivative matrix (used for building partials).
   d = days, T = Julian centuries past J2000.0; E1..E13 are the report's
   lunar argument angles (degrees). */
double ra, dec, w, lib[3], d, T, tb1[9], tb2[9], tb3[9], tb32[9],
E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13;
d = jd[0] - 2451545.0 + jd[1];
T = d / 36525.0;
E1 = 125.045 - 0.0529921 * d;
E2 = 250.089 - 0.1059842 * d;
E3 = 260.008 + 13.0120009 * d;
E4 = 176.625 + 13.3407154 * d;
E5 = 357.529 + 0.9856003 * d;
E6 = 311.589 + 26.4057084 * d;
E7 = 134.963 + 13.0649930 * d;
E8 = 276.617 + 0.3287146 * d;
E9 = 34.226 + 1.7484877 * d;
E10 = 15.134 - 0.1589763 * d;
E11 = 119.743 + 0.0036096 * d;
E12 = 239.961 + 0.1643573 * d;
E13 = 25.053 + 12.9590088 * d;
/* right ascension of the lunar pole, degrees */
ra = 269.9949 + 0.0031 * T - 3.8787 * sin (E1 * DEG2RAD)
- 0.1204 * sin (E2 * DEG2RAD) + 0.0700 * sin (E3 * DEG2RAD)
- 0.0172 * sin (E4 * DEG2RAD) + 0.0072 * sin (E6 * DEG2RAD)
- 0.0052 * sin (E10 * DEG2RAD) + 0.0043 * sin (E13 * DEG2RAD);
/* declination of the lunar pole, degrees */
dec = 66.5392 + 0.0130 * T + 1.5419 * cos (E1 * DEG2RAD)
+ 0.0239 * cos (E2 * DEG2RAD) - 0.0278 * cos (E3 * DEG2RAD)
+ 0.0068 * cos (E4 * DEG2RAD) - 0.0029 * cos (E6 * DEG2RAD)
+ 0.0009 * cos (E7 * DEG2RAD) + 0.0008 * cos (E10 * DEG2RAD)
- 0.0009 * cos (E13 * DEG2RAD);
/* prime meridian angle, degrees */
w = 38.3213 + 13.17635815 * d - 1.4e-12 * d * d
+ 3.5610 * sin (E1 * DEG2RAD) + 0.1208 * sin (E2 * DEG2RAD)
- 0.0642 * sin (E3 * DEG2RAD) + 0.0158 * sin (E4 * DEG2RAD)
+ 0.0252 * sin (E5 * DEG2RAD) - 0.0066 * sin (E6 * DEG2RAD)
- 0.0047 * sin (E7 * DEG2RAD) - 0.0046 * sin (E8 * DEG2RAD)
+ 0.0028 * sin (E9 * DEG2RAD) + 0.0052 * sin (E10 * DEG2RAD)
+ 0.0040 * sin (E11 * DEG2RAD) + 0.0019 * sin (E12 * DEG2RAD)
- 0.0044 * sin (E13 * DEG2RAD);
lib[0] = (90.0 + ra) * DEG2RAD;  /* z-rotation: 90 + RA of pole  */
lib[1] = (90.0 - dec) * DEG2RAD; /* x-rotation: 90 - Dec of pole */
lib[2] = w * DEG2RAD;            /* z-rotation: prime meridian   */
rotmatz (lib[0], tb1, 0);
rotmatx (lib[1], tb2, 0);
rotmatz (lib[2], tb3, 0);
/* for partials, swap in the derivative form of the selected factor */
if (derivation == 1)
rotmatz (lib[0], tb1, 1);
if (derivation == 2)
rotmatx (lib[1], tb2, 1);
if (derivation == 3)
rotmatz (lib[2], tb3, 1);
brmul(tb3, tb2, 3, 3, 3, tb32);
brmul(tb32, tb1, 3, 3, 3, te);
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* me2pa - fixed rotation between lunar Mean Earth (ME) and Principal Axis (PA) frames
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
void me2pa (double *te)
{
    /* Fixed rotation te[9] between the lunar Mean Earth (ME) frame and
       the Principal Axis (PA) frame: constant x-, y-, z-rotations by
       the standard offset angles (arcseconds), composed as rz*ry*rx. */
    double rx[9], ry[9], rz[9], rzy[9];

    rotmatx ( 0.1462 * ASEC2RAD, rx, 0);
    rotmaty (79.0768 * ASEC2RAD, ry, 0);
    rotmatz (63.8986 * ASEC2RAD, rz, 0);
    brmul (rz, ry, 3, 3, 3, rzy);
    brmul (rzy, rx, 3, 3, 3, te);
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* rotmatx -
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
void rotmatx (double rad, double *matx, short int deri)
{
double cosst, sinst;
cosst = cos(rad);
sinst = sin(rad);
matx[0] = 1;
matx[1] = 0;
matx[2] = 0;
matx[3] = 0;
matx[4] = cosst;
matx[5] = sinst;
matx[6] = 0;
matx[7] = -sinst;
matx[8] = cosst;
if (deri == 1)
{
matx[0] = 0;
matx[1] = 0;
matx[2] = 0;
matx[3] = 0;
matx[4] = -sinst;
matx[5] = cosst;
matx[6] = 0;
matx[7] = -cosst;
matx[8] = -sinst;
}
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* rotmaty -
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
void rotmaty (double rad, double *maty, short int deri)
{
double cosst, sinst;
cosst = cos(rad);
sinst = sin(rad);
maty[0] = cosst;
maty[1] = 0;
maty[2] = -sinst;
maty[3] = 0;
maty[4] = 1;
maty[5] = 0;
maty[6] = sinst;
maty[7] = 0;
maty[8] = cosst;
if (deri == 1)
{
maty[0] = -sinst;
maty[1] = 0;
maty[2] = -cosst;
maty[3] = 0;
maty[4] = 0;
maty[5] = 0;
maty[6] = cosst;
maty[7] = 0;
maty[8] = -sinst;
}
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* rotmatz -
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
void rotmatz (double rad, double *matz, short int deri)
{
double cosst, sinst;
cosst = cos(rad);
sinst = sin(rad);
matz[0] = cosst;
matz[1] = sinst;
matz[2] = 0;
matz[3] = -sinst;
matz[4] = cosst;
matz[5] = 0;
matz[6] = 0;
matz[7] = 0;
matz[8] = 1;
if (deri == 1)
{
matz[0] = -sinst;
matz[1] = cosst;
matz[2] = 0;
matz[3] = -cosst;
matz[4] = -sinst;
matz[5] = 0;
matz[6] = 0;
matz[7] = 0;
matz[8] = 0;
}
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/****************************************************************************/
/* */
/* Functions for Runge-Kutta integrator */
/* */
/* Version: 2009-9-8 */
/* */
/* Copyright (c) 2009 shangkun@shao.ac.cn All Right Reserved */
/* */
/****************************************************************************/
/*
Version: 2009-9-8
Version: 2009-9-13 integrate forwards & backwards
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double rkf78_auto (double h, double t, double *x, int dim, double err,
double (*fun)(double,double *,double *), int autoadjust)
/*
purpose: step-size-controlled Runge-Kutta-Fehlberg 7(8) integrator
input: double h integration step
double t integrate from t to t+h
double *x x(t)
int dim dim(x)
double err tolerance of step control
double (*fun)() right(force) function
int autoadjust 1 = adapt step size, else fixed step
output: double *x x(t+h)
return: h new step suggested for the NEXT call
note: fixed a bug where, on a very small error estimate, h was doubled
BEFORE the state update, so the update used stage derivatives computed
with the old step.  The state is now always advanced with the same h
that produced the stages; the doubled h is only returned.
*/
{
    int i, j, n, retry;
    double *y, *k, *f, d = 0, tn;
    /* Fehlberg 7(8) nodes, 8th-order weights, and coupling coefficients */
    double a[13] = { 0, 2.0/27, 1.0/9, 1.0/6, 5.0/12, 1.0/2, 5.0/6, 1.0/6,
                     2.0/3, 1.0/3, 1.0, 0, 1.0 };
    double c[13] = { 0, 0, 0, 0, 0, 34.0/105, 9.0/35, 9.0/35, 9.0/280,
                     9.0/280, 0, 41.0/840, 41.0/840 };
    double b[13][12] =
    {
        {0},
        {2.0/27},
        {1.0/36,1.0/12},
        {1.0/24,0,1.0/8},
        {5.0/12,0,-25.0/16,25.0/16},
        {1.0/20,0,0,1.0/4,1.0/5},
        {-25.0/108,0,0,125.0/108,-65.0/27,125.0/54},
        {31.0/300,0,0,0,61.0/225,-2.0/9,13.0/900},
        {2.0,0,0,-53.0/6,704.0/45,-107.0/9,67.0/90,3.0},
        {-91.0/108,0,0,23.0/108,-976.0/135,311.0/54,-19.0/60,17.0/6,-1.0/12},
        {2383.0/4100,0,0,-341.0/164,4496.0/1025,-301.0/82,2133.0/4100,
         45.0/82,45.0/164,18.0/41},
        {3.0/205,0,0,0,0,-6.0/41,-3.0/205,-3.0/41,3.0/41,6.0/41},
        {-1777.0/4100,0,0,-341.0/164,4496.0/1025,-289.0/82,2193.0/4100,
         51.0/82,33.0/164,12.0/41,0,1.0}
    };
    y = (double *) calloc (dim, sizeof(double));
    k = (double *) calloc (dim*13, sizeof(double));
    f = (double *) calloc (dim, sizeof(double));
    if (y == NULL || k == NULL || f == NULL)
    {
        /* out of memory: leave x untouched (original dereferenced NULL) */
        free (y);
        free (k);
        free (f);
        return h;
    }
    do
    {
        /* evaluate the 13 stages with the current step h */
        for (i = 0; i <= 12; i++)
        {
            tn = t + a[i] * h;
            for (n = 0; n <= dim - 1; n++)
            {
                y[n] = x[n];
                for (j = 0; j <= i-1; j++)
                    y[n] = y[n] + h * b[i][j] * k[n*13+j];
            }
            fun (tn, y, f);
            for (n = 0; n <= dim - 1; n++)
            {
                k[n*13+i] = f[n];
            }
        }
        /* embedded 7th/8th order error estimate */
        d = 0;
        for (n = 0; n <= dim - 1; n++)
        {
            d = d + fabs (41.0 / 840 * (k[n*13+0] + k[n*13+10]
                - k[n*13+11] - k[n*13+12]) * h);
        }
        retry = 0;
        if (autoadjust == 1 && d > err)  /* too inaccurate: halve and redo */
        {
            h = h / 2.0;
            retry = 1;
        }
    } while (retry == 1);
    /* advance the state with the SAME h used to build the stages */
    for (n = 0; n <= dim - 1; n++)
    {
        for (i = 0; i <= 12; i++)
            x[n] = x[n] + h * c[i] * k[n*13+i];
    }
    /* error comfortably small: suggest a larger step for the next call */
    if (autoadjust == 1 && (d < err * 1e-4) && (h < 5e-3))
        h = h * 2.0;
    free (y);
    free (f);
    free (k);
    return h;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* enlgr -
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double enlgr (double *x, double *y, int n, double t)
{
    /*
     * enlgr - Lagrange interpolation of the table y(x) at abscissa t,
     * using at most the 8 tabulated points nearest t.  x must be in
     * ascending order.  n == 1 returns y[0]; n == 2 is the exact linear
     * form; n < 1 returns 0.
     * Fix: the search loop tested x[i] < t BEFORE i < n, reading x[n]
     * out of bounds whenever t exceeds the whole table.
     */
    int i, j, k, m;
    double z, s;
    z = 0.0;
    if (n < 1)
        return (z);
    if (n == 1)
    {
        z = y[0];
        return (z);
    }
    if (n == 2)
    {
        z = (y[0] * (t - x[1]) - y[1] * (t - x[0])) / (x[0] - x[1]);
        return(z);
    }
    /* locate first tabulated abscissa >= t (bounds check first!) */
    i = 0;
    while ((i < n) && (x[i] < t))
        i = i + 1;
    /* interpolation window [k, m]: up to 4 points either side of t */
    k = i - 4;
    if (k < 0)
        k = 0;
    m = i + 3;
    if (m > n - 1)
        m = n - 1;
    for (i = k; i <= m; i++)
    {
        s = 1.0;                /* Lagrange basis polynomial L_i(t) */
        for (j = k; j <= m; j++)
        {
            if (j != i)
                s = s * (t - x[j]) / (x[i] - x[j]);
        }
        z = z + s * y[i];
    }
    return (z);
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* bssgj -
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
int bssgj (double *a,int n)
{
    /*
     * bssgj - in-place inversion of the symmetric positive definite
     * n x n matrix a (row-major) by successive pivot "rotation" steps.
     * Returns 2 on success; -2 if a pivot is numerically zero (matrix
     * singular / not positive definite) or if workspace allocation
     * fails.  On failure a is left partially overwritten.
     * Fix: the malloc result was used unchecked.
     */
    int i, j, k, m;
    double w, g, *b;
    b = (double *)malloc (n * sizeof(double));
    if (b == NULL)
    {
        printf ("fail\n");
        return (-2);
    }
    for (k = 0; k <= n - 1; k++)
    {
        w = a[0];                       /* current pivot */
        /* fabs(w)+1.0 == 1.0 tests w against machine epsilon */
        if (fabs (w) + 1.0 == 1.0)
        {
            free (b);
            printf ("fail\n");
            return (-2);
        }
        m = n - k - 1;
        for (i = 1; i <= n - 1; i++)
        {
            g = a[i * n];
            b[i] = g / w;
            if (i <= m)
                b[i] = - b[i];
            /* shift the trailing submatrix up-left while eliminating */
            for (j = 1; j <= i; j++)
                a[(i - 1) * n + j - 1] = a[i * n + j] + g * b[j];
        }
        a[n * n - 1] = 1.0 / w;
        for (i = 1; i <= n - 1; i++)
            a[(n - 1) * n + i - 1] = b[i];
    }
    /* restore symmetry: mirror the lower triangle into the upper */
    for (i=0; i<=n-2; i++)
    {
        for (j=i+1; j<=n-1; j++)
            a[i*n+j]=a[j*n+i];
    }
    free(b);
    return(2);
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* iauDtdb - precise tdb-tt correction: better than +/- 3 nanoseconds
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double iauDtdb(double date1, double date2,
double ut, double elong, double u, double v)
/*
** - - - - - - - -
** i a u D t d b
** - - - - - - - -
**
** An approximation to TDB-TT, the difference between barycentric
** dynamical time and terrestrial time, for an observer on the Earth.
**
** The different time scales - proper, coordinate and realized - are
** related to each other:
**
** TAI <- physically realized
** :
** offset <- observed (nominally +32.184s)
** :
** TT <- terrestrial time
** :
** rate adjustment (L_G) <- definition of TT
** :
** TCG <- time scale for GCRS
** :
** "periodic" terms <- iauDtdb is an implementation
** :
** rate adjustment (L_C) <- function of solar-system ephemeris
** :
** TCB <- time scale for BCRS
** :
** rate adjustment (-L_B) <- definition of TDB
** :
** TDB <- TCB scaled to track TT
** :
** "periodic" terms <- -iau_DTDB is an approximation
** :
** TT <- terrestrial time
**
** Adopted values for the various constants can be found in the IERS
** Conventions (McCarthy & Petit 2003).
**
** This function is part of the International Astronomical Union's
** SOFA (Standards Of Fundamental Astronomy) software collection.
**
** Status: canonical model.
**
** Given:
** date1,date2 double date, TDB (Notes 1-3)
** ut double universal time (UT1, fraction of one day)
** elong double longitude (east positive, radians)
** u double distance from Earth spin axis (km)
** v double distance north of equatorial plane (km)
**
** Returned (function value):
** double TDB-TT (seconds)
**
** Notes:
**
** 1) The TT date date1+date2 is a Julian Date, apportioned in any
** convenient way between the two arguments. For example,
** JD(TT)=2450123.7 could be expressed in any of these ways,
** among others:
**
** date1 date2
**
** 2450123.7 0.0 (JD method)
** 2451545.0 -1421.3 (J2000 method)
** 2400000.5 50123.2 (MJD method)
** 2450123.5 0.2 (date & time method)
**
** The JD method is the most natural and convenient to use in
** cases where the loss of several decimal digits of resolution
** is acceptable. The J2000 method is best matched to the way
** the argument is handled internally and will deliver the
** optimum resolution. The MJD method and the date & time methods
** are both good compromises between resolution and convenience.
**
** Although the date is, formally, barycentric dynamical time (TDB),
** the terrestrial dynamical time (TT) can be used with no practical
** effect on the accuracy of the prediction.
**
** 2) TT can be regarded as a coordinate time that is realized as an
** offset of 32.184s from International Atomic Time, TAI. TT is a
** specific linear transformation of geocentric coordinate time TCG,
** which is the time scale for the Geocentric Celestial Reference
** System, GCRS.
**
** 3) TDB is a coordinate time, and is a specific linear transformation
** of barycentric coordinate time TCB, which is the time scale for
** the Barycentric Celestial Reference System, BCRS.
**
** 4) The difference TCG-TCB depends on the masses and positions of the
** bodies of the solar system and the velocity of the Earth. It is
** dominated by a rate difference, the residual being of a periodic
** character. The latter, which is modeled by the present function,
** comprises a main (annual) sinusoidal term of amplitude
** approximately 0.00166 seconds, plus planetary terms up to about
** 20 microseconds, and lunar and diurnal terms up to 2 microseconds.
** These effects come from the changing transverse Doppler effect
** and gravitational red-shift as the observer (on the Earth's
** surface) experiences variations in speed (with respect to the
** BCRS) and gravitational potential.
**
** 5) TDB can be regarded as the same as TCB but with a rate adjustment
** to keep it close to TT, which is convenient for many applications.
** The history of successive attempts to define TDB is set out in
** Resolution 3 adopted by the IAU General Assembly in 2006, which
** defines a fixed TDB(TCB) transformation that is consistent with
** contemporary solar-system ephemerides. Future ephemerides will
** imply slightly changed transformations between TCG and TCB, which
** could introduce a linear drift between TDB and TT; however, any
** such drift is unlikely to exceed 1 nanosecond per century.
**
** 6) The geocentric TDB-TT model used in the present function is that of
** Fairhead & Bretagnon (1990), in its full form. It was originally
** supplied by Fairhead (private communications with P.T.Wallace,
** 1990) as a Fortran subroutine. The present C function contains an
** adaptation of the Fairhead code. The numerical results are
** essentially unaffected by the changes, the differences with
** respect to the Fairhead & Bretagnon original being at the 1e-20 s
** level.
**
** The topocentric part of the model is from Moyer (1981) and
** Murray (1983), with fundamental arguments adapted from
** Simon et al. 1994. It is an approximation to the expression
** ( v / c ) . ( r / c ), where v is the barycentric velocity of
** the Earth, r is the geocentric position of the observer and
** c is the speed of light.
**
** By supplying zeroes for u and v, the topocentric part of the
** model can be nullified, and the function will return the Fairhead
** & Bretagnon result alone.
**
** 7) During the interval 1950-2050, the absolute accuracy is better
** than +/- 3 nanoseconds relative to time ephemerides obtained by
** direct numerical integrations based on the JPL DE405 solar system
** ephemeris.
**
** 8) It must be stressed that the present function is merely a model,
** and that numerical integration of solar-system ephemerides is the
** definitive method for predicting the relationship between TCG and
** TCB and hence between TT and TDB.
**
** References:
**
** Fairhead, L., & Bretagnon, P., Astron.Astrophys., 229, 240-247
** (1990).
**
** IAU 2006 Resolution 3.
**
** McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003),
** IERS Technical Note No. 32, BKG (2004)
**
** Moyer, T.D., Cel.Mech., 23, 33 (1981).
**
** Murray, C.A., Vectorial Astrometry, Adam Hilger (1983).
**
** Seidelmann, P.K. et al., Explanatory Supplement to the
** Astronomical Almanac, Chapter 2, University Science Books (1992).
**
** Simon, J.L., Bretagnon, P., Chapront, J., Chapront-Touze, M.,
** Francou, G. & Laskar, J., Astron.Astrophys., 282, 663-683 (1994).
**
** This revision: 2008 May 24
**
** Copyright (C) 2008 IAU SOFA Review Board. See notes at end.
*/
{
double t, tsol, w, elsun, emsun, d, elj, els, wt, w0, w1, w2, w3, w4,
wf, wj;
int j;
/*
** =====================
** Fairhead et al. model
** =====================
**
** 787 sets of three coefficients.
**
** Each set is
** amplitude (microseconds)
** frequency (radians per Julian millennium since J2000)
** phase (radians)
**
** Sets 1-474 are the T**0 terms
** " 475-679 " " T**1
** " 680-764 " " T**2
** " 765-784 " " T**3
** " 785-787 " " T**4
*/
static const double fairhd[787][3] = {
/* 1, 10 */
{ 1656.674564e-6, 6283.075849991, 6.240054195 },
{ 22.417471e-6, 5753.384884897, 4.296977442 },
{ 13.839792e-6, 12566.151699983, 6.196904410 },
{ 4.770086e-6, 529.690965095, 0.444401603 },
{ 4.676740e-6, 6069.776754553, 4.021195093 },
{ 2.256707e-6, 213.299095438, 5.543113262 },
{ 1.694205e-6, -3.523118349, 5.025132748 },
{ 1.554905e-6, 77713.771467920, 5.198467090 },
{ 1.276839e-6, 7860.419392439, 5.988822341 },
{ 1.193379e-6, 5223.693919802, 3.649823730 },
/* 11, 20 */
{ 1.115322e-6, 3930.209696220, 1.422745069 },
{ 0.794185e-6, 11506.769769794, 2.322313077 },
{ 0.447061e-6, 26.298319800, 3.615796498 },
{ 0.435206e-6, -398.149003408, 4.349338347 },
{ 0.600309e-6, 1577.343542448, 2.678271909 },
{ 0.496817e-6, 6208.294251424, 5.696701824 },
{ 0.486306e-6, 5884.926846583, 0.520007179 },
{ 0.432392e-6, 74.781598567, 2.435898309 },
{ 0.468597e-6, 6244.942814354, 5.866398759 },
{ 0.375510e-6, 5507.553238667, 4.103476804 },
/* 21, 30 */
{ 0.243085e-6, -775.522611324, 3.651837925 },
{ 0.173435e-6, 18849.227549974, 6.153743485 },
{ 0.230685e-6, 5856.477659115, 4.773852582 },
{ 0.203747e-6, 12036.460734888, 4.333987818 },
{ 0.143935e-6, -796.298006816, 5.957517795 },
{ 0.159080e-6, 10977.078804699, 1.890075226 },
{ 0.119979e-6, 38.133035638, 4.551585768 },
{ 0.118971e-6, 5486.777843175, 1.914547226 },
{ 0.116120e-6, 1059.381930189, 0.873504123 },
{ 0.137927e-6, 11790.629088659, 1.135934669 },
/* 31, 40 */
{ 0.098358e-6, 2544.314419883, 0.092793886 },
{ 0.101868e-6, -5573.142801634, 5.984503847 },
{ 0.080164e-6, 206.185548437, 2.095377709 },
{ 0.079645e-6, 4694.002954708, 2.949233637 },
{ 0.062617e-6, 20.775395492, 2.654394814 },
{ 0.075019e-6, 2942.463423292, 4.980931759 },
{ 0.064397e-6, 5746.271337896, 1.280308748 },
{ 0.063814e-6, 5760.498431898, 4.167901731 },
{ 0.048042e-6, 2146.165416475, 1.495846011 },
{ 0.048373e-6, 155.420399434, 2.251573730 },
/* 41, 50 */
{ 0.058844e-6, 426.598190876, 4.839650148 },
{ 0.046551e-6, -0.980321068, 0.921573539 },
{ 0.054139e-6, 17260.154654690, 3.411091093 },
{ 0.042411e-6, 6275.962302991, 2.869567043 },
{ 0.040184e-6, -7.113547001, 3.565975565 },
{ 0.036564e-6, 5088.628839767, 3.324679049 },
{ 0.040759e-6, 12352.852604545, 3.981496998 },
{ 0.036507e-6, 801.820931124, 6.248866009 },
{ 0.036955e-6, 3154.687084896, 5.071801441 },
{ 0.042732e-6, 632.783739313, 5.720622217 },
/* 51, 60 */
{ 0.042560e-6, 161000.685737473, 1.270837679 },
{ 0.040480e-6, 15720.838784878, 2.546610123 },
{ 0.028244e-6, -6286.598968340, 5.069663519 },
{ 0.033477e-6, 6062.663207553, 4.144987272 },
{ 0.034867e-6, 522.577418094, 5.210064075 },
{ 0.032438e-6, 6076.890301554, 0.749317412 },
{ 0.030215e-6, 7084.896781115, 3.389610345 },
{ 0.029247e-6, -71430.695617928, 4.183178762 },
{ 0.033529e-6, 9437.762934887, 2.404714239 },
{ 0.032423e-6, 8827.390269875, 5.541473556 },
/* 61, 70 */
{ 0.027567e-6, 6279.552731642, 5.040846034 },
{ 0.029862e-6, 12139.553509107, 1.770181024 },
{ 0.022509e-6, 10447.387839604, 1.460726241 },
{ 0.020937e-6, 8429.241266467, 0.652303414 },
{ 0.020322e-6, 419.484643875, 3.735430632 },
{ 0.024816e-6, -1194.447010225, 1.087136918 },
{ 0.025196e-6, 1748.016413067, 2.901883301 },
{ 0.021691e-6, 14143.495242431, 5.952658009 },
{ 0.017673e-6, 6812.766815086, 3.186129845 },
{ 0.022567e-6, 6133.512652857, 3.307984806 },
/* 71, 80 */
{ 0.016155e-6, 10213.285546211, 1.331103168 },
{ 0.014751e-6, 1349.867409659, 4.308933301 },
{ 0.015949e-6, -220.412642439, 4.005298270 },
{ 0.015974e-6, -2352.866153772, 6.145309371 },
{ 0.014223e-6, 17789.845619785, 2.104551349 },
{ 0.017806e-6, 73.297125859, 3.475975097 },
{ 0.013671e-6, -536.804512095, 5.971672571 },
{ 0.011942e-6, 8031.092263058, 2.053414715 },
{ 0.014318e-6, 16730.463689596, 3.016058075 },
{ 0.012462e-6, 103.092774219, 1.737438797 },
/* 81, 90 */
{ 0.010962e-6, 3.590428652, 2.196567739 },
{ 0.015078e-6, 19651.048481098, 3.969480770 },
{ 0.010396e-6, 951.718406251, 5.717799605 },
{ 0.011707e-6, -4705.732307544, 2.654125618 },
{ 0.010453e-6, 5863.591206116, 1.913704550 },
{ 0.012420e-6, 4690.479836359, 4.734090399 },
{ 0.011847e-6, 5643.178563677, 5.489005403 },
{ 0.008610e-6, 3340.612426700, 3.661698944 },
{ 0.011622e-6, 5120.601145584, 4.863931876 },
{ 0.010825e-6, 553.569402842, 0.842715011 },
/* 91, 100 */
{ 0.008666e-6, -135.065080035, 3.293406547 },
{ 0.009963e-6, 149.563197135, 4.870690598 },
{ 0.009858e-6, 6309.374169791, 1.061816410 },
{ 0.007959e-6, 316.391869657, 2.465042647 },
{ 0.010099e-6, 283.859318865, 1.942176992 },
{ 0.007147e-6, -242.728603974, 3.661486981 },
{ 0.007505e-6, 5230.807466803, 4.920937029 },
{ 0.008323e-6, 11769.853693166, 1.229392026 },
{ 0.007490e-6, -6256.777530192, 3.658444681 },
{ 0.009370e-6, 149854.400134205, 0.673880395 },
/* 101, 110 */
{ 0.007117e-6, 38.027672636, 5.294249518 },
{ 0.007857e-6, 12168.002696575, 0.525733528 },
{ 0.007019e-6, 6206.809778716, 0.837688810 },
{ 0.006056e-6, 955.599741609, 4.194535082 },
{ 0.008107e-6, 13367.972631107, 3.793235253 },
{ 0.006731e-6, 5650.292110678, 5.639906583 },
{ 0.007332e-6, 36.648562930, 0.114858677 },
{ 0.006366e-6, 4164.311989613, 2.262081818 },
{ 0.006858e-6, 5216.580372801, 0.642063318 },
{ 0.006919e-6, 6681.224853400, 6.018501522 },
/* 111, 120 */
{ 0.006826e-6, 7632.943259650, 3.458654112 },
{ 0.005308e-6, -1592.596013633, 2.500382359 },
{ 0.005096e-6, 11371.704689758, 2.547107806 },
{ 0.004841e-6, 5333.900241022, 0.437078094 },
{ 0.005582e-6, 5966.683980335, 2.246174308 },
{ 0.006304e-6, 11926.254413669, 2.512929171 },
{ 0.006603e-6, 23581.258177318, 5.393136889 },
{ 0.005123e-6, -1.484472708, 2.999641028 },
{ 0.004648e-6, 1589.072895284, 1.275847090 },
{ 0.005119e-6, 6438.496249426, 1.486539246 },
/* 121, 130 */
{ 0.004521e-6, 4292.330832950, 6.140635794 },
{ 0.005680e-6, 23013.539539587, 4.557814849 },
{ 0.005488e-6, -3.455808046, 0.090675389 },
{ 0.004193e-6, 7234.794256242, 4.869091389 },
{ 0.003742e-6, 7238.675591600, 4.691976180 },
{ 0.004148e-6, -110.206321219, 3.016173439 },
{ 0.004553e-6, 11499.656222793, 5.554998314 },
{ 0.004892e-6, 5436.993015240, 1.475415597 },
{ 0.004044e-6, 4732.030627343, 1.398784824 },
{ 0.004164e-6, 12491.370101415, 5.650931916 },
/* 131, 140 */
{ 0.004349e-6, 11513.883316794, 2.181745369 },
{ 0.003919e-6, 12528.018664345, 5.823319737 },
{ 0.003129e-6, 6836.645252834, 0.003844094 },
{ 0.004080e-6, -7058.598461315, 3.690360123 },
{ 0.003270e-6, 76.266071276, 1.517189902 },
{ 0.002954e-6, 6283.143160294, 4.447203799 },
{ 0.002872e-6, 28.449187468, 1.158692983 },
{ 0.002881e-6, 735.876513532, 0.349250250 },
{ 0.003279e-6, 5849.364112115, 4.893384368 },
{ 0.003625e-6, 6209.778724132, 1.473760578 },
/* 141, 150 */
{ 0.003074e-6, 949.175608970, 5.185878737 },
{ 0.002775e-6, 9917.696874510, 1.030026325 },
{ 0.002646e-6, 10973.555686350, 3.918259169 },
{ 0.002575e-6, 25132.303399966, 6.109659023 },
{ 0.003500e-6, 263.083923373, 1.892100742 },
{ 0.002740e-6, 18319.536584880, 4.320519510 },
{ 0.002464e-6, 202.253395174, 4.698203059 },
{ 0.002409e-6, 2.542797281, 5.325009315 },
{ 0.003354e-6, -90955.551694697, 1.942656623 },
{ 0.002296e-6, 6496.374945429, 5.061810696 },
/* 151, 160 */
{ 0.003002e-6, 6172.869528772, 2.797822767 },
{ 0.003202e-6, 27511.467873537, 0.531673101 },
{ 0.002954e-6, -6283.008539689, 4.533471191 },
{ 0.002353e-6, 639.897286314, 3.734548088 },
{ 0.002401e-6, 16200.772724501, 2.605547070 },
{ 0.003053e-6, 233141.314403759, 3.029030662 },
{ 0.003024e-6, 83286.914269554, 2.355556099 },
{ 0.002863e-6, 17298.182327326, 5.240963796 },
{ 0.002103e-6, -7079.373856808, 5.756641637 },
{ 0.002303e-6, 83996.847317911, 2.013686814 },
/* 161, 170 */
{ 0.002303e-6, 18073.704938650, 1.089100410 },
{ 0.002381e-6, 63.735898303, 0.759188178 },
{ 0.002493e-6, 6386.168624210, 0.645026535 },
{ 0.002366e-6, 3.932153263, 6.215885448 },
{ 0.002169e-6, 11015.106477335, 4.845297676 },
{ 0.002397e-6, 6243.458341645, 3.809290043 },
{ 0.002183e-6, 1162.474704408, 6.179611691 },
{ 0.002353e-6, 6246.427287062, 4.781719760 },
{ 0.002199e-6, -245.831646229, 5.956152284 },
{ 0.001729e-6, 3894.181829542, 1.264976635 },
/* 171, 180 */
{ 0.001896e-6, -3128.388765096, 4.914231596 },
{ 0.002085e-6, 35.164090221, 1.405158503 },
{ 0.002024e-6, 14712.317116458, 2.752035928 },
{ 0.001737e-6, 6290.189396992, 5.280820144 },
{ 0.002229e-6, 491.557929457, 1.571007057 },
{ 0.001602e-6, 14314.168113050, 4.203664806 },
{ 0.002186e-6, 454.909366527, 1.402101526 },
{ 0.001897e-6, 22483.848574493, 4.167932508 },
{ 0.001825e-6, -3738.761430108, 0.545828785 },
{ 0.001894e-6, 1052.268383188, 5.817167450 },
/* 181, 190 */
{ 0.001421e-6, 20.355319399, 2.419886601 },
{ 0.001408e-6, 10984.192351700, 2.732084787 },
{ 0.001847e-6, 10873.986030480, 2.903477885 },
{ 0.001391e-6, -8635.942003763, 0.593891500 },
{ 0.001388e-6, -7.046236698, 1.166145902 },
{ 0.001810e-6, -88860.057071188, 0.487355242 },
{ 0.001288e-6, -1990.745017041, 3.913022880 },
{ 0.001297e-6, 23543.230504682, 3.063805171 },
{ 0.001335e-6, -266.607041722, 3.995764039 },
{ 0.001376e-6, 10969.965257698, 5.152914309 },
/* 191, 200 */
{ 0.001745e-6, 244287.600007027, 3.626395673 },
{ 0.001649e-6, 31441.677569757, 1.952049260 },
{ 0.001416e-6, 9225.539273283, 4.996408389 },
{ 0.001238e-6, 4804.209275927, 5.503379738 },
{ 0.001472e-6, 4590.910180489, 4.164913291 },
{ 0.001169e-6, 6040.347246017, 5.841719038 },
{ 0.001039e-6, 5540.085789459, 2.769753519 },
{ 0.001004e-6, -170.672870619, 0.755008103 },
{ 0.001284e-6, 10575.406682942, 5.306538209 },
{ 0.001278e-6, 71.812653151, 4.713486491 },
/* 201, 210 */
{ 0.001321e-6, 18209.330263660, 2.624866359 },
{ 0.001297e-6, 21228.392023546, 0.382603541 },
{ 0.000954e-6, 6282.095528923, 0.882213514 },
{ 0.001145e-6, 6058.731054289, 1.169483931 },
{ 0.000979e-6, 5547.199336460, 5.448375984 },
{ 0.000987e-6, -6262.300454499, 2.656486959 },
{ 0.001070e-6, -154717.609887482, 1.827624012 },
{ 0.000991e-6, 4701.116501708, 4.387001801 },
{ 0.001155e-6, -14.227094002, 3.042700750 },
{ 0.001176e-6, 277.034993741, 3.335519004 },
/* 211, 220 */
{ 0.000890e-6, 13916.019109642, 5.601498297 },
{ 0.000884e-6, -1551.045222648, 1.088831705 },
{ 0.000876e-6, 5017.508371365, 3.969902609 },
{ 0.000806e-6, 15110.466119866, 5.142876744 },
{ 0.000773e-6, -4136.910433516, 0.022067765 },
{ 0.001077e-6, 175.166059800, 1.844913056 },
{ 0.000954e-6, -6284.056171060, 0.968480906 },
{ 0.000737e-6, 5326.786694021, 4.923831588 },
{ 0.000845e-6, -433.711737877, 4.749245231 },
{ 0.000819e-6, 8662.240323563, 5.991247817 },
/* 221, 230 */
{ 0.000852e-6, 199.072001436, 2.189604979 },
{ 0.000723e-6, 17256.631536341, 6.068719637 },
{ 0.000940e-6, 6037.244203762, 6.197428148 },
{ 0.000885e-6, 11712.955318231, 3.280414875 },
{ 0.000706e-6, 12559.038152982, 2.824848947 },
{ 0.000732e-6, 2379.164473572, 2.501813417 },
{ 0.000764e-6, -6127.655450557, 2.236346329 },
{ 0.000908e-6, 131.541961686, 2.521257490 },
{ 0.000907e-6, 35371.887265976, 3.370195967 },
{ 0.000673e-6, 1066.495477190, 3.876512374 },
/* 231, 240 */
{ 0.000814e-6, 17654.780539750, 4.627122566 },
{ 0.000630e-6, 36.027866677, 0.156368499 },
{ 0.000798e-6, 515.463871093, 5.151962502 },
{ 0.000798e-6, 148.078724426, 5.909225055 },
{ 0.000806e-6, 309.278322656, 6.054064447 },
{ 0.000607e-6, -39.617508346, 2.839021623 },
{ 0.000601e-6, 412.371096874, 3.984225404 },
{ 0.000646e-6, 11403.676995575, 3.852959484 },
{ 0.000704e-6, 13521.751441591, 2.300991267 },
{ 0.000603e-6, -65147.619767937, 4.140083146 },
/* 241, 250 */
{ 0.000609e-6, 10177.257679534, 0.437122327 },
{ 0.000631e-6, 5767.611978898, 4.026532329 },
{ 0.000576e-6, 11087.285125918, 4.760293101 },
{ 0.000674e-6, 14945.316173554, 6.270510511 },
{ 0.000726e-6, 5429.879468239, 6.039606892 },
{ 0.000710e-6, 28766.924424484, 5.672617711 },
{ 0.000647e-6, 11856.218651625, 3.397132627 },
{ 0.000678e-6, -5481.254918868, 6.249666675 },
{ 0.000618e-6, 22003.914634870, 2.466427018 },
{ 0.000738e-6, 6134.997125565, 2.242668890 },
/* 251, 260 */
{ 0.000660e-6, 625.670192312, 5.864091907 },
{ 0.000694e-6, 3496.032826134, 2.668309141 },
{ 0.000531e-6, 6489.261398429, 1.681888780 },
{ 0.000611e-6, -143571.324284214, 2.424978312 },
{ 0.000575e-6, 12043.574281889, 4.216492400 },
{ 0.000553e-6, 12416.588502848, 4.772158039 },
{ 0.000689e-6, 4686.889407707, 6.224271088 },
{ 0.000495e-6, 7342.457780181, 3.817285811 },
{ 0.000567e-6, 3634.621024518, 1.649264690 },
{ 0.000515e-6, 18635.928454536, 3.945345892 },
/* 261, 270 */
{ 0.000486e-6, -323.505416657, 4.061673868 },
{ 0.000662e-6, 25158.601719765, 1.794058369 },
{ 0.000509e-6, 846.082834751, 3.053874588 },
{ 0.000472e-6, -12569.674818332, 5.112133338 },
{ 0.000461e-6, 6179.983075773, 0.513669325 },
{ 0.000641e-6, 83467.156352816, 3.210727723 },
{ 0.000520e-6, 10344.295065386, 2.445597761 },
{ 0.000493e-6, 18422.629359098, 1.676939306 },
{ 0.000478e-6, 1265.567478626, 5.487314569 },
{ 0.000472e-6, -18.159247265, 1.999707589 },
/* 271, 280 */
{ 0.000559e-6, 11190.377900137, 5.783236356 },
{ 0.000494e-6, 9623.688276691, 3.022645053 },
{ 0.000463e-6, 5739.157790895, 1.411223013 },
{ 0.000432e-6, 16858.482532933, 1.179256434 },
{ 0.000574e-6, 72140.628666286, 1.758191830 },
{ 0.000484e-6, 17267.268201691, 3.290589143 },
{ 0.000550e-6, 4907.302050146, 0.864024298 },
{ 0.000399e-6, 14.977853527, 2.094441910 },
{ 0.000491e-6, 224.344795702, 0.878372791 },
{ 0.000432e-6, 20426.571092422, 6.003829241 },
/* 281, 290 */
{ 0.000481e-6, 5749.452731634, 4.309591964 },
{ 0.000480e-6, 5757.317038160, 1.142348571 },
{ 0.000485e-6, 6702.560493867, 0.210580917 },
{ 0.000426e-6, 6055.549660552, 4.274476529 },
{ 0.000480e-6, 5959.570433334, 5.031351030 },
{ 0.000466e-6, 12562.628581634, 4.959581597 },
{ 0.000520e-6, 39302.096962196, 4.788002889 },
{ 0.000458e-6, 12132.439962106, 1.880103788 },
{ 0.000470e-6, 12029.347187887, 1.405611197 },
{ 0.000416e-6, -7477.522860216, 1.082356330 },
/* 291, 300 */
{ 0.000449e-6, 11609.862544012, 4.179989585 },
{ 0.000465e-6, 17253.041107690, 0.353496295 },
{ 0.000362e-6, -4535.059436924, 1.583849576 },
{ 0.000383e-6, 21954.157609398, 3.747376371 },
{ 0.000389e-6, 17.252277143, 1.395753179 },
{ 0.000331e-6, 18052.929543158, 0.566790582 },
{ 0.000430e-6, 13517.870106233, 0.685827538 },
{ 0.000368e-6, -5756.908003246, 0.731374317 },
{ 0.000330e-6, 10557.594160824, 3.710043680 },
{ 0.000332e-6, 20199.094959633, 1.652901407 },
/* 301, 310 */
{ 0.000384e-6, 11933.367960670, 5.827781531 },
{ 0.000387e-6, 10454.501386605, 2.541182564 },
{ 0.000325e-6, 15671.081759407, 2.178850542 },
{ 0.000318e-6, 138.517496871, 2.253253037 },
{ 0.000305e-6, 9388.005909415, 0.578340206 },
{ 0.000352e-6, 5749.861766548, 3.000297967 },
{ 0.000311e-6, 6915.859589305, 1.693574249 },
{ 0.000297e-6, 24072.921469776, 1.997249392 },
{ 0.000363e-6, -640.877607382, 5.071820966 },
{ 0.000323e-6, 12592.450019783, 1.072262823 },
/* 311, 320 */
{ 0.000341e-6, 12146.667056108, 4.700657997 },
{ 0.000290e-6, 9779.108676125, 1.812320441 },
{ 0.000342e-6, 6132.028180148, 4.322238614 },
{ 0.000329e-6, 6268.848755990, 3.033827743 },
{ 0.000374e-6, 17996.031168222, 3.388716544 },
{ 0.000285e-6, -533.214083444, 4.687313233 },
{ 0.000338e-6, 6065.844601290, 0.877776108 },
{ 0.000276e-6, 24.298513841, 0.770299429 },
{ 0.000336e-6, -2388.894020449, 5.353796034 },
{ 0.000290e-6, 3097.883822726, 4.075291557 },
/* 321, 330 */
{ 0.000318e-6, 709.933048357, 5.941207518 },
{ 0.000271e-6, 13095.842665077, 3.208912203 },
{ 0.000331e-6, 6073.708907816, 4.007881169 },
{ 0.000292e-6, 742.990060533, 2.714333592 },
{ 0.000362e-6, 29088.811415985, 3.215977013 },
{ 0.000280e-6, 12359.966151546, 0.710872502 },
{ 0.000267e-6, 10440.274292604, 4.730108488 },
{ 0.000262e-6, 838.969287750, 1.327720272 },
{ 0.000250e-6, 16496.361396202, 0.898769761 },
{ 0.000325e-6, 20597.243963041, 0.180044365 },
/* 331, 340 */
{ 0.000268e-6, 6148.010769956, 5.152666276 },
{ 0.000284e-6, 5636.065016677, 5.655385808 },
{ 0.000301e-6, 6080.822454817, 2.135396205 },
{ 0.000294e-6, -377.373607916, 3.708784168 },
{ 0.000236e-6, 2118.763860378, 1.733578756 },
{ 0.000234e-6, 5867.523359379, 5.575209112 },
{ 0.000268e-6, -226858.238553767, 0.069432392 },
{ 0.000265e-6, 167283.761587465, 4.369302826 },
{ 0.000280e-6, 28237.233459389, 5.304829118 },
{ 0.000292e-6, 12345.739057544, 4.096094132 },
/* 341, 350 */
{ 0.000223e-6, 19800.945956225, 3.069327406 },
{ 0.000301e-6, 43232.306658416, 6.205311188 },
{ 0.000264e-6, 18875.525869774, 1.417263408 },
{ 0.000304e-6, -1823.175188677, 3.409035232 },
{ 0.000301e-6, 109.945688789, 0.510922054 },
{ 0.000260e-6, 813.550283960, 2.389438934 },
{ 0.000299e-6, 316428.228673312, 5.384595078 },
{ 0.000211e-6, 5756.566278634, 3.789392838 },
{ 0.000209e-6, 5750.203491159, 1.661943545 },
{ 0.000240e-6, 12489.885628707, 5.684549045 },
/* 351, 360 */
{ 0.000216e-6, 6303.851245484, 3.862942261 },
{ 0.000203e-6, 1581.959348283, 5.549853589 },
{ 0.000200e-6, 5642.198242609, 1.016115785 },
{ 0.000197e-6, -70.849445304, 4.690702525 },
{ 0.000227e-6, 6287.008003254, 2.911891613 },
{ 0.000197e-6, 533.623118358, 1.048982898 },
{ 0.000205e-6, -6279.485421340, 1.829362730 },
{ 0.000209e-6, -10988.808157535, 2.636140084 },
{ 0.000208e-6, -227.526189440, 4.127883842 },
{ 0.000191e-6, 415.552490612, 4.401165650 },
/* 361, 370 */
{ 0.000190e-6, 29296.615389579, 4.175658539 },
{ 0.000264e-6, 66567.485864652, 4.601102551 },
{ 0.000256e-6, -3646.350377354, 0.506364778 },
{ 0.000188e-6, 13119.721102825, 2.032195842 },
{ 0.000185e-6, -209.366942175, 4.694756586 },
{ 0.000198e-6, 25934.124331089, 3.832703118 },
{ 0.000195e-6, 4061.219215394, 3.308463427 },
{ 0.000234e-6, 5113.487598583, 1.716090661 },
{ 0.000188e-6, 1478.866574064, 5.686865780 },
{ 0.000222e-6, 11823.161639450, 1.942386641 },
/* 371, 380 */
{ 0.000181e-6, 10770.893256262, 1.999482059 },
{ 0.000171e-6, 6546.159773364, 1.182807992 },
{ 0.000206e-6, 70.328180442, 5.934076062 },
{ 0.000169e-6, 20995.392966449, 2.169080622 },
{ 0.000191e-6, 10660.686935042, 5.405515999 },
{ 0.000228e-6, 33019.021112205, 4.656985514 },
{ 0.000184e-6, -4933.208440333, 3.327476868 },
{ 0.000220e-6, -135.625325010, 1.765430262 },
{ 0.000166e-6, 23141.558382925, 3.454132746 },
{ 0.000191e-6, 6144.558353121, 5.020393445 },
/* 381, 390 */
{ 0.000180e-6, 6084.003848555, 0.602182191 },
{ 0.000163e-6, 17782.732072784, 4.960593133 },
{ 0.000225e-6, 16460.333529525, 2.596451817 },
{ 0.000222e-6, 5905.702242076, 3.731990323 },
{ 0.000204e-6, 227.476132789, 5.636192701 },
{ 0.000159e-6, 16737.577236597, 3.600691544 },
{ 0.000200e-6, 6805.653268085, 0.868220961 },
{ 0.000187e-6, 11919.140866668, 2.629456641 },
{ 0.000161e-6, 127.471796607, 2.862574720 },
{ 0.000205e-6, 6286.666278643, 1.742882331 },
/* 391, 400 */
{ 0.000189e-6, 153.778810485, 4.812372643 },
{ 0.000168e-6, 16723.350142595, 0.027860588 },
{ 0.000149e-6, 11720.068865232, 0.659721876 },
{ 0.000189e-6, 5237.921013804, 5.245313000 },
{ 0.000143e-6, 6709.674040867, 4.317625647 },
{ 0.000146e-6, 4487.817406270, 4.815297007 },
{ 0.000144e-6, -664.756045130, 5.381366880 },
{ 0.000175e-6, 5127.714692584, 4.728443327 },
{ 0.000162e-6, 6254.626662524, 1.435132069 },
{ 0.000187e-6, 47162.516354635, 1.354371923 },
/* 401, 410 */
{ 0.000146e-6, 11080.171578918, 3.369695406 },
{ 0.000180e-6, -348.924420448, 2.490902145 },
{ 0.000148e-6, 151.047669843, 3.799109588 },
{ 0.000157e-6, 6197.248551160, 1.284375887 },
{ 0.000167e-6, 146.594251718, 0.759969109 },
{ 0.000133e-6, -5331.357443741, 5.409701889 },
{ 0.000154e-6, 95.979227218, 3.366890614 },
{ 0.000148e-6, -6418.140930027, 3.384104996 },
{ 0.000128e-6, -6525.804453965, 3.803419985 },
{ 0.000130e-6, 11293.470674356, 0.939039445 },
/* 411, 420 */
{ 0.000152e-6, -5729.506447149, 0.734117523 },
{ 0.000138e-6, 210.117701700, 2.564216078 },
{ 0.000123e-6, 6066.595360816, 4.517099537 },
{ 0.000140e-6, 18451.078546566, 0.642049130 },
{ 0.000126e-6, 11300.584221356, 3.485280663 },
{ 0.000119e-6, 10027.903195729, 3.217431161 },
{ 0.000151e-6, 4274.518310832, 4.404359108 },
{ 0.000117e-6, 6072.958148291, 0.366324650 },
{ 0.000165e-6, -7668.637425143, 4.298212528 },
{ 0.000117e-6, -6245.048177356, 5.379518958 },
/* 421, 430 */
{ 0.000130e-6, -5888.449964932, 4.527681115 },
{ 0.000121e-6, -543.918059096, 6.109429504 },
{ 0.000162e-6, 9683.594581116, 5.720092446 },
{ 0.000141e-6, 6219.339951688, 0.679068671 },
{ 0.000118e-6, 22743.409379516, 4.881123092 },
{ 0.000129e-6, 1692.165669502, 0.351407289 },
{ 0.000126e-6, 5657.405657679, 5.146592349 },
{ 0.000114e-6, 728.762966531, 0.520791814 },
{ 0.000120e-6, 52.596639600, 0.948516300 },
{ 0.000115e-6, 65.220371012, 3.504914846 },
/* 431, 440 */
{ 0.000126e-6, 5881.403728234, 5.577502482 },
{ 0.000158e-6, 163096.180360983, 2.957128968 },
{ 0.000134e-6, 12341.806904281, 2.598576764 },
{ 0.000151e-6, 16627.370915377, 3.985702050 },
{ 0.000109e-6, 1368.660252845, 0.014730471 },
{ 0.000131e-6, 6211.263196841, 0.085077024 },
{ 0.000146e-6, 5792.741760812, 0.708426604 },
{ 0.000146e-6, -77.750543984, 3.121576600 },
{ 0.000107e-6, 5341.013788022, 0.288231904 },
{ 0.000138e-6, 6281.591377283, 2.797450317 },
/* 441, 450 */
{ 0.000113e-6, -6277.552925684, 2.788904128 },
{ 0.000115e-6, -525.758811831, 5.895222200 },
{ 0.000138e-6, 6016.468808270, 6.096188999 },
{ 0.000139e-6, 23539.707386333, 2.028195445 },
{ 0.000146e-6, -4176.041342449, 4.660008502 },
{ 0.000107e-6, 16062.184526117, 4.066520001 },
{ 0.000142e-6, 83783.548222473, 2.936315115 },
{ 0.000128e-6, 9380.959672717, 3.223844306 },
{ 0.000135e-6, 6205.325306007, 1.638054048 },
{ 0.000101e-6, 2699.734819318, 5.481603249 },
/* 451, 460 */
{ 0.000104e-6, -568.821874027, 2.205734493 },
{ 0.000103e-6, 6321.103522627, 2.440421099 },
{ 0.000119e-6, 6321.208885629, 2.547496264 },
{ 0.000138e-6, 1975.492545856, 2.314608466 },
{ 0.000121e-6, 137.033024162, 4.539108237 },
{ 0.000123e-6, 19402.796952817, 4.538074405 },
{ 0.000119e-6, 22805.735565994, 2.869040566 },
{ 0.000133e-6, 64471.991241142, 6.056405489 },
{ 0.000129e-6, -85.827298831, 2.540635083 },
{ 0.000131e-6, 13613.804277336, 4.005732868 },
/* 461, 470 */
{ 0.000104e-6, 9814.604100291, 1.959967212 },
{ 0.000112e-6, 16097.679950283, 3.589026260 },
{ 0.000123e-6, 2107.034507542, 1.728627253 },
{ 0.000121e-6, 36949.230808424, 6.072332087 },
{ 0.000108e-6, -12539.853380183, 3.716133846 },
{ 0.000113e-6, -7875.671863624, 2.725771122 },
{ 0.000109e-6, 4171.425536614, 4.033338079 },
{ 0.000101e-6, 6247.911759770, 3.441347021 },
{ 0.000113e-6, 7330.728427345, 0.656372122 },
{ 0.000113e-6, 51092.726050855, 2.791483066 },
/* 471, 480 */
{ 0.000106e-6, 5621.842923210, 1.815323326 },
{ 0.000101e-6, 111.430161497, 5.711033677 },
{ 0.000103e-6, 909.818733055, 2.812745443 },
{ 0.000101e-6, 1790.642637886, 1.965746028 },
/* T */
{ 102.156724e-6, 6283.075849991, 4.249032005 },
{ 1.706807e-6, 12566.151699983, 4.205904248 },
{ 0.269668e-6, 213.299095438, 3.400290479 },
{ 0.265919e-6, 529.690965095, 5.836047367 },
{ 0.210568e-6, -3.523118349, 6.262738348 },
{ 0.077996e-6, 5223.693919802, 4.670344204 },
/* 481, 490 */
{ 0.054764e-6, 1577.343542448, 4.534800170 },
{ 0.059146e-6, 26.298319800, 1.083044735 },
{ 0.034420e-6, -398.149003408, 5.980077351 },
{ 0.032088e-6, 18849.227549974, 4.162913471 },
{ 0.033595e-6, 5507.553238667, 5.980162321 },
{ 0.029198e-6, 5856.477659115, 0.623811863 },
{ 0.027764e-6, 155.420399434, 3.745318113 },
{ 0.025190e-6, 5746.271337896, 2.980330535 },
{ 0.022997e-6, -796.298006816, 1.174411803 },
{ 0.024976e-6, 5760.498431898, 2.467913690 },
/* 491, 500 */
{ 0.021774e-6, 206.185548437, 3.854787540 },
{ 0.017925e-6, -775.522611324, 1.092065955 },
{ 0.013794e-6, 426.598190876, 2.699831988 },
{ 0.013276e-6, 6062.663207553, 5.845801920 },
{ 0.011774e-6, 12036.460734888, 2.292832062 },
{ 0.012869e-6, 6076.890301554, 5.333425680 },
{ 0.012152e-6, 1059.381930189, 6.222874454 },
{ 0.011081e-6, -7.113547001, 5.154724984 },
{ 0.010143e-6, 4694.002954708, 4.044013795 },
{ 0.009357e-6, 5486.777843175, 3.416081409 },
/* 501, 510 */
{ 0.010084e-6, 522.577418094, 0.749320262 },
{ 0.008587e-6, 10977.078804699, 2.777152598 },
{ 0.008628e-6, 6275.962302991, 4.562060226 },
{ 0.008158e-6, -220.412642439, 5.806891533 },
{ 0.007746e-6, 2544.314419883, 1.603197066 },
{ 0.007670e-6, 2146.165416475, 3.000200440 },
{ 0.007098e-6, 74.781598567, 0.443725817 },
{ 0.006180e-6, -536.804512095, 1.302642751 },
{ 0.005818e-6, 5088.628839767, 4.827723531 },
{ 0.004945e-6, -6286.598968340, 0.268305170 },
/* 511, 520 */
{ 0.004774e-6, 1349.867409659, 5.808636673 },
{ 0.004687e-6, -242.728603974, 5.154890570 },
{ 0.006089e-6, 1748.016413067, 4.403765209 },
{ 0.005975e-6, -1194.447010225, 2.583472591 },
{ 0.004229e-6, 951.718406251, 0.931172179 },
{ 0.005264e-6, 553.569402842, 2.336107252 },
{ 0.003049e-6, 5643.178563677, 1.362634430 },
{ 0.002974e-6, 6812.766815086, 1.583012668 },
{ 0.003403e-6, -2352.866153772, 2.552189886 },
{ 0.003030e-6, 419.484643875, 5.286473844 },
/* 521, 530 */
{ 0.003210e-6, -7.046236698, 1.863796539 },
{ 0.003058e-6, 9437.762934887, 4.226420633 },
{ 0.002589e-6, 12352.852604545, 1.991935820 },
{ 0.002927e-6, 5216.580372801, 2.319951253 },
{ 0.002425e-6, 5230.807466803, 3.084752833 },
{ 0.002656e-6, 3154.687084896, 2.487447866 },
{ 0.002445e-6, 10447.387839604, 2.347139160 },
{ 0.002990e-6, 4690.479836359, 6.235872050 },
{ 0.002890e-6, 5863.591206116, 0.095197563 },
{ 0.002498e-6, 6438.496249426, 2.994779800 },
/* 531, 540 */
{ 0.001889e-6, 8031.092263058, 3.569003717 },
{ 0.002567e-6, 801.820931124, 3.425611498 },
{ 0.001803e-6, -71430.695617928, 2.192295512 },
{ 0.001782e-6, 3.932153263, 5.180433689 },
{ 0.001694e-6, -4705.732307544, 4.641779174 },
{ 0.001704e-6, -1592.596013633, 3.997097652 },
{ 0.001735e-6, 5849.364112115, 0.417558428 },
{ 0.001643e-6, 8429.241266467, 2.180619584 },
{ 0.001680e-6, 38.133035638, 4.164529426 },
{ 0.002045e-6, 7084.896781115, 0.526323854 },
/* 541, 550 */
{ 0.001458e-6, 4292.330832950, 1.356098141 },
{ 0.001437e-6, 20.355319399, 3.895439360 },
{ 0.001738e-6, 6279.552731642, 0.087484036 },
{ 0.001367e-6, 14143.495242431, 3.987576591 },
{ 0.001344e-6, 7234.794256242, 0.090454338 },
{ 0.001438e-6, 11499.656222793, 0.974387904 },
{ 0.001257e-6, 6836.645252834, 1.509069366 },
{ 0.001358e-6, 11513.883316794, 0.495572260 },
{ 0.001628e-6, 7632.943259650, 4.968445721 },
{ 0.001169e-6, 103.092774219, 2.838496795 },
/* 551, 560 */
{ 0.001162e-6, 4164.311989613, 3.408387778 },
{ 0.001092e-6, 6069.776754553, 3.617942651 },
{ 0.001008e-6, 17789.845619785, 0.286350174 },
{ 0.001008e-6, 639.897286314, 1.610762073 },
{ 0.000918e-6, 10213.285546211, 5.532798067 },
{ 0.001011e-6, -6256.777530192, 0.661826484 },
{ 0.000753e-6, 16730.463689596, 3.905030235 },
{ 0.000737e-6, 11926.254413669, 4.641956361 },
{ 0.000694e-6, 3340.612426700, 2.111120332 },
{ 0.000701e-6, 3894.181829542, 2.760823491 },
/* 561, 570 */
{ 0.000689e-6, -135.065080035, 4.768800780 },
{ 0.000700e-6, 13367.972631107, 5.760439898 },
{ 0.000664e-6, 6040.347246017, 1.051215840 },
{ 0.000654e-6, 5650.292110678, 4.911332503 },
{ 0.000788e-6, 6681.224853400, 4.699648011 },
{ 0.000628e-6, 5333.900241022, 5.024608847 },
{ 0.000755e-6, -110.206321219, 4.370971253 },
{ 0.000628e-6, 6290.189396992, 3.660478857 },
{ 0.000635e-6, 25132.303399966, 4.121051532 },
{ 0.000534e-6, 5966.683980335, 1.173284524 },
/* 571, 580 */
{ 0.000543e-6, -433.711737877, 0.345585464 },
{ 0.000517e-6, -1990.745017041, 5.414571768 },
{ 0.000504e-6, 5767.611978898, 2.328281115 },
{ 0.000485e-6, 5753.384884897, 1.685874771 },
{ 0.000463e-6, 7860.419392439, 5.297703006 },
{ 0.000604e-6, 515.463871093, 0.591998446 },
{ 0.000443e-6, 12168.002696575, 4.830881244 },
{ 0.000570e-6, 199.072001436, 3.899190272 },
{ 0.000465e-6, 10969.965257698, 0.476681802 },
{ 0.000424e-6, -7079.373856808, 1.112242763 },
/* 581, 590 */
{ 0.000427e-6, 735.876513532, 1.994214480 },
{ 0.000478e-6, -6127.655450557, 3.778025483 },
{ 0.000414e-6, 10973.555686350, 5.441088327 },
{ 0.000512e-6, 1589.072895284, 0.107123853 },
{ 0.000378e-6, 10984.192351700, 0.915087231 },
{ 0.000402e-6, 11371.704689758, 4.107281715 },
{ 0.000453e-6, 9917.696874510, 1.917490952 },
{ 0.000395e-6, 149.563197135, 2.763124165 },
{ 0.000371e-6, 5739.157790895, 3.112111866 },
{ 0.000350e-6, 11790.629088659, 0.440639857 },
/* 591, 600 */
{ 0.000356e-6, 6133.512652857, 5.444568842 },
{ 0.000344e-6, 412.371096874, 5.676832684 },
{ 0.000383e-6, 955.599741609, 5.559734846 },
{ 0.000333e-6, 6496.374945429, 0.261537984 },
{ 0.000340e-6, 6055.549660552, 5.975534987 },
{ 0.000334e-6, 1066.495477190, 2.335063907 },
{ 0.000399e-6, 11506.769769794, 5.321230910 },
{ 0.000314e-6, 18319.536584880, 2.313312404 },
{ 0.000424e-6, 1052.268383188, 1.211961766 },
{ 0.000307e-6, 63.735898303, 3.169551388 },
/* 601, 610 */
{ 0.000329e-6, 29.821438149, 6.106912080 },
{ 0.000357e-6, 6309.374169791, 4.223760346 },
{ 0.000312e-6, -3738.761430108, 2.180556645 },
{ 0.000301e-6, 309.278322656, 1.499984572 },
{ 0.000268e-6, 12043.574281889, 2.447520648 },
{ 0.000257e-6, 12491.370101415, 3.662331761 },
{ 0.000290e-6, 625.670192312, 1.272834584 },
{ 0.000256e-6, 5429.879468239, 1.913426912 },
{ 0.000339e-6, 3496.032826134, 4.165930011 },
{ 0.000283e-6, 3930.209696220, 4.325565754 },
/* 611, 620 */
{ 0.000241e-6, 12528.018664345, 3.832324536 },
{ 0.000304e-6, 4686.889407707, 1.612348468 },
{ 0.000259e-6, 16200.772724501, 3.470173146 },
{ 0.000238e-6, 12139.553509107, 1.147977842 },
{ 0.000236e-6, 6172.869528772, 3.776271728 },
{ 0.000296e-6, -7058.598461315, 0.460368852 },
{ 0.000306e-6, 10575.406682942, 0.554749016 },
{ 0.000251e-6, 17298.182327326, 0.834332510 },
{ 0.000290e-6, 4732.030627343, 4.759564091 },
{ 0.000261e-6, 5884.926846583, 0.298259862 },
/* 621, 630 */
{ 0.000249e-6, 5547.199336460, 3.749366406 },
{ 0.000213e-6, 11712.955318231, 5.415666119 },
{ 0.000223e-6, 4701.116501708, 2.703203558 },
{ 0.000268e-6, -640.877607382, 0.283670793 },
{ 0.000209e-6, 5636.065016677, 1.238477199 },
{ 0.000193e-6, 10177.257679534, 1.943251340 },
{ 0.000182e-6, 6283.143160294, 2.456157599 },
{ 0.000184e-6, -227.526189440, 5.888038582 },
{ 0.000182e-6, -6283.008539689, 0.241332086 },
{ 0.000228e-6, -6284.056171060, 2.657323816 },
/* 631, 640 */
{ 0.000166e-6, 7238.675591600, 5.930629110 },
{ 0.000167e-6, 3097.883822726, 5.570955333 },
{ 0.000159e-6, -323.505416657, 5.786670700 },
{ 0.000154e-6, -4136.910433516, 1.517805532 },
{ 0.000176e-6, 12029.347187887, 3.139266834 },
{ 0.000167e-6, 12132.439962106, 3.556352289 },
{ 0.000153e-6, 202.253395174, 1.463313961 },
{ 0.000157e-6, 17267.268201691, 1.586837396 },
{ 0.000142e-6, 83996.847317911, 0.022670115 },
{ 0.000152e-6, 17260.154654690, 0.708528947 },
/* 641, 650 */
{ 0.000144e-6, 6084.003848555, 5.187075177 },
{ 0.000135e-6, 5756.566278634, 1.993229262 },
{ 0.000134e-6, 5750.203491159, 3.457197134 },
{ 0.000144e-6, 5326.786694021, 6.066193291 },
{ 0.000160e-6, 11015.106477335, 1.710431974 },
{ 0.000133e-6, 3634.621024518, 2.836451652 },
{ 0.000134e-6, 18073.704938650, 5.453106665 },
{ 0.000134e-6, 1162.474704408, 5.326898811 },
{ 0.000128e-6, 5642.198242609, 2.511652591 },
{ 0.000160e-6, 632.783739313, 5.628785365 },
/* 651, 660 */
{ 0.000132e-6, 13916.019109642, 0.819294053 },
{ 0.000122e-6, 14314.168113050, 5.677408071 },
{ 0.000125e-6, 12359.966151546, 5.251984735 },
{ 0.000121e-6, 5749.452731634, 2.210924603 },
{ 0.000136e-6, -245.831646229, 1.646502367 },
{ 0.000120e-6, 5757.317038160, 3.240883049 },
{ 0.000134e-6, 12146.667056108, 3.059480037 },
{ 0.000137e-6, 6206.809778716, 1.867105418 },
{ 0.000141e-6, 17253.041107690, 2.069217456 },
{ 0.000129e-6, -7477.522860216, 2.781469314 },
/* 661, 670 */
{ 0.000116e-6, 5540.085789459, 4.281176991 },
{ 0.000116e-6, 9779.108676125, 3.320925381 },
{ 0.000129e-6, 5237.921013804, 3.497704076 },
{ 0.000113e-6, 5959.570433334, 0.983210840 },
{ 0.000122e-6, 6282.095528923, 2.674938860 },
{ 0.000140e-6, -11.045700264, 4.957936982 },
{ 0.000108e-6, 23543.230504682, 1.390113589 },
{ 0.000106e-6, -12569.674818332, 0.429631317 },
{ 0.000110e-6, -266.607041722, 5.501340197 },
{ 0.000115e-6, 12559.038152982, 4.691456618 },
/* 671, 680 */
{ 0.000134e-6, -2388.894020449, 0.577313584 },
{ 0.000109e-6, 10440.274292604, 6.218148717 },
{ 0.000102e-6, -543.918059096, 1.477842615 },
{ 0.000108e-6, 21228.392023546, 2.237753948 },
{ 0.000101e-6, -4535.059436924, 3.100492232 },
{ 0.000103e-6, 76.266071276, 5.594294322 },
{ 0.000104e-6, 949.175608970, 5.674287810 },
{ 0.000101e-6, 13517.870106233, 2.196632348 },
{ 0.000100e-6, 11933.367960670, 4.056084160 },
/* T^2 */
{ 4.322990e-6, 6283.075849991, 2.642893748 },
/* 681, 690 */
{ 0.406495e-6, 0.000000000, 4.712388980 },
{ 0.122605e-6, 12566.151699983, 2.438140634 },
{ 0.019476e-6, 213.299095438, 1.642186981 },
{ 0.016916e-6, 529.690965095, 4.510959344 },
{ 0.013374e-6, -3.523118349, 1.502210314 },
{ 0.008042e-6, 26.298319800, 0.478549024 },
{ 0.007824e-6, 155.420399434, 5.254710405 },
{ 0.004894e-6, 5746.271337896, 4.683210850 },
{ 0.004875e-6, 5760.498431898, 0.759507698 },
{ 0.004416e-6, 5223.693919802, 6.028853166 },
/* 691, 700 */
{ 0.004088e-6, -7.113547001, 0.060926389 },
{ 0.004433e-6, 77713.771467920, 3.627734103 },
{ 0.003277e-6, 18849.227549974, 2.327912542 },
{ 0.002703e-6, 6062.663207553, 1.271941729 },
{ 0.003435e-6, -775.522611324, 0.747446224 },
{ 0.002618e-6, 6076.890301554, 3.633715689 },
{ 0.003146e-6, 206.185548437, 5.647874613 },
{ 0.002544e-6, 1577.343542448, 6.232904270 },
{ 0.002218e-6, -220.412642439, 1.309509946 },
{ 0.002197e-6, 5856.477659115, 2.407212349 },
/* 701, 710 */
{ 0.002897e-6, 5753.384884897, 5.863842246 },
{ 0.001766e-6, 426.598190876, 0.754113147 },
{ 0.001738e-6, -796.298006816, 2.714942671 },
{ 0.001695e-6, 522.577418094, 2.629369842 },
{ 0.001584e-6, 5507.553238667, 1.341138229 },
{ 0.001503e-6, -242.728603974, 0.377699736 },
{ 0.001552e-6, -536.804512095, 2.904684667 },
{ 0.001370e-6, -398.149003408, 1.265599125 },
{ 0.001889e-6, -5573.142801634, 4.413514859 },
{ 0.001722e-6, 6069.776754553, 2.445966339 },
/* 711, 720 */
{ 0.001124e-6, 1059.381930189, 5.041799657 },
{ 0.001258e-6, 553.569402842, 3.849557278 },
{ 0.000831e-6, 951.718406251, 2.471094709 },
{ 0.000767e-6, 4694.002954708, 5.363125422 },
{ 0.000756e-6, 1349.867409659, 1.046195744 },
{ 0.000775e-6, -11.045700264, 0.245548001 },
{ 0.000597e-6, 2146.165416475, 4.543268798 },
{ 0.000568e-6, 5216.580372801, 4.178853144 },
{ 0.000711e-6, 1748.016413067, 5.934271972 },
{ 0.000499e-6, 12036.460734888, 0.624434410 },
/* 721, 730 */
{ 0.000671e-6, -1194.447010225, 4.136047594 },
{ 0.000488e-6, 5849.364112115, 2.209679987 },
{ 0.000621e-6, 6438.496249426, 4.518860804 },
{ 0.000495e-6, -6286.598968340, 1.868201275 },
{ 0.000456e-6, 5230.807466803, 1.271231591 },
{ 0.000451e-6, 5088.628839767, 0.084060889 },
{ 0.000435e-6, 5643.178563677, 3.324456609 },
{ 0.000387e-6, 10977.078804699, 4.052488477 },
{ 0.000547e-6, 161000.685737473, 2.841633844 },
{ 0.000522e-6, 3154.687084896, 2.171979966 },
/* 731, 740 */
{ 0.000375e-6, 5486.777843175, 4.983027306 },
{ 0.000421e-6, 5863.591206116, 4.546432249 },
{ 0.000439e-6, 7084.896781115, 0.522967921 },
{ 0.000309e-6, 2544.314419883, 3.172606705 },
{ 0.000347e-6, 4690.479836359, 1.479586566 },
{ 0.000317e-6, 801.820931124, 3.553088096 },
{ 0.000262e-6, 419.484643875, 0.606635550 },
{ 0.000248e-6, 6836.645252834, 3.014082064 },
{ 0.000245e-6, -1592.596013633, 5.519526220 },
{ 0.000225e-6, 4292.330832950, 2.877956536 },
/* 741, 750 */
{ 0.000214e-6, 7234.794256242, 1.605227587 },
{ 0.000205e-6, 5767.611978898, 0.625804796 },
{ 0.000180e-6, 10447.387839604, 3.499954526 },
{ 0.000229e-6, 199.072001436, 5.632304604 },
{ 0.000214e-6, 639.897286314, 5.960227667 },
{ 0.000175e-6, -433.711737877, 2.162417992 },
{ 0.000209e-6, 515.463871093, 2.322150893 },
{ 0.000173e-6, 6040.347246017, 2.556183691 },
{ 0.000184e-6, 6309.374169791, 4.732296790 },
{ 0.000227e-6, 149854.400134205, 5.385812217 },
/* 751, 760 */
{ 0.000154e-6, 8031.092263058, 5.120720920 },
{ 0.000151e-6, 5739.157790895, 4.815000443 },
{ 0.000197e-6, 7632.943259650, 0.222827271 },
{ 0.000197e-6, 74.781598567, 3.910456770 },
{ 0.000138e-6, 6055.549660552, 1.397484253 },
{ 0.000149e-6, -6127.655450557, 5.333727496 },
{ 0.000137e-6, 3894.181829542, 4.281749907 },
{ 0.000135e-6, 9437.762934887, 5.979971885 },
{ 0.000139e-6, -2352.866153772, 4.715630782 },
{ 0.000142e-6, 6812.766815086, 0.513330157 },
/* 761, 770 */
{ 0.000120e-6, -4705.732307544, 0.194160689 },
{ 0.000131e-6, -71430.695617928, 0.000379226 },
{ 0.000124e-6, 6279.552731642, 2.122264908 },
{ 0.000108e-6, -6256.777530192, 0.883445696 },
/* T^3 */
{ 0.143388e-6, 6283.075849991, 1.131453581 },
{ 0.006671e-6, 12566.151699983, 0.775148887 },
{ 0.001480e-6, 155.420399434, 0.480016880 },
{ 0.000934e-6, 213.299095438, 6.144453084 },
{ 0.000795e-6, 529.690965095, 2.941595619 },
{ 0.000673e-6, 5746.271337896, 0.120415406 },
/* 771, 780 */
{ 0.000672e-6, 5760.498431898, 5.317009738 },
{ 0.000389e-6, -220.412642439, 3.090323467 },
{ 0.000373e-6, 6062.663207553, 3.003551964 },
{ 0.000360e-6, 6076.890301554, 1.918913041 },
{ 0.000316e-6, -21.340641002, 5.545798121 },
{ 0.000315e-6, -242.728603974, 1.884932563 },
{ 0.000278e-6, 206.185548437, 1.266254859 },
{ 0.000238e-6, -536.804512095, 4.532664830 },
{ 0.000185e-6, 522.577418094, 4.578313856 },
{ 0.000245e-6, 18849.227549974, 0.587467082 },
/* 781, 787 */
{ 0.000180e-6, 426.598190876, 5.151178553 },
{ 0.000200e-6, 553.569402842, 5.355983739 },
{ 0.000141e-6, 5223.693919802, 1.336556009 },
{ 0.000104e-6, 5856.477659115, 4.239842759 },
/* T^4 */
{ 0.003826e-6, 6283.075849991, 5.705257275 },
{ 0.000303e-6, 12566.151699983, 5.407132842 },
{ 0.000209e-6, 155.420399434, 1.989815753 }
};
/* Time since J2000.0 in Julian millennia. */
t = ((date1 - DJ00) + date2) / DJM;
/* ================= */
/* Topocentric terms */
/* ================= */
/* Convert UT to local solar time in radians. */
tsol = fmod(ut, 1.0) * D2PI + elong;
/* FUNDAMENTAL ARGUMENTS: Simon et al. 1994. */
/* Combine time argument (millennia) with deg/arcsec factor. */
w = t / 3600.0;
/* Sun Mean Longitude. */
elsun = fmod(280.46645683 + 1296027711.03429 * w, 360.0) * DD2R;
/* Sun Mean Anomaly. */
emsun = fmod(357.52910918 + 1295965810.481 * w, 360.0) * DD2R;
/* Mean Elongation of Moon from Sun. */
d = fmod(297.85019547 + 16029616012.090 * w, 360.0) * DD2R;
/* Mean Longitude of Jupiter. */
elj = fmod(34.35151874 + 109306899.89453 * w, 360.0) * DD2R;
/* Mean Longitude of Saturn. */
els = fmod(50.07744430 + 44046398.47038 * w, 360.0) * DD2R;
/* TOPOCENTRIC TERMS: Moyer 1981 and Murray 1983. */
wt = + 0.00029e-10 * u * sin(tsol + elsun - els)
+ 0.00100e-10 * u * sin(tsol - 2.0 * emsun)
+ 0.00133e-10 * u * sin(tsol - d)
+ 0.00133e-10 * u * sin(tsol + elsun - elj)
- 0.00229e-10 * u * sin(tsol + 2.0 * elsun + emsun)
- 0.02200e-10 * v * cos(elsun + emsun)
+ 0.05312e-10 * u * sin(tsol - emsun)
- 0.13677e-10 * u * sin(tsol + 2.0 * elsun)
- 1.31840e-10 * v * cos(elsun)
+ 3.17679e-10 * u * sin(tsol);
/* ===================== */
/* Fairhead et al. model */
/* ===================== */
/* T**0 */
w0 = 0;
for (j = 473; j >= 0; j--) {
w0 += fairhd[j][0] * sin(fairhd[j][1] * t + fairhd[j][2]);
}
/* T**1 */
w1 = 0;
for (j = 678; j >= 474; j--) {
w1 += fairhd[j][0] * sin(fairhd[j][1] * t + fairhd[j][2]);
}
/* T**2 */
w2 = 0;
for (j = 763; j >= 679; j--) {
w2 += fairhd[j][0] * sin(fairhd[j][1] * t + fairhd[j][2]);
}
/* T**3 */
w3 = 0;
for (j = 783; j >= 764; j--) {
w3 += fairhd[j][0] * sin(fairhd[j][1] * t + fairhd[j][2]);
}
/* T**4 */
w4 = 0;
for (j = 786; j >= 784; j--) {
w4 += fairhd[j][0] * sin(fairhd[j][1] * t + fairhd[j][2]);
}
/* Multiply by powers of T and combine. */
wf = t * (t * (t * (t * w4 + w3) + w2) + w1) + w0;
/* Adjustments to use JPL planetary masses instead of IAU. */
wj = 0.00065e-6 * sin(6069.776754 * t + 4.021194) +
0.00033e-6 * sin( 213.299095 * t + 5.543132) +
(-0.00196e-6 * sin(6208.294251 * t + 5.696701)) +
(-0.00173e-6 * sin( 74.781599 * t + 2.435900)) +
0.03638e-6 * t * t;
/* ============ */
/* Final result */
/* ============ */
/* TDB-TT in seconds. */
w = wt + wf + wj;
return w;
/*-----------------------------------------------------------------------
**
** Copyright (C) 2008
** Standards Of Fundamental Astronomy Review Board
** of the International Astronomical Union.
**
** =====================
** SOFA Software License
** =====================
**
** NOTICE TO USER:
**
** BY USING THIS SOFTWARE YOU ACCEPT THE FOLLOWING TERMS AND CONDITIONS
** WHICH APPLY TO ITS USE.
**
** 1. The Software is owned by the IAU SOFA Review Board ("the Board").
**
** 2. Permission is granted to anyone to use the SOFA software for any
** purpose, including commercial applications, free of charge and
** without payment of royalties, subject to the conditions and
** restrictions listed below.
**
** 3. You (the user) may copy and adapt the SOFA software and its
** algorithms for your own purposes and you may copy and distribute
** a resulting "derived work" to others on a world-wide, royalty-free
** basis, provided that the derived work complies with the following
** requirements:
**
** a) Your work shall be marked or carry a statement that it (i) uses
** routines and computations derived by you from software provided
** by SOFA under license to you; and (ii) does not contain
** software provided by SOFA or software that has been distributed
** by or endorsed by SOFA.
**
** b) The source code of your derived work must contain descriptions
** of how the derived work is based upon and/or differs from the
** original SOFA software.
**
** c) The name(s) of all routine(s) that you distribute shall differ
** from the SOFA names, even when the SOFA content has not been
** otherwise changed.
**
** d) The routine-naming prefix "iau" shall not be used.
**
** e) The origin of the SOFA components of your derived work must not
** be misrepresented; you must not claim that you wrote the
** original software, nor file a patent application for SOFA
** software or algorithms embedded in the SOFA software.
**
** f) These requirements must be reproduced intact in any source
** distribution and shall apply to anyone to whom you have granted
** a further right to modify the source code of your derived work.
**
** 4. In any published work or commercial products which includes
** results achieved by using the SOFA software, you shall acknowledge
** that the SOFA software was used in obtaining those results.
**
** 5. You shall not cause the SOFA software to be brought into
** disrepute, either by misuse, or use for inappropriate tasks, or by
** inappropriate modification.
**
** 6. The SOFA software is provided "as is" and the Board makes no
** warranty as to its use or performance. The Board does not and
** cannot warrant the performance or results which the user may obtain
** by using the SOFA software. The Board makes no warranties, express
** or implied, as to non-infringement of third party rights,
** merchantability, or fitness for any particular purpose. In no
** event will the Board be liable to the user for any consequential,
** incidental, or special damages, including any lost profits or lost
** savings, even if a Board representative has been advised of such
** damages, or for any claim by any third party.
**
** 7. The provision of any version of the SOFA software under the terms
** and conditions specified herein does not imply that future
** versions will also be made available under the same terms and
** conditions.
**
** Correspondence concerning SOFA software should be addressed as
** follows:
**
** Internet email: sofa@rl.ac.uk
** Postal address: IAU SOFA Center
** Rutherford Appleton Laboratory
** Chilton, Didcot, Oxon OX11 0QX
** United Kingdom
**
**---------------------------------------------------------------------*/
}
|
openmp_all.c | #include <stdio.h>
#include <getopt.h>
#include <math.h>
#include <omp.h>
#include <stdlib.h>
#include <string.h>
#ifdef __APPLE__
#include <cblas.h>
#define set_num_threads(x) openblas_set_num_threads(x)
#define get_num_threads() openblas_get_num_threads()
#else
#include <mkl.h>
#define set_num_threads(x) mkl_set_num_threads(x)
#define get_num_threads() mkl_get_num_threads()
#endif
double norm_inf_mat_Y11(double *X, int n, int extra);
/*
 * Benchmark driver.  Reads a binary file (-f) containing: int n, an
 * n-by-n row-major double matrix A, and an n-vector B.  Phase 1 ("mexp")
 * builds the augmented (n+1)x(n+1) matrix
 *     Y = [ I - A/lambda   B/lambda ]
 *         [ 0              1        ]
 * (lambda = inf-norm of the augmented system) and squares it repeatedly
 * until the top-left block is negligible relative to the propagated
 * column.  Phase 2 solves A x = B with conjugate gradients.  Both phases
 * are timed with omp_get_wtime(); -t sets the BLAS and OpenMP thread
 * counts.  Both phases expect an all-ones result and report deviations.
 */
int main(int argc, char **argv) {
    int n = 0, i, j;
    char *filename = NULL;
    double tolerance = 1e-8;
    int option = 0;
    int threads_num = 1;
    int rc = 0;

    /* Command line: -t <threads> -f <input file>. */
    while ((option = getopt(argc, argv, "t:f:")) != -1) {
        switch (option) {
            case 'f':
                filename = optarg;
                break;
            case 't':
                threads_num = atoi(optarg);
                break;
            default:
                printf("Usage: mexp -f string \n");
                return 0;
        }
    }
    if (filename == NULL) {
        printf("Usage: mexp -t num_threads -f string \n");
        return 0;
    }

    double *A = NULL, *R = NULL, *B = NULL, *W = NULL;
    double *x = NULL, *r = NULL, *p = NULL, *w = NULL;

    FILE *file = fopen(filename, "rb");
    if (!file) {
        printf("Unable to open file!");
        return 1;
    }
    /* Header: the matrix order.  (The old code never checked freads.) */
    if (fread(&n, sizeof(int), 1, file) != 1 || n <= 0) {
        printf("Unable to read matrix order!");
        fclose(file);
        return 1;
    }

    /* Y and its squaring workspace are (n+1)x(n+1); the rest n-vectors. */
    A = malloc((size_t)(n + 1) * (n + 1) * sizeof(double));
    R = malloc((size_t)(n + 1) * (n + 1) * sizeof(double));
    B = malloc((size_t)(n + 1) * sizeof(double));
    W = malloc((size_t) n * sizeof(double));
    x = malloc((size_t) n * sizeof(double));
    r = malloc((size_t) n * sizeof(double));
    p = malloc((size_t) n * sizeof(double));
    w = malloc((size_t) n * sizeof(double));
    if (!A || !R || !B || !W || !x || !r || !p || !w) {
        printf("Out of memory!");
        rc = 1;
        goto cleanup;
    }

    /* Bug fix: the old code printed threads_num++ here, so the value
     * later passed to set_num_threads() was one more than reported. */
    printf("set_threads_num:%d\t", threads_num);

    memset(A, 0, sizeof(double) * (size_t)(n + 1) * (n + 1));
    memset(R, 0, sizeof(double) * (size_t)(n + 1) * (n + 1));
    /* Read the n-by-n matrix into the top-left block of A
     * (leading dimension n + 1). */
    for (i = 0; i < n; ++i) {
        if (fread(A + (size_t) i * (n + 1), sizeof(double), (size_t) n, file)
            != (size_t) n) {
            printf("Unable to read matrix row!");
            rc = 1;
            goto cleanup;
        }
    }
    memset(B, 0, sizeof(double) * (size_t)(n + 1));
    if (fread(B, sizeof(double), (size_t) n, file) != (size_t) n) {
        printf("Unable to read rhs!");
        rc = 1;
        goto cleanup;
    }

    set_num_threads(threads_num);
    omp_set_num_threads(threads_num);

    /* Last column of Y (rows 0..n-1) <- B. */
    cblas_dcopy(n, B, 1, A + n, n + 1);

    double omp_time = omp_get_wtime();
    double lambda = norm_inf_mat_Y11(A, n, 1);

    /* Initialize Y: top-left block becomes I - A/lambda, last column is
     * scaled by lambda, bottom row is (0,...,0,1). */
    for (i = 0; i < n; ++i) {
        double *row = A + (size_t) i * (n + 1);
#pragma omp parallel for
        for (j = 0; j < n; ++j) {
            row[j] = -row[j] / lambda;
        }
    }
#pragma omp parallel for
    for (i = 0; i < (n + 1) * (n + 1); i += n + 2) {
        A[i] += 1;      /* add the identity along the diagonal */
    }
#pragma omp parallel for
    for (i = n; i < (n + 1) * n; i += n + 1) {
        A[i] /= lambda; /* scale the B column */
    }
#pragma omp parallel for
    for (i = 0; i < n; ++i) {
        A[(size_t) n * (n + 1) + i] = 0.0;  /* zero the bottom row */
    }
    /* Bug fix: the old code wrote A[(n+1)*(n+1)], one element past the
     * end of the buffer; the bottom-right entry is (n+1)*(n+1) - 1. */
    A[(size_t)(n + 1) * (n + 1) - 1] = 1.0;

    /* Repeated squaring: A <- A*A until the Y11 block is negligible
     * relative to the largest entry of the propagated column. */
    double a, b, *T;
    size_t index;
    int itern = 0;
    while (1) {
        a = norm_inf_mat_Y11(A, n, 1);
        index = cblas_idamax(n, A + n, n + 1);
        b = A[(index + 1) * (n + 1) - 1];
        if (a / b < tolerance)
            break;
        /* R = A*A, then swap so A always holds the newest power. */
        cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, n + 1, n + 1,
                    n + 1, 1.0, A, n + 1, A, n + 1, 0.0, R, n + 1);
        T = A;
        A = R;
        R = T;
        itern++;
    }
    omp_time = omp_get_wtime() - omp_time;

    /* Check: the propagated column should converge to all ones. */
    cblas_dcopy(n, A + n, n + 1, B, 1);
    for (i = 0; i < n; ++i) {
        if (fabs(B[i] - 1) > 1e-3) {
            printf("MEXP ERR %f\t", B[i]);
            break;
        }
    }
    printf("mexp_iter:%d\t", itern);
    printf("omp_time:%f\t", omp_time);

    /* Phase 2: re-read the system (leading dimension n this time) and
     * solve A x = B with conjugate gradients. */
    fseek(file, 0, SEEK_SET);
    if (fread(&n, sizeof(int), 1, file) != 1
        || fread(A, sizeof(double), (size_t) n * n, file) != (size_t) n * n) {
        printf("Unable to re-read matrix!");
        rc = 1;
        goto cleanup;
    }
    memset(B, 0, sizeof(double) * (size_t)(n + 1));
    if (fread(B, sizeof(double), (size_t) n, file) != (size_t) n) {
        printf("Unable to re-read rhs!");
        rc = 1;
        goto cleanup;
    }

    /* W was intended as a (currently identity) Jacobi preconditioner. */
#pragma omp parallel for
    for (i = 0; i < n; ++i) {
        W[i] = 1.0;
    }

    omp_time = omp_get_wtime();
    int iteration = 0;
    double alpha, beta, rho_new = 0.0, rho_old;
    memset(x, 0, sizeof(double) * (size_t) n);
    cblas_dcopy(n, B, 1, r, 1);          /* r <- B (since x starts at 0) */
    rho_new = cblas_ddot(n, r, 1, r, 1); /* rho <- r'r                   */
    double w_norm2 = sqrt(rho_new);
    if (w_norm2 == 0.0)
        w_norm2 = 1.0; /* zero rhs: x = 0 is exact; avoid 0/0 below */
    while (sqrt(rho_new) / w_norm2 >= tolerance) {
        iteration++;
        if (iteration == 1) {
            cblas_dcopy(n, r, 1, p, 1);      /* p <- r             */
        } else {
            beta = rho_new / rho_old;
            cblas_dscal(n, beta, p, 1);      /* p <- beta * p      */
            cblas_daxpy(n, 1.0, r, 1, p, 1); /* p <- r + beta * p  */
        }
        cblas_dsymv(CblasRowMajor, CblasUpper, n, 1.0, A, n, p, 1, 0.0, w, 1);
        alpha = rho_new / cblas_ddot(n, p, 1, w, 1);
        cblas_daxpy(n, alpha, p, 1, x, 1);   /* x <- x + alpha * p */
        cblas_daxpy(n, -alpha, w, 1, r, 1);  /* r <- r - alpha * w */
        rho_old = rho_new;
        rho_new = cblas_ddot(n, r, 1, r, 1); /* rho_new = r'r      */
    }
    printf("cg_it: %d\t", iteration);
    printf("cg_omp_time: %f", omp_get_wtime() - omp_time);

    /* Check: the reference solution is all ones. */
    for (i = 0; i < n; ++i) {
        if (fabs(x[i] - 1) > 1e-3) {
            printf(" CG ERR %f\t", x[i]);
            break;
        }
    }
    printf("\n");

cleanup:
    /* free(NULL) is a no-op, so this is safe on every path. */
    free(A);
    free(B);
    free(W);
    free(R);
    free(x);
    free(r);
    free(p);
    free(w);
    fclose(file);
    return rc;
}
/*
 * Infinity norm of the leading n-by-n submatrix of X, whose rows are
 * stored with leading dimension (n + extra): the maximum over the first
 * n rows of the sum of absolute values of the first n entries.
 */
double norm_inf_mat_Y11(double *X, int n, int extra) {
    double max_row_sum = 0.0;
    int ld = n + extra; /* row stride (leading dimension) */
    for (int row = 0; row < n; ++row) {
        double row_sum = cblas_dasum(n, X + (size_t) row * ld, 1);
        max_row_sum = fmax(max_row_sum, row_sum);
    }
    return max_row_sum;
}
|
GB_binop__pow_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__pow_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__pow_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__pow_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__pow_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__pow_uint16)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__pow_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__pow_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pow_uint16)
// C=scalar+B GB (_bind1st__pow_uint16)
// C=scalar+B' GB (_bind1st_tran__pow_uint16)
// C=A+scalar GB (_bind2nd__pow_uint16)
// C=A'+scalar GB (_bind2nd_tran__pow_uint16)
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = GB_pow_uint16 (aij, bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_pow_uint16 (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_POW || GxB_NO_UINT16 || GxB_NO_POW_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense, specialized for the POW
// operator on uint16_t.  The loop itself lives in the included template,
// which is driven by the GB_BINOP / GB_GETA / GB_GETB macros defined
// earlier in this (auto-generated) file.
GrB_Info GB (_Cdense_ewise3_noaccum__pow_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// operator compiled out via GB_control.h; caller falls back to generic code
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C with the POW
// operator on uint16_t.  B_ek_slicing describes how B's entries were
// partitioned into B_ntasks tasks over B_nthreads threads.
GrB_Info GB (_Cdense_accumB__pow_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
// operator compiled out via GB_control.h; caller falls back to generic code
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C with the POW
// operator on uint16_t.  p_bwork points at the scalar, passed as GB_void
// so all type variants share one signature.
GrB_Info GB (_Cdense_accumb__pow_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
// operator compiled out via GB_control.h; caller falls back to generic code
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE: unreachable (the template block above always returns); harmless
// artifact of the code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B with the POW operator on
// uint16_t.  The pattern of C is the set union of A and B; the included
// template handles every sparsity format, optional (complemented) mask,
// and the precomputed task decomposition in TaskList.
GrB_Info GB (_AaddB__pow_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// operator compiled out via GB_control.h; caller falls back to generic code
return (GrB_NO_VALUE) ;
#else
// workspaces for slicing M, A, and B; freed by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B, C<M> = A.*B, or C<!M> = A.*B where C is
// sparse or hypersparse, with the POW operator on uint16_t.  The pattern of
// C is the set intersection of A and B.
GrB_Info GB (_AemultB_08__pow_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// operator compiled out via GB_control.h; caller falls back to generic code
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B where A is sparse/hyper and B is
// bitmap/full, with the POW operator on uint16_t.  POW is non-commutative
// and has no dedicated flipped opcode, so when flipxy is true the template
// is instantiated with GB_FLIPPED=1 to evaluate pow(y,x) instead.
GrB_Info GB (_AemultB_02__pow_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
// operator compiled out via GB_control.h; caller falls back to generic code
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__pow_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// work done by the template, specialized via the GB_* macros above
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__pow_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// work done by the template, specialized via the GB_* macros above
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__pow_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx [k] = pow (x, Bx [k]) for each entry present in B, x bound first
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    uint16_t x = (*((uint16_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        if (GBB (Bb, k))
        {
            uint16_t bij = GBX (Bx, k, false) ;
            Cx [k] = GB_pow_uint16 (x, bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__pow_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx [k] = pow (Ax [k], y) for each entry present in A, y bound second
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t y = (*((uint16_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        if (GBB (Ab, k))
        {
            uint16_t aij = GBX (Ax, k, false) ;
            Cx [k] = GB_pow_uint16 (aij, y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// This macro is consumed by GB_unop_transpose.c, included below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_pow_uint16 (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__pow_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// This macro is consumed by GB_unop_transpose.c, included below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_pow_uint16 (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__pow_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the scalar y is bound as the 2nd operand; A is transposed and applied
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Example_udr.1.c | /*
* @@name: udr.1.c
* @@type: C
* @@compilable: yes
* @@linkable: no
* @@expect: success
* @@version: omp_4.0
*/
#include <stdio.h>
#include <limits.h>
// A 2-D integer point, reduced component-wise by the combiners below.
struct point {
    int x;
    int y;
};

// Combiner for the user-defined "min" reduction:
// fold *in into *out, keeping the smaller coordinate per component.
void minproc ( struct point *out, struct point *in )
{
    out->x = (in->x < out->x) ? in->x : out->x;
    out->y = (in->y < out->y) ? in->y : out->y;
}

// Combiner for the user-defined "max" reduction:
// fold *in into *out, keeping the larger coordinate per component.
void maxproc ( struct point *out, struct point *in )
{
    out->x = (in->x > out->x) ? in->x : out->x;
    out->y = (in->y > out->y) ? in->y : out->y;
}
// User-defined OpenMP "min" reduction over struct point: partial results
// are combined with minproc; each private copy starts at the identity
// { INT_MAX, INT_MAX }.
#pragma omp declare reduction(min : struct point : \
minproc(&omp_out, &omp_in)) \
initializer( omp_priv = { INT_MAX, INT_MAX } )
// Same for "max", with identity { 0, 0 }.
// NOTE(review): a { 0, 0 } identity assumes coordinates are non-negative;
// points with negative coordinates would not affect the max -- confirm.
#pragma omp declare reduction(max : struct point : \
maxproc(&omp_out, &omp_in)) \
initializer( omp_priv = { 0, 0 } )
// Compute and print the min and max corners of the bounding rectangle of
// points[0..n-1], in parallel via the reductions declared above.
void find_enclosing_rectangle ( int n, struct point points[] )
{
struct point minp = { INT_MAX, INT_MAX }, maxp = {0,0};
int i;
// each thread reduces into private minp/maxp; copies merge at loop end
#pragma omp parallel for reduction(min:minp) reduction(max:maxp)
for ( i = 0; i < n; i++ ) {
minproc(&minp, &points[i]);
maxproc(&maxp, &points[i]);
}
printf("min = (%d, %d)\n", minp.x, minp.y);
printf("max = (%d, %d)\n", maxp.x, maxp.y);
}
|
seidel-2d.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 20x1000. */
#include "seidel-2d.h"
/* Array initialization. */
static
void init_array (int n,
DATA_TYPE POLYBENCH_2D(A,N,N,n,n))
{
// the annotate attributes carry value-range hints for precision-tuning
// tools; they do not affect the computation itself
int i __attribute__((annotate("scalar(range(0, " PB_XSTR(N) ") final)")));
int j __attribute__((annotate("scalar(range(0, " PB_XSTR(N) ") final)")));
// fill A with deterministic values derived from the indices
for (i = 0; i < n; i++)
for (j = 0; j < n; j++)
A[i][j] = ((DATA_TYPE) i*(j+2) + 2) / n;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int n,
DATA_TYPE POLYBENCH_2D(A,N,N,n,n))
{
int i, j;
// dump every element to stderr so dead-code elimination cannot remove
// the kernel; a newline every 20 values keeps lines readable
for (i = 0; i < n; i++)
for (j = 0; j < n; j++) {
fprintf(stderr, DATA_PRINTF_MODIFIER, A[i][j]);
if ((i * n + j) % 20 == 0) fprintf(stderr, "\n");
}
fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_seidel_2d(int tsteps,
int n,
DATA_TYPE POLYBENCH_2D(A,N,N,n,n))
{
int t, i, j;
#pragma scop
// NOTE(review): Gauss-Seidel updates A[i][j] from neighbors updated in
// the SAME time step (A[i-1][*] and A[i][j-1]), so the collapsed
// parallel-for below races on those elements and may produce results
// that differ from the sequential ordering -- confirm this relaxation
// is intended for this benchmark.
#pragma omp parallel private (t,i,j)
{
// only the master thread runs the sequential time loop; each step
// opens a nested parallel-for over the interior of the grid
#pragma omp master
{
for (t = 0; t <= _PB_TSTEPS - 1; t++) {
#pragma omp parallel for schedule(static) collapse (2)
for (i = 1; i<= _PB_N - 2; i++) {
for (j = 1; j <= _PB_N - 2; j++) {
// average of the 3x3 neighborhood (9-point stencil)
A[i][j] = (A[i-1][j-1] + A[i-1][j] + A[i-1][j+1]
+ A[i][j-1] + A[i][j] + A[i][j+1]
+ A[i+1][j-1] + A[i+1][j] + A[i+1][j+1])/9.0;
}
}
}
}
}
#pragma endscop
}
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int n = N;
int tsteps = TSTEPS;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE __attribute__((annotate("target('A') scalar()"))), N, N, n, n);
/* Initialize array(s). */
init_array (n, POLYBENCH_ARRAY(A));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_seidel_2d (tsteps, n, POLYBENCH_ARRAY(A));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(A)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(A);
return 0;
}
|
GB_unaryop__lnot_fp64_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_fp64_int8
// op(A') function: GB_tran__lnot_fp64_int8
// C type: double
// A type: int8_t
// cast: double cij = (double) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
double z = (double) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_FP64 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__lnot_fp64_int8
(
double *restrict Cx,
const int8_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx [p] = lnot (cast (Ax [p])) for all anz entries, in parallel;
// GB_CAST_OP is defined earlier in this file for this op/type pair
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__lnot_fp64_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// work done by the transpose template, specialized by the macros above
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__islt_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__islt_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__islt_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__islt_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__islt_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__islt_int64)
// A*D function (colscale): GB (_AxD__islt_int64)
// D*A function (rowscale): GB (_DxB__islt_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__islt_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__islt_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__islt_int64)
// C=scalar+B GB (_bind1st__islt_int64)
// C=scalar+B' GB (_bind1st_tran__islt_int64)
// C=A+scalar GB (_bind2nd__islt_int64)
// C=A'+scalar GB (_bind2nd_tran__islt_int64)
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x < y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLT || GxB_NO_INT64 || GxB_NO_ISLT_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// This variant is compiled out (#if 0): the ISLT operator is not one of
// the ops supported by the dense ewise3-accum kernel.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__islt_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// work done by the template, specialized via the GB_* macros above
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__islt_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// work done by the template, specialized via the GB_* macros above
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__islt_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
// work done by the template, specialized via the GB_* macros above
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// (the duplicate, unreachable return that followed the block above
// has been removed; the in-scope return is the only exit)
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__islt_int64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the template writes directly into C->x, typed for this kernel
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__islt_int64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the template writes directly into C->x, typed for this kernel
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__islt_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace declared here is used and freed by the included template
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__islt_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// work done by the template, specialized via the GB_* macros above
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__islt_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// GB_BINOP_FLIP is 0 for this operator (defined above), so only the
// second branch below is compiled in.
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__islt_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// work done by the template, specialized via the GB_* macros above
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__islt_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// work done by the template, specialized via the GB_* macros above
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__islt_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx [k] = (x < Bx [k]) for each entry present in B, x bound first
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t x = (*((int64_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        if (GBB (Bb, k))
        {
            int64_t bij = GBX (Bx, k, false) ;
            Cx [k] = (x < bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__islt_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx [k] = (Ax [k] < y) for each entry present in A, y bound second
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t y = (*((int64_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        if (GBB (Ab, k))
        {
            int64_t aij = GBX (Ax, k, false) ;
            Cx [k] = (aij < y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// This macro is consumed by GB_unop_transpose.c, included below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x < aij) ; \
}
GrB_Info GB (_bind1st_tran__islt_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// This macro is consumed by GB_unop_transpose.c, included below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij < y) ; \
}
GrB_Info GB (_bind2nd_tran__islt_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the scalar y is bound as the 2nd operand; A is transposed and applied
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_emult_03.c | //------------------------------------------------------------------------------
// GB_emult_03: C<M>= A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// C<M>= A.*B, M sparse/hyper, A and B bitmap/full. C has the same sparsity
// structure as M, and its pattern is a subset of M.
// ------------------------------------------
// C <M>= A .* B
// ------------------------------------------
// sparse sparse bitmap bitmap (method: 03)
// sparse sparse bitmap full (method: 03)
// sparse sparse full bitmap (method: 03)
// sparse sparse full full (method: 03)
// TODO: this function can also do eWiseAdd, just as easily.
// Just change the "&&" to "||" in the GB_emult_03_template.
// If A and B are both full, eadd and emult are identical.
#include "GB_ewise.h"
#include "GB_emult.h"
#include "GB_binop.h"
#include "GB_unused.h"
#ifndef GBCOMPACT
#include "GB_binop__include.h"
#endif
#define GB_FREE_WORK \
{ \
GB_WERK_POP (Work, int64_t) ; \
GB_WERK_POP (M_ek_slicing, int64_t) ; \
}
#define GB_FREE_ALL \
{ \
GB_FREE_WORK ; \
GB_phbix_free (C) ; \
}
GrB_Info GB_emult_03 // C<M>=A.*B, M sparse/hyper, A and B bitmap/full
(
GrB_Matrix C, // output matrix, static header
const GrB_Type ctype, // type of output matrix C
const bool C_is_csc, // format of output matrix C
const GrB_Matrix M, // sparse/hyper, not NULL
const bool Mask_struct, // if true, use the only structure of M
bool *mask_applied, // if true, the mask was applied
const GrB_Matrix A, // input A matrix (bitmap/full)
const GrB_Matrix B, // input B matrix (bitmap/full)
const GrB_BinaryOp op, // op to perform C = op (A,B)
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GrB_Info info ;
ASSERT (C != NULL && C->static_header) ;
ASSERT_MATRIX_OK (M, "M for emult_03", GB0) ;
ASSERT_MATRIX_OK (A, "A for emult_03", GB0) ;
ASSERT_MATRIX_OK (B, "B for emult_03", GB0) ;
ASSERT_BINARYOP_OK (op, "op for emult_03", GB0) ;
ASSERT (GB_IS_SPARSE (M) || GB_IS_HYPERSPARSE (M)) ;
ASSERT (!GB_PENDING (M)) ;
ASSERT (GB_JUMBLED_OK (M)) ;
ASSERT (!GB_ZOMBIES (M)) ;
ASSERT (GB_IS_BITMAP (A) || GB_IS_FULL (A) || GB_as_if_full (A)) ;
ASSERT (GB_IS_BITMAP (B) || GB_IS_FULL (B) || GB_as_if_full (B)) ;
int C_sparsity = GB_sparsity (M) ;
GBURBLE ("emult_03:(%s<%s>=%s.*%s) ",
GB_sparsity_char (C_sparsity),
GB_sparsity_char_matrix (M),
GB_sparsity_char_matrix (A),
GB_sparsity_char_matrix (B)) ;
//--------------------------------------------------------------------------
// declare workspace
//--------------------------------------------------------------------------
GB_WERK_DECLARE (Work, int64_t) ;
int64_t *restrict Wfirst = NULL ;
int64_t *restrict Wlast = NULL ;
int64_t *restrict Cp_kfirst = NULL ;
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
//--------------------------------------------------------------------------
// get M, A, and B
//--------------------------------------------------------------------------
const int64_t *restrict Mp = M->p ;
const int64_t *restrict Mh = M->h ;
const int64_t *restrict Mi = M->i ;
const GB_void *restrict Mx = (Mask_struct) ? NULL : (GB_void *) M->x ;
const int64_t vlen = M->vlen ;
const int64_t vdim = M->vdim ;
const int64_t nvec = M->nvec ;
const int64_t mnz = GB_nnz (M) ;
const size_t msize = M->type->size ;
const int8_t *restrict Ab = A->b ;
const int8_t *restrict Bb = B->b ;
//--------------------------------------------------------------------------
// check if C is iso and compute its iso value if it is
//--------------------------------------------------------------------------
const size_t csize = ctype->size ;
GB_void cscalar [GB_VLA(csize)] ;
bool C_iso = GB_iso_emult (cscalar, ctype, A, B, op) ;
//--------------------------------------------------------------------------
// allocate C->p and C->h
//--------------------------------------------------------------------------
GB_OK (GB_new (&C, true, // sparse or hyper (same as M), static header
ctype, vlen, vdim, GB_Ap_calloc, C_is_csc,
C_sparsity, M->hyper_switch, nvec, Context)) ;
int64_t *restrict Cp = C->p ;
//--------------------------------------------------------------------------
// slice the mask matrix M
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int M_ntasks, M_nthreads ;
GB_SLICE_MATRIX (M, 8, chunk) ;
//--------------------------------------------------------------------------
// allocate workspace
//--------------------------------------------------------------------------
GB_WERK_PUSH (Work, 3*M_ntasks, int64_t) ;
if (Work == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
Wfirst = Work ;
Wlast = Work + M_ntasks ;
Cp_kfirst = Work + M_ntasks * 2 ;
//--------------------------------------------------------------------------
// count entries in C
//--------------------------------------------------------------------------
// This phase is very similar to GB_select_phase1 (GB_ENTRY_SELECTOR).
// TODO: if M is structural and A and B are both full, then C has exactly
// the same pattern as M, the first phase can be skipped.
int tid ;
#pragma omp parallel for num_threads(M_nthreads) schedule(dynamic,1)
for (tid = 0 ; tid < M_ntasks ; tid++)
{
int64_t kfirst = kfirst_Mslice [tid] ;
int64_t klast = klast_Mslice [tid] ;
Wfirst [tid] = 0 ;
Wlast [tid] = 0 ;
for (int64_t k = kfirst ; k <= klast ; k++)
{
// count the entries in C(:,j)
int64_t j = GBH (Mh, k) ;
int64_t pstart = j * vlen ; // start of A(:,j) and B(:,j)
int64_t pM, pM_end ;
GB_get_pA (&pM, &pM_end, tid, k,
kfirst, klast, pstart_Mslice, Mp, vlen) ;
int64_t cjnz = 0 ;
for ( ; pM < pM_end ; pM++)
{
bool mij = GB_mcast (Mx, pM, msize) ;
if (mij)
{
int64_t i = Mi [pM] ;
cjnz +=
(GBB (Ab, pstart + i)
&& // TODO: for GB_add, use || instead
GBB (Bb, pstart + i)) ;
}
}
if (k == kfirst)
{
Wfirst [tid] = cjnz ;
}
else if (k == klast)
{
Wlast [tid] = cjnz ;
}
else
{
Cp [k] = cjnz ;
}
}
}
//--------------------------------------------------------------------------
// finalize Cp, cumulative sum of Cp and compute Cp_kfirst
//--------------------------------------------------------------------------
GB_ek_slice_merge1 (Cp, Wfirst, Wlast, M_ek_slicing, M_ntasks) ;
GB_ek_slice_merge2 (&(C->nvec_nonempty), Cp_kfirst, Cp, nvec,
Wfirst, Wlast, M_ek_slicing, M_ntasks, M_nthreads, Context) ;
//--------------------------------------------------------------------------
// allocate C->i and C->x
//--------------------------------------------------------------------------
int64_t cnz = Cp [nvec] ;
// set C->iso = C_iso OK
GB_OK (GB_bix_alloc (C, cnz, GxB_SPARSE, false, true, C_iso, Context)) ;
//--------------------------------------------------------------------------
// copy pattern into C
//--------------------------------------------------------------------------
// TODO: could make these components of C shallow instead
if (GB_IS_HYPERSPARSE (M))
{
// copy M->h into C->h
GB_memcpy (C->h, Mh, nvec * sizeof (int64_t), M_nthreads) ;
}
C->nvec = nvec ;
C->jumbled = M->jumbled ;
C->magic = GB_MAGIC ;
//--------------------------------------------------------------------------
// get the opcode
//--------------------------------------------------------------------------
GB_Opcode opcode = op->opcode ;
bool op_is_positional = GB_OPCODE_IS_POSITIONAL (opcode) ;
bool op_is_first = (opcode == GB_FIRST_opcode) ;
bool op_is_second = (opcode == GB_SECOND_opcode) ;
bool op_is_pair = (opcode == GB_PAIR_opcode) ;
GB_Type_code ccode = ctype->code ;
//--------------------------------------------------------------------------
// check if the values of A and/or B are ignored
//--------------------------------------------------------------------------
// With C = ewisemult (A,B), only the intersection of A and B is used.
// If op is SECOND or PAIR, the values of A are never accessed.
// If op is FIRST or PAIR, the values of B are never accessed.
// If op is PAIR, the values of A and B are never accessed.
// Contrast with ewiseadd.
// A is passed as x, and B as y, in z = op(x,y)
bool A_is_pattern = op_is_second || op_is_pair || op_is_positional ;
bool B_is_pattern = op_is_first || op_is_pair || op_is_positional ;
//--------------------------------------------------------------------------
// using a built-in binary operator (except for positional operators)
//--------------------------------------------------------------------------
#define GB_PHASE_2_OF_2
if (C_iso)
{
//----------------------------------------------------------------------
// C is iso
//----------------------------------------------------------------------
// Cx [0] = cscalar = op (A,B)
GB_BURBLE_MATRIX (C, "(iso emult) ") ;
memcpy (C->x, cscalar, csize) ;
// pattern of C = set intersection of pattern of A and B
#define GB_ISO_EMULT
#include "GB_emult_03_template.c"
}
else
{
//----------------------------------------------------------------------
// C is non-iso
//----------------------------------------------------------------------
bool done = false ;
#ifndef GBCOMPACT
//------------------------------------------------------------------
// define the worker for the switch factory
//------------------------------------------------------------------
#define GB_AemultB_03(mult,xname) GB (_AemultB_03_ ## mult ## xname)
#define GB_BINOP_WORKER(mult,xname) \
{ \
info = GB_AemultB_03(mult,xname) (C, M, Mask_struct, A, B, \
Cp_kfirst, M_ek_slicing, M_ntasks, M_nthreads) ; \
done = (info != GrB_NO_VALUE) ; \
} \
break ;
//------------------------------------------------------------------
// launch the switch factory
//------------------------------------------------------------------
GB_Type_code xcode, ycode, zcode ;
if (!op_is_positional &&
GB_binop_builtin (A->type, A_is_pattern, B->type, B_is_pattern,
op, false, &opcode, &xcode, &ycode, &zcode) && ccode == zcode)
{
#define GB_NO_PAIR
#include "GB_binop_factory.c"
}
#endif
//----------------------------------------------------------------------
// generic worker
//----------------------------------------------------------------------
if (!done)
{
GB_BURBLE_MATRIX (C, "(generic emult_03: %s) ", op->name) ;
GB_ewise_generic (C, op, NULL, 0, 0,
NULL, NULL, NULL, C_sparsity, GB_EMULT_METHOD_03, Cp_kfirst,
M_ek_slicing, M_ntasks, M_nthreads, NULL, 0, 0, NULL, 0, 0,
M, Mask_struct, false, A, B, Context) ;
}
}
//--------------------------------------------------------------------------
// remove empty vectors from C, if hypersparse
//--------------------------------------------------------------------------
GB_OK (GB_hypermatrix_prune (C, Context)) ;
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
GB_FREE_WORK ;
ASSERT_MATRIX_OK (C, "C output for emult_03", GB0) ;
(*mask_applied) = true ;
return (GrB_SUCCESS) ;
}
|
DRB048-firstprivate-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
/*
Example use of firstprivate()
*/
/* Add the scalar offset g to each of the first n elements of a.
 * The loop is OpenMP-parallel; every iteration writes a distinct
 * element of a, so the workers never conflict (no data race). */
void foo(int * a, int n, int g)
{
    #pragma omp parallel for
    for (int idx = 0; idx < n; idx++) {
        a[idx] += g;
    }
}
int a[100];
/* Driver: fill the global array a[] with 0..99 in parallel, add 7 to
 * every element via foo(), then print the resulting values. */
int main()
{
    const int n = 100;

    #pragma omp parallel for
    for (int k = 0; k < n; k++) {
        a[k] = k;
    }

    foo(a, 100, 7);

    for (int k = 0; k < n; k++) {
        printf("%d\n", a[k]);
    }
    return 0;
}
|
stream.c | // Copyright 2009-2020 NTESS. Under the terms
// of Contract DE-NA0003525 with NTESS, the U.S.
// Government retains certain rights in this software.
//
// Copyright (c) 2009-2020, NTESS
// All rights reserved.
//
// Portions are copyright of other developers:
// See the file CONTRIBUTORS.TXT in the top level directory
// the distribution for more information.
//
// This file is part of the SST software package. For license
// information, see the LICENSE file in the top level directory of the
// distribution.
#include <stdio.h>
#include <stdlib.h>
/* Simple STREAM-like triad benchmark used to exercise Ariel + CramSim:
 * allocates three double arrays, computes c = 2.0*a + 1.5*b in an
 * OpenMP loop, prints the checksum of c, and frees everything.
 * Returns EXIT_SUCCESS, or EXIT_FAILURE if an allocation fails. */
int main(int argc, char* argv[]) {
    const int LENGTH = 2000;

    printf("\n\n\nHello CramSim!!!!\n");
    printf("Run a stream application with ariel and cramsim\n");
    printf("------------------------------------------------------\n");
    printf("Allocating arrays of size %d elements.\n", LENGTH);

    double* a = (double*) malloc(sizeof(double) * LENGTH);
    double* b = (double*) malloc(sizeof(double) * LENGTH);
    double* c = (double*) malloc(sizeof(double) * LENGTH);

    /* malloc can fail; bail out instead of dereferencing NULL below.
     * free(NULL) is a no-op, so unconditional frees are safe here. */
    if (a == NULL || b == NULL || c == NULL) {
        fprintf(stderr, "Allocation failed.\n");
        free(a);
        free(b);
        free(c);
        return EXIT_FAILURE;
    }

    printf("Done allocating arrays.\n");

    int i;
    for(i = 0; i < LENGTH; ++i) {
        a[i] = i;
        b[i] = LENGTH - i;
        c[i] = 0;
    }

    printf("Perfoming the fast_c compute loop...\n");

    /* Triad kernel: each iteration writes a distinct c[i], so the
     * parallel loop is race-free. */
    #pragma omp parallel for
    for(i = 0; i < LENGTH; ++i) {
        c[i] = 2.0 * a[i] + 1.5 * b[i];
    }

    double sum = 0;
    for(i = 0; i < LENGTH; ++i) {
        sum += c[i];
    }

    printf("Sum of arrays is: %f\n", sum);
    printf("Freeing arrays...\n");

    free(a);
    free(b);
    free(c);

    printf("Done.\n");
    return EXIT_SUCCESS;
}
|
nested_parallel_for_block_omp.c | /* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
/*
* See COPYRIGHT in top-level directory.
*/
/* Nested Pragma omp parallel for directives evaluation
* Output: avg time
*/
#include <assert.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <unistd.h>
#define NUM 1000
#define NUM_REPS 10
int in[NUM][NUM];
int out[NUM][NUM];
/* Vector initialization */
void init(void)
{
int i, j;
for (i = 0; i < NUM; i++) {
for (j = 0; j < NUM; j++) {
in[i][j] = 1;
out[i][j] = 0;
}
}
}
int comp(int v)
{
int i;
double ret = 0.0;
for (i = 0; i < 100; i++) {
ret += sqrt(cos((double)v) * sin((double)v));
}
return (int)ret;
}
/* Compute one row of out[][]: out[x][j] = comp(in[x][j]) for every
 * column j, parallelized across the columns with OpenMP.  Called from
 * inside an outer parallel loop to exercise nested parallelism. */
void petsc_voodoo(int x)
{
    #pragma omp parallel for
    for (int col = 0; col < NUM; col++) {
        out[x][col] = comp(in[x][col]);
    }
}
/* Verify out[][] against a freshly computed reference value for each
 * entry; report the first mismatch and stop, otherwise print a
 * success message. */
void check(void)
{
    for (int i = 0; i < NUM; i++) {
        for (int j = 0; j < NUM; j++) {
            const int expected = comp(in[i][j]);
            if (out[i][j] != expected) {
                printf("out[%d][%d]=%d expected=%d\n", i, j, out[i][j], expected);
                return;
            }
        }
    }
    printf("Verification: SUCCESS\n");
}
/* Benchmark nested OpenMP parallel-for: the outer loop spawns an inner
 * parallel loop (petsc_voodoo) per matrix row.  Repeats `rep` times,
 * prints "<outer threads> <inner threads> <avg time>" and verifies the
 * result.  argv[1] = inner thread count (default: team size),
 * argv[2] = repetitions (default NUM_REPS). */
int main(int argc, char *argv[])
{
    int i, r, nthreads;
    double *time, avg_time = 0.0;

    /* Discover the default team size once, before timing starts. */
    #pragma omp parallel
    {
        #pragma omp master
        {
            nthreads = omp_get_num_threads();
        }
    }

    int in_th = (argc > 1) ? atoi(argv[1]) : nthreads;
    int rep = (argc > 2) ? atoi(argv[2]) : NUM_REPS;

    time = (double *)malloc(sizeof(double) * rep);
    if (time == NULL) {
        /* malloc can fail; don't dereference NULL in the loop below. */
        fprintf(stderr, "malloc failed\n");
        return EXIT_FAILURE;
    }

    init();
    for (r = 0; r < rep; r++) {
        time[r] = omp_get_wtime();
        #pragma omp parallel for
        for (i = 0; i < NUM; i++) {
            /* Set the team size for the nested region before entering it. */
            omp_set_num_threads(in_th);
            petsc_voodoo(i);
        }
        time[r] = omp_get_wtime() - time[r];
        avg_time += time[r];
    }
    avg_time /= rep;

    printf("%d %d %f\n", nthreads, in_th, avg_time);
    check();

    free(time);
    return EXIT_SUCCESS;
}
|
LinearAlgebra.h | namespace tom {
/** Devide the given `matrix` by its element-sum, i.e., normalize the matrix to have an element-sum of one, and return the element-sum.
*/
/** Normalize `matrix` in place to have an element-sum of one and return the
 * original element-sum.  The const reference is cast away on purpose: this
 * is the SWIG-friendly in/out-argument idiom used throughout this header. */
template<typename T>
double normalize(const DenseBase<T> &matrix) {
    const double total = matrix.sum();
    const_cast< DenseBase<T> & >(matrix) /= total;
    return total;
}
SWIGCODE(%template(normalize) normalize<MatrixMd>;)
/** Devide each column of the given `matrix` by its sum, i.e., normalize the columns to have column-sum one. Return `true` if successful, or `false` if a column could not be normalized due to a zero column-sum.
*/
/** Divide each column of `matrix` by its sum so that every column sums to
 * one.  Returns `true` on success, or `false` if at least one column had a
 * zero sum and therefore could not be normalized.  A zero-sum column is left
 * untouched instead of being divided by zero, which would fill it with
 * NaN/Inf entries. */
template<typename T>
bool normalizeCols(const DenseBase<T> &matrix) {
    bool success = true;
    for (int j = 0; j < matrix.cols(); ++j) {
        const double col_sum = matrix.col(j).sum();
        if (col_sum == 0) {
            // Dividing by a zero sum would corrupt the column with NaNs.
            success = false;
        } else {
            const_cast< DenseBase<T> & >(matrix).col(j) /= col_sum;
        }
    }
    return success;
}
SWIGCODE(%template(normalizeCols) normalizeCols<MatrixMd>;)
/** Devide each row of the given `matrix` by its sum, i.e., normalize the rows to have row-sum one. Return `true` if successful, or `false` if a row could not be normalized due to a zero row-sum.
*/
/** Divide each row of `matrix` by its sum so that every row sums to one.
 * Returns `true` on success, or `false` if at least one row had a zero sum
 * and therefore could not be normalized.  A zero-sum row is left untouched
 * instead of being divided by zero, which would fill it with NaN/Inf
 * entries. */
template<typename T>
bool normalizeRows(const DenseBase<T> &matrix) {
    bool success = true;
    for (int i = 0; i < matrix.rows(); ++i) {
        const double row_sum = matrix.row(i).sum();
        if (row_sum == 0) {
            // Dividing by a zero sum would corrupt the row with NaNs.
            success = false;
        } else {
            const_cast< DenseBase<T> & >(matrix).row(i) /= row_sum;
        }
    }
    return success;
}
SWIGCODE(%template(normalizeRows) normalizeRows<MatrixMd>;)
/**
* Return the Kronecker-product \f$A\otimes B\f$ of the matrices `A` and `B`. */
/** Return the Kronecker product A ⊗ B: every entry A(i,j) scales a full copy
 * of B placed at block position (i,j) of the result.  The blocks are
 * independent, so the fill order does not affect the result. */
template< typename D1, typename D2 >
MatrixXd kron(const MatrixBase<D1>& A, const MatrixBase<D2>& B) {
    const long bR = B.rows();
    const long bC = B.cols();
    MatrixXd result(A.rows() * bR, A.cols() * bC);
    for (long i = 0; i < A.rows(); ++i) {
        for (long j = 0; j < A.cols(); ++j) {
            result.block(i * bR, j * bC, bR, bC) = A(i, j) * B;
        }
    }
    return result;
}
SWIGCODE(%template(kron) kron<MatrixMd, MatrixMd>;)
SWIGCODE(%kwargs;)
/** Return the column-wise generalized mean with exponent `p` (default 1) of the given `matrix`. For `p` = 1, 0, -1 this is the arithmetic, geometric and harmonic mean, respectively.
*
* Note that for values of `p` other than {1, 2k} this requires all matrix entries to be positive.
*/
// Column-wise generalized (power) mean with exponent p:
//   p = +inf -> column maximum, p = -inf -> column minimum,
//   p = 0    -> geometric mean (computed via log/exp on |entries|),
//   p = 1    -> arithmetic mean,
//   else     -> (sum(x^p)/n)^(1/p).
// NOTE(review): the p == 0 branch takes abs() before log, so negative
// entries contribute their magnitude; other non-integer p require
// positive entries (see the Doxygen comment above).
template<typename T>
RowVectorXd colwiseMean(const MatrixBase <T> &matrix, double p = 1.0) {
RowVectorXd result(matrix.cols());
if (p == std::numeric_limits<double>::infinity()) { result = matrix.array().colwise().maxCoeff(); }
else if (p == - std::numeric_limits<double>::infinity()) { result = matrix.array().colwise().minCoeff(); }
else if (p == 0) { // geometric mean
// exp(mean(log|x|)) computed column-wise.
result = matrix.array().abs().log().colwise().sum() / matrix.rows();
result = result.array().exp();
} else if (p == 1) { // arithmetic mean
result = matrix.array().colwise().sum() / matrix.rows();
} else {
// General power mean: (mean(x^p))^(1/p).
result = matrix.array().pow(p).colwise().sum() / matrix.rows();
result = result.array().pow(1.0 / p);
}
return result;
}
SWIGCODE(%template(colwiseMean) colwiseMean<MatrixMd>;)
/** Return the row-wise generalized mean with exponent `p` (default 1) of the given `matrix`. For `p` = 1, 0, -1 this is the arithmetic, geometric and harmonic mean, respectively.
*
* Note that for values of `p` other than {1, 2k} this requires all matrix entries to be positive.
*/
/** Row-wise generalized mean with exponent `p` (default 1): delegate to
 * colwiseMean on the transpose, then transpose the result back into a
 * column vector. */
template<typename T>
VectorXd rowwiseMean(const MatrixBase <T> &matrix, double p = 1.0) {
    return colwiseMean(matrix.transpose(), p).transpose();
}
SWIGCODE(%template(rowwiseMean) rowwiseMean<MatrixMd>;)
/** Return the weighted norm of `M` with weights given in `W`, or the squared weighted norm if `squared` is set to `true`.
* Depending on the size of `W`, the given weights are interpreted in different ways, assuming `M` is of size m x n:
*
* - if `W` is of size zero, then no weights are used and the Frobenius norm |M|_F is computed
* - if `W` is of size m+n x 1, then row and column weights [w_r; w_c] = W are assumed and |M|_D(w_r w_c^T) is computed
* - if `W` is of size m x n, then element-wise weights are assumed and |M|_D(W) is computed
* - if `W` is of size m x mn, then a block-diagonal weight matrix is assumed and |M|_D(W1,...,Wn) is computed
* - if `W` is of size mn x mn, then a full weight matrix is assumed and |M|_W is computed
*/
// Weighted (Frobenius-style) norm of M, dispatched on the size of W:
//   W empty        -> plain squared Frobenius norm
//   W is (m+n) x 1 -> row weights W[:m] and column weights W[m:]
//   W is m x n     -> element-wise weights
//   W is m x mn    -> block-diagonal weight matrix (one m x m block per col)
//   W is mn x mn   -> full weight matrix applied to vec(M)
// The result is divided by M.size() before the optional square root.
// NOTE(review): the dynamic exception specification
// `throw(std::invalid_argument)` is deprecated in C++11 and removed in
// C++17; it may be relied on by SWIG for wrapper generation — confirm
// before removing.
template<typename T, typename T1>
double weightedNorm(const MatrixBase<T> &M, const MatrixBase<T1> &W, bool squared = false) throw(std::invalid_argument) {
double result = 0;
if (W.size() == 0) { result = M.squaredNorm(); }
else if (W.cols() == 1 and W.rows() == M.rows() + M.cols()) {
// Row/column weights: |D(w_r) M D(w_c)|_F^2.
result = (W.col(0).head(M.rows()).asDiagonal() * M * W.col(0).tail(M.cols()).asDiagonal()).squaredNorm();
} else if (W.rows() == M.rows()) {
// Element-wise weights: sum of W .* M.^2.
if (W.cols() == M.cols()) { result = (W.array() * M.array().square()).sum(); }
else if (W.cols() == M.cols() * W.rows()) {
// Block-diagonal weights: accumulate M_j^T W_j M_j per column j.
for (long j = 0; j < M.cols(); ++j) {
result += M.col(j).transpose() * W.middleCols(j * W.rows(), W.rows()) * M.col(j);
}
} else { throw std::invalid_argument("size mismatch for W and M"); }
} else if (W.cols() == M.size() and W.rows() == M.size()) {
// Full weight matrix: vec(M)^T W vec(M); copy M so vec() has contiguous data.
MatrixXd Mcopy = M;
Map<VectorXd, 1> vecM(Mcopy.data(), Mcopy.size());
result = vecM.transpose() * W * vecM;
} else { throw std::invalid_argument("size mismatch for W and M"); }
result /= M.size();
return squared ? result : std::sqrt(result);
}
SWIGCODE(%template(weightedNorm) weightedNorm<MatrixMd, MatrixMd>;)
SWIGCODE(%apply const MatrixBase<MatrixXd>& OUTPUT { const MatrixBase<MatrixXd>& X };)
//<editor-fold desc="Solve implementations">
/**
* Return
* \ifnot PY
* in the output-argument `X`
* \endif
* the ordinary least-squares (OLS) solution to the problem `A` * `X` = `M` (or if `transposed` to `X` * `A` = `M`) using a `method` from {"Cholesky", "LDLT", "QR" (default), "SVD", "JacobiSVD"}.
*
* The "Cholesky" method solves the normal equations using a Cholesky decomposition. This is the fastest method, but loses most precision and requires the problem to be overdetermined and `A` to have full rank.
*
* The "LDLT" method is essentially the same as "Cholesky", but uses a more robust Cholesky decomposition with pivoting that also avoids taking a square root. This method is recommended over "Cholesky" by Eigen3.
*
* The "QR" method uses a QR decomposition. This is slower than "Cholesky", but gives more precision. The marix `A` should have full rank.
*
* The "SVD" uses an SVD decomposition. This is the slowest, but gives best precision. Also, the matrix `A` does not need to have full rank, and in the case of an underdetermined problem, the least-squares solution with the smallest norm is returned.
*
* The "JacobiSVD" method is similar to the "SVD" method, but uses a different (slower, but potentially more accurate) svd algorithm.
*/
// Ordinary least-squares solver; writes the solution into the const-ref
// output argument X (const_cast is the SWIG in/out-argument idiom of this
// header; C1/PY1/C2 are SWIG wrapper macros).  See the Doxygen comment
// above for the semantics of each `method` string.
template< typename D, typename D1, typename D2 >
C1(void) PY1(MatrixXd)
solveOLS(C2(const MatrixBase<D> &X,) const MatrixBase<D1> &A, const MatrixBase<D2> &M, bool transposed = false, const std::string& method = "QR") {
if (transposed) {
// Solve X * A = M via the equivalent A^T * X^T = M^T, transposing back.
if (method == "Cholesky" or method == "iCholesky") {
// Normal equations (A A^T) X^T = A M^T, solved by Cholesky (LLT).
const_cast<MatrixBase<D> &>(X).derived().noalias() = (A * A.transpose()).llt().solve(A * M.transpose()).transpose();
} else if (method == "LDLT") {
// Pivoted Cholesky without square roots (more robust than LLT).
const_cast<MatrixBase<D> &>(X).derived().noalias() = (A * A.transpose()).ldlt().solve(A * M.transpose()).transpose();
} else if (method == "QR") {
const_cast<MatrixBase<D> &>(X).derived().noalias() = A.transpose().colPivHouseholderQr().solve(M.transpose()).transpose();
} else if (method == "SVD") {
const_cast<MatrixBase<D> &>(X).derived().noalias() = A.transpose().bdcSvd(ComputeThinU | ComputeThinV).solve(M.transpose()).transpose();
} else if (method == "JacobiSVD") {
const_cast<MatrixBase<D> &>(X).derived().noalias() = A.transpose().jacobiSvd(ComputeThinU | ComputeThinV).solve(M.transpose()).transpose();
} else { throw std::invalid_argument("unrecognized method"); }
} else {
// Solve A * X = M directly.
if (method == "Cholesky" or method == "iCholesky") {
// Normal equations (A^T A) X = A^T M, solved by Cholesky (LLT).
const_cast<MatrixBase<D> &>(X).derived().noalias() = (A.transpose() * A).llt().solve(A.transpose() * M);
} else if (method == "LDLT") {
const_cast<MatrixBase<D> &>(X).derived().noalias() = (A.transpose() * A).ldlt().solve(A.transpose() * M);
} else if (method == "QR") {
const_cast<MatrixBase<D> &>(X).derived().noalias() = A.colPivHouseholderQr().solve(M);
} else if (method == "SVD") {
const_cast<MatrixBase<D> &>(X).derived().noalias() = A.bdcSvd(ComputeThinU | ComputeThinV).solve(M);
} else if (method == "JacobiSVD") {
const_cast<MatrixBase<D> &>(X).derived().noalias() = A.jacobiSvd(ComputeThinU | ComputeThinV).solve(M);
} else { throw std::invalid_argument("unrecognized method"); }
}
}
SWIGCODE(%template(solveOLS) solveOLS<MatrixXd, MatrixMd, MatrixMd>;)
/**
* Return
* \ifnot PY
* in the output-argument `X`,
* \endif
* the row or column weighted least-squares solution to the problem `A` * `X` = `M` with row-weights given in the column vector `W` (or if `transposed` to `X` * `A` = `M` with column-weights given in the row vector `W`) using a `method` from {"Cholesky", "LDLT" (default), "QR", "SVD", "JacobiSVD"}.
*
* This computes `X` that minimizes |D(sqrt_W) * (`A` * `X` - `M`)|_F (or |(`X` * `A` - `M`) * D(sqrt_W)|_F if `transposed`), where `sqrt_W` is the element-wise square-root of `W`, i.e., `W` = `sqrt_W` .* `sqrt_W`, and `.*` denotes the element-wise product. The computation is done by reducing the problem to an OLS problem that is then solved according to the given `method` as detailed below (see also `solveOLS()`). Note that the weights in `W` must be strictly greater than zero.
*
* Note that column weights have no effect in the default case, and row weight have no effect if `transposed`, and are therefore ommitted.
*
* The "Cholesky" method solves the normal equations using a Cholesky decomposition. This is the fastest method, but loses most precision and requires the problem to be overdetermined and `A` to have full rank.
*
* The "LDLT" method is essentially the same as "Cholesky", but uses a more robust Cholesky decomposition with pivoting that also avoids taking a square root. This method is recommended over "Cholesky" by Eigen3.
*
* The "QR" method uses a QR decomposition. This is slower than "Cholesky", but gives more precision. The marix `A` should have full rank.
*
* The "SVD" uses an SVD decomposition. This is the slowest, but gives best precision. Also, the matrix `A` does not need to have full rank, and in the case of an underdetermined problem, the least-squares solution with the smallest norm is returned.
*
* The "JacobiSVD" method is similar to the "SVD" method, but uses a different (slower, but potentially more accurate) svd algorithm.
*/
// Row- (or, if transposed, column-) weighted least squares.  W holds one
// strictly positive weight per row of M (per column if transposed).  The
// Cholesky/LDLT paths solve the weighted normal equations directly; any
// other method reduces to an OLS problem by scaling with sqrt(W) and
// delegates to solveOLS.  X is the SWIG in/out output argument.
template< typename D, typename D1, typename D2, typename D3>
C1(void) PY1(MatrixXd)
solveRowColWLS(C2(const MatrixBase<D> &X,) const MatrixBase<D1>&A, const MatrixBase<D2>& M, const MatrixBase<D3>& W, bool transposed = false, const std::string &method = "LDLT") {
if (transposed) {
// Solve X * A = M with column weights W (a row vector).
if (method == "Cholesky" or method == "iCholesky") {
// Weighted normal equations (A D(W) A^T) X^T = A D(W) M^T.
const_cast<MatrixBase<D> &>(X).derived().noalias() = (A * W.asDiagonal() * A.transpose()).llt().solve(
A * W.asDiagonal() * M.transpose()).transpose();
} else if (method == "LDLT") {
const_cast<MatrixBase<D> &>(X).derived().noalias() = (A * W.asDiagonal() * A.transpose()).ldlt().solve(
A * W.asDiagonal() * M.transpose()).transpose();
} else {
// Reduce to OLS: scale columns of A and M by sqrt(W).
RowVectorXd sqrt_W = W.cwiseSqrt();
solveOLS(X, A * sqrt_W.asDiagonal(), M * sqrt_W.asDiagonal(), transposed, method);
}
} else {
// Solve A * X = M with row weights W (a column vector).
if (method == "Cholesky" or method == "iCholesky") {
// Weighted normal equations (A^T D(W) A) X = A^T D(W) M.
const_cast<MatrixBase<D> &>(X).derived().noalias() = (A.transpose() * W.asDiagonal() * A).llt().solve(
A.transpose() * W.asDiagonal() * M);
} else if (method == "LDLT") {
const_cast<MatrixBase<D> &>(X).derived().noalias() = (A.transpose() * W.asDiagonal() * A).ldlt().solve(
A.transpose() * W.asDiagonal() * M);
} else {
// Reduce to OLS: scale rows of A and M by sqrt(W).
VectorXd sqrt_W = W.cwiseSqrt();
solveOLS(X, sqrt_W.asDiagonal() * A, sqrt_W.asDiagonal() * M, transposed, method);
}
}
}
SWIGCODE(%template(solveRowColWLS) solveRowColWLS<MatrixXd, MatrixMd, MatrixMd, MatrixMd>;)
/**
* Return
* \ifnot PY
* in the output-argument `X`,
* \endif
* the (element-wise) D(`W`)-weighted least-squares (WLS) solution to the problem `A` * `X` = `M` (or to `X` * `A` = `M` if `transposed`) using a `method` from {"Cholesky", "LDLT" (default), "QR", "SVD", "JacobiSVD"}.
*
* This computes `X` that minimizes |`A` * `X` - `M`|_D(`W`) (or |`X` * `A` - `M`|_D(`W`) if `transposed`). The computation is done by reducing the problem to a set of OLS problems that are then solved according to the given `method` as detailed below (see also `solveOLS()`). Note that the weights in `W` must be strictly greater than zero.
*
* The "Cholesky" method solves the normal equations using a Cholesky decomposition. This is the fastest method, but loses most precision and requires the problem to be overdetermined and `A` to have full rank.
*
* The "LDLT" method is essentially the same as "Cholesky", but uses a more robust Cholesky decomposition with pivoting that also avoids taking a square root. This method is recommended over "Cholesky" by Eigen3.
*
* The "QR" method uses a QR decomposition. This is slower than "Cholesky", but gives more precision. The marix `A` should have full rank.
*
* The "SVD" uses an SVD decomposition. This is the slowest, but gives best precision. Also, the matrix `A` does not need to have full rank, and in the case of an underdetermined problem, the least-squares solution with the smallest norm is returned.
*
* The "JacobiSVD" method is similar to the "SVD" method, but uses a different (slower, but potentially more accurate) svd algorithm.
*/
// Element-wise weighted least squares: minimizes |A*X - M|_D(W) (or
// |X*A - M|_D(W) if transposed), where W gives one strictly positive
// weight per entry of M.  The problem decouples per column (per row if
// transposed): Cholesky/LDLT solve the weighted normal equations for each
// column/row with a reused decomposition object; other methods reduce each
// subproblem to OLS via sqrt(W) scaling.  X is the SWIG in/out argument
// and is resized here to the solution shape.
template< typename D, typename D1, typename D2, typename D3>
C1(void) PY1(MatrixXd)
solveWLS(C2(const MatrixBase<D> &X,) const MatrixBase<D1>& A, const MatrixBase<D2>& M, const MatrixBase<D3>& W, bool transposed = false, const std::string &method = "LDLT") {
if (transposed) {
// Solve X * A = M row by row of M; X has shape (M.rows(), A.rows()).
const_cast<MatrixBase<D> &>(X).derived().resize(M.rows(), A.rows());
if (method == "Cholesky" or method == "iCholesky") {
LLT<MatrixXd> llt;
// Precompute A * (W .* M)^T once; column i is the RHS for row i.
MatrixXd A_WMT = A * (W.array() * M.array()).matrix().transpose();
for (int i = 0; i < M.rows(); ++i) {
llt.compute(A * W.row(i).asDiagonal() * A.transpose());
const_cast<MatrixBase<D> &>(X).row(i).noalias() = llt.solve(A_WMT.col(i)).transpose();
}
} else if (method == "LDLT") {
LDLT<MatrixXd> ldlt;
MatrixXd A_WMT = A * (W.array() * M.array()).matrix().transpose();
for (int i = 0; i < M.rows(); ++i) {
ldlt.compute(A * W.row(i).asDiagonal() * A.transpose());
const_cast<MatrixBase<D> &>(X).row(i).noalias() = ldlt.solve(A_WMT.col(i)).transpose();
}
} else {
// Per-row reduction to OLS with sqrt-weight scaling.
MatrixXd sqrt_W = W.cwiseSqrt();
for (int i = 0; i < M.rows(); ++i) {
solveOLS(const_cast<MatrixBase<D> &>(X).row(i), A * sqrt_W.row(i).asDiagonal(), M.row(i) * sqrt_W.row(i).asDiagonal(), transposed, method);
}
}
} else {
// Solve A * X = M column by column of M; X has shape (A.cols(), M.cols()).
const_cast<MatrixBase<D> &>(X).derived().resize(A.cols(), M.cols());
if (method == "Cholesky" or method == "iCholesky") {
LLT<MatrixXd> llt;
// Precompute A^T * (W .* M) once; column j is the RHS for column j.
MatrixXd AT_WM = A.transpose() * (W.array() * M.array()).matrix();
for (int j = 0; j < M.cols(); ++j) {
llt.compute(A.transpose() * W.col(j).asDiagonal() * A);
const_cast<MatrixBase<D> &>(X).col(j).noalias() = llt.solve(AT_WM.col(j));
}
} else if (method == "LDLT") {
LDLT<MatrixXd> ldlt;
MatrixXd AT_WM = A.transpose() * (W.array() * M.array()).matrix();
for (int j = 0; j < M.cols(); ++j) {
ldlt.compute(A.transpose() * W.col(j).asDiagonal() * A);
const_cast<MatrixBase<D> &>(X).col(j).noalias() = ldlt.solve(AT_WM.col(j));
}
} else {
// Per-column reduction to OLS with sqrt-weight scaling.
MatrixXd sqrt_W = W.cwiseSqrt();
for (int j = 0; j < M.cols(); ++j) {
solveOLS(const_cast<MatrixBase<D> &>(X).col(j), sqrt_W.col(j).asDiagonal() * A, sqrt_W.col(j).asDiagonal() * M.col(j), transposed, method);
}
}
}
}
SWIGCODE(%template(solveWLS) solveWLS<MatrixXd, MatrixXd, MatrixXd, MatrixXd>;)
/**
* Return
* \ifnot PY
* in the output-argument `X`,
* \endif
* the D(W1,..., Wm)-weighted least-squares (GLS) solution to the overdetermined problem `A` * `X` = `M` (or to `X` * `A` = `M` if `transposed`) using a `method` from {"Cholesky", "LDLT" (default)}, where the block-diagonal symmetric and positive definite weight matrix is given by:
* `W` = [[W1]_1,...,[Wn]_1, ..., [W1]_m,...,[Wn]_m], where each `Wj` is the full weight matrix for the column j of `M`.
*
* This computes `X` that minimizes |`A` * `X` - `M`|_D(W1,...,Wn) (or |`X` * `A` - `M`|_D(W1,...,Wn) if `transposed`).
*
* Note that the "LDLT" method is essentially the same as "Cholesky", but uses a more robust Cholesky decomposition with pivoting that also avoids taking a square root. This method is recommended over "Cholesky" by Eigen3.
*/
// Generalized (block-diagonal) weighted least squares: W packs one full
// m x m weight block per column of M side by side (asserted below).
// W_reshaped reinterprets W's storage so that block j can be addressed as
// a contiguous row range.  Only "Cholesky"/"iCholesky"/"LDLT" are handled.
// X is the SWIG in/out output argument.
template< typename D, typename D1, typename D2, typename D3>
C1(void) PY1(MatrixXd)
solveGLS(C2(const MatrixBase<D> &X,) const MatrixBase<D1> &A, const MatrixBase<D2>& M, const MatrixBase<D3>& W, bool transposed = false, const std::string &method = "LDLT") {
assert(W.rows() == M.rows() and W.cols() == M.cols() * M.rows()); // Block-diagonal weight matrix
// View W's data as (W.cols() x W.rows()) so block j = middleRows(j*W.rows(), W.rows()).
// NOTE(review): this assumes W's storage order makes the reinterpretation
// valid (column-major Eigen default) — confirm against callers.
Map<const MatrixXd> W_reshaped(W.derived().data(), W.cols(), W.rows());
if (transposed) {
// solve XA≈M by solving [\sum_j AjAj^T ⊗ Wj] vec(X) = vec( [W1M1, W2M2, ..., WnMn] A^T )
MatrixXd AT = A.transpose();
MatrixXd AtI_W_ATtI(A.rows() * W.rows(), A.rows() * W.rows()); // we only compute lower triagonal part
#ifdef _OPENMP
#pragma omp parallel
#endif
{
// Each thread owns its scratch vector; the omp-for below splits the
// ii range, so threads write disjoint blocks of AtI_W_ATtI.
VectorXd ATii_ATjj(W.rows());
for (long jj = 0; jj < A.rows(); ++jj) {
for (long j = 0; j < M.rows(); ++j) {
#ifdef _OPENMP
#pragma omp for
#endif
for (long ii = jj; ii < A.rows(); ++ii) {
ATii_ATjj = AT.col(ii).cwiseProduct(AT.col(jj));
AtI_W_ATtI.block(ii * W.rows(), jj * W.rows() + j, W.rows(), 1).noalias() =
W.middleCols(j * M.cols(), M.cols()) * ATii_ATjj;
}
}
}
}
// Build the right-hand side [W1 M1, ..., Wn Mn] A^T, then vectorize it.
MatrixXd WM = MatrixXd(W.rows(), M.cols());
for (int j = 0; j < M.cols(); ++j) {
WM.col(j).noalias() = W_reshaped.middleRows(j*W.rows(), W.rows()) * M.col(j);
}
WM *= A.transpose();
const Map<const VectorXd> vecWMAT(WM.data(),WM.size());
MatrixXd vecX;
if (method == "LDLT") { vecX.noalias() = AtI_W_ATtI.ldlt().solve(vecWMAT); }
else if (method == "Cholesky") { vecX.noalias() = AtI_W_ATtI.llt().solve(vecWMAT); }
// "iCholesky": in-place LLT on the system matrix to save memory.
else if (method == "iCholesky") { LLT<Ref<MatrixXd> > llt(AtI_W_ATtI); vecX.noalias() = llt.solve(vecWMAT); }
else { throw std::invalid_argument("unrecognized method"); }
// Un-vectorize the solution into X's (M.rows() x A.rows()) shape.
vecX.resize(M.rows(), A.rows());
const_cast<MatrixBase<D>&>(X).derived() = std::move(vecX);
} else {
// solve AX≈M by solving SjAXj ≈ SjMj for each column j
if (method == "Cholesky" or method == "iCholesky") {
LLT<MatrixXd> llt;
const_cast<MatrixBase<D> &>(X).derived().resize(A.cols(), M.cols());
for (int j = 0; j < X.cols(); ++j) {
// Weighted normal equations with block W_j for column j.
llt.compute(A.transpose() * W_reshaped.middleRows(j * W.rows(), W.rows()) * A);
const_cast<MatrixBase<D> &>(X).col(j).noalias() = llt.solve(
A.transpose() * (W_reshaped.middleRows(j * W.rows(), W.rows()) * M.col(j)));
}
} else if (method == "LDLT") {
LDLT<MatrixXd> ldlt;
const_cast<MatrixBase<D> &>(X).derived().resize(A.cols(), M.cols());
for (int j = 0; j < X.cols(); ++j) {
ldlt.compute(A.transpose() * W_reshaped.middleRows(j * W.rows(), W.rows()) * A);
const_cast<MatrixBase<D> &>(X).col(j).noalias() = ldlt.solve(
A.transpose() * (W_reshaped.middleRows(j * W.rows(), W.rows()) * M.col(j)));
}
}
}
}
SWIGCODE(%template(solveGLS) solveGLS<MatrixXd, MatrixXd, MatrixXd, MatrixXd>;)
//</editor-fold>
/**
* Return
* \ifnot PY
* in the output-argument `X`,
* \endif
* the generalized weighted least-squares (GLS) solution to the overdetermined problem `A` * `X` = `M` (or to the problem `X` * `A` = `M` if `transposed`) with the given weights `W` using a `method` from {"Cholesky", "LDLT" (default), "QR", "SVD", "JacobiSVD"}.
*
* This computes `X` that minimizes the weighted norm |`A` * `X` - `M`|_W (or |`X` * `A` - `M`|_W if `transposed`), utilizing the structure of W that depends on the size of the supplied weights `W` in the following way, assuming `M` is of size m x n:
*
* - If `W` is of size zero (default), then no weights are used and the ordinary least squares (OLS) solution minimizing the Frobenius norm is returned.
* - If `W` is of size m x n, then element-wise weights are assumed, i.e., W = D(`W`), resulting in the weighted least squares (WLS) solution.
* - If `W` is of size m x nm, then a block-diagonal structure for W is assumed, i.e., W = D(`W`_1, ..., `W`_m), where `W`_j is the j-th (m x m)-block of `W` corresponding to the weights for [`M`]_j, which must be symmetric an positive definite.
* - If `W` is of size m x 1, then these are regarded as row-weights, which only make sense if not `transposed`.
* - If `W` is of size 1 x n, then these are regarded as column-weights, which only make sense if `transposed`.
* - If `W` is of size m+n x 1 then `W`[:m] are regarded as row-weights and `W`[m:] as column-weights. If `transposed` the row-weights, else the column-weights, have no effect.
*
* Note that solving GLS with full weight matrix is expensive, and therefore only solving with block-diagonal structured weight matrix is supported, and then only the methods "Cholesky" and "LDLT" which can make use of extra simplifications.
*
* The computation is done by reducing the problem to a set of OLS problems that are then solved according to the given `method` as detailed below. Note that the weights in `W` must be strictly greater than zero.
*
* The "Cholesky" method solves the normal equations using a Cholesky decomposition. This is the fastest method, but loses most precision and requires the problem to be overdetermined and `A` to have full rank.
*
* The "LDLT" method is essentially the same as "Cholesky", but uses a more robust Cholesky decomposition with pivoting that also avoids taking a square root. This method is recommended over "Cholesky" by Eigen3.
*
* The "QR" method uses a QR decomposition. This is slower than "Cholesky", but gives more precision. The marix `A` should have full rank.
*
* The "SVD" uses an SVD decomposition. This is the slowest, but gives best precision. Also, the matrix `A` does not need to have full rank in which case the least-squares solution with the smallest norm is returned.
*
* The "JacobiSVD" method is similar to the "SVD" method, but uses a different (slower, but potentially more accurate) svd algorithm.
*
*/
// Dispatch a (weighted) least-squares solve A*X = M (or X*A = M when
// `transposed`) to the appropriate specialized solver, selected by the shape
// of the weight matrix `W`.  The solution is produced in the output argument
// `X` (returned in Python via the SWIG typemaps C1/C2/PY1).
template< typename D>
C1(void) PY1(MatrixXd)
solveLS(C2(const MatrixBase<D> &X,) const MatrixXd &A, const MatrixXd &M, const MatrixXd &W = MatrixXd(), bool transposed = false, std::string method = "LDLT") throw(std::invalid_argument) {
    // Reject unknown solver names up front.
    if (not (method == "Cholesky" or method == "iCholesky" or method == "LDLT" or method == "QR" or method == "SVD" or method == "JacobiSVD")) throw std::invalid_argument("method not recognized");
    if (not transposed) { // solve AX=M
        if (A.rows() != M.rows()) throw std::invalid_argument("LHS rows != RHS rows");
        if (A.cols() > A.rows()) throw std::invalid_argument("system must be overdetermined");
        // Dispatch on the shape of W:
        //   empty            -> ordinary LS
        //   same shape as M  -> element-wise weighted LS
        //   m x (m*n)        -> block-diagonal GLS (Cholesky/LDLT variants only)
        //   (m*n) square     -> full GLS (not implemented)
        //   m x 1 / (m+n) x 1 -> row-weighted LS
        if (W.size() == 0) { solveOLS(X, A, M, transposed, method); }
        else if (W.rows() == M.rows() and W.cols() == M.cols()) { solveWLS(X, A, M, W, transposed, method); }
        else if (W.rows() == M.rows() and W.cols() == M.rows() * M.cols()) {
            if (method != "Cholesky" and method != "LDLT" and method != "iCholesky") throw std::invalid_argument("GLS only supported with method 'Cholesky' or 'LDLT'");
            solveGLS(X, A, M, W, transposed, method); }
        else if (W.rows() == W.cols() and W.cols() == M.rows() * M.cols()) throw std::invalid_argument("GLS with full weight matrix not implemented");
        else if (W.rows() == M.rows() and W.cols() == 1) { solveRowColWLS(X, A, M, W, transposed, method); }
        else if (W.cols() == 1 and W.rows() == M.rows() + M.cols()) { solveRowColWLS(X, A, M, W.col(0).head(M.rows()), transposed, method); }
        else { throw std::invalid_argument("size mismatch for M and W"); }
    } else { // solve XA=M
        if (A.cols() != M.cols()) throw std::invalid_argument("LHS cols != RHS cols");
        if (A.rows() > A.cols()) throw std::invalid_argument("system must be overdetermined");
        // Same dispatch as above, but with column weights; only the tail
        // (column part) of a combined row+column weight vector is used.
        if (W.size() == 0) { solveOLS(X, A, M, transposed, method); }
        else if (W.rows() == M.rows() and W.cols() == M.cols()) { solveWLS(X, A, M, W, transposed, method); }
        else if (W.rows() == M.rows() and W.cols() == M.rows() * M.cols()) {
            if (method != "Cholesky" and method != "LDLT" and method != "iCholesky") throw std::invalid_argument("GLS only supported with method 'Cholesky' or 'LDLT'");
            solveGLS(X, A, M, W, transposed, method); }
        else if (W.rows() == W.cols() and W.cols() == M.rows() * M.cols()) throw std::invalid_argument("LS with full weight matrix not implemented");
        else if (W.rows() == 1 and W.cols() == M.cols()) { solveRowColWLS(X, A, M, W, transposed, method); }
        else if (W.cols() == 1 and W.rows() == M.rows() + M.cols()) { solveRowColWLS(X, A, M, W.col(0).tail(M.cols()).transpose(), transposed, method); }
        else { throw std::invalid_argument("size mismatch for M and W"); }
    }
}
SWIGCODE(%template(solveLS) solveLS<MatrixXd>;)
/**
* Return a new weight matrix for `X`, assuming `X` is a solution to the D(`W`)-weighted WLS problem `B` * `X` = `M`.
*
* Note that the columns of `X` can be regarded as coordinate representations for the columns of `M` with respect to a basis given by the columns of `B`. This function transforms the given weights for the columns of `M` to appropriate weights for the coordinates in the columns of `X`. The resulting weight matrix for `X` will therefore be block-diagonal in general, but if `covariances` is set to `false`, the off-diagonal weights are ignored, resulting in element-wise weights for `X`.
*
* The returned matrix will therefore be
*
* - [[tW_1]_1, ..., [tW_m]_1, ..., [tW_1]_{B.cols}, ..., [tW_m]_{B.cols}] of size (B.cols x M.cols) * B.cols if `covariances`, where
* tW_j = B^T * D([W]_j) * B
* - [diag(B^T * D([W]_1) * B), ..., diag(B^T * D([W]_m) * B)] of size B.cols() x M.cols() otherwise.
*/
// Transform element-wise weights `W` for the columns of M into weights for
// the coordinates X of a WLS solution B*X = M.  If `covariances`, the full
// blocks tW_j = B^T * D([W]_j) * B are stored; otherwise only their diagonals.
template< typename D1, typename D2 >
MatrixXd transformWeights(const MatrixBase<D1> &W, const MatrixBase<D2> &B, bool covariances = true) {
    // One (B.cols x B.cols) block per column of W when `covariances`,
    // else one column of diagonal weights per column of W.
    MatrixXd newWeights(B.cols(), W.cols() * (covariances ? B.cols() : 1));
    for (int j = 0; j < W.cols(); ++j) {
        if (covariances) {
            // Re-view the output storage as a tall (B.cols*W.cols) x B.cols
            // matrix and write the j-th block tW_j = B^T * D([W]_j) * B.
            MatrixXd::Map(newWeights.data(), B.cols() * W.cols(), B.cols()).middleRows(j * B.cols(), B.cols()).noalias() =
                B.transpose() * W.col(j).asDiagonal() * B;
        } else {
            // Keep only the diagonal of tW_j (element-wise weights).
            newWeights.col(j).noalias() = (B.transpose() * W.col(j).asDiagonal() * B).diagonal();
        }
    }
    return newWeights;
}
SWIGCODE(%template(transformWeights) transformWeights<MatrixMd, MatrixMd>;)
/**
* \ifnot PY
* Compute in the arguments `U`,`s` and `VT`
* \else
* Return in a tuple [U,s,VT]
* \endif
* the singular value decomposition of the given matrix `M`, such that `M = U D(s) VT`, using the given `method` ('bdc' | 'jacobi').
*/
SWIGCODE(%apply const MatrixXd& OUTPUT { const MatrixXd &U, const MatrixXd &VT };)
SWIGCODE(%apply const ArrayXd& OUTPUT { const ArrayXd &s };)
// Compute the thin SVD of `M` (M = U D(s) VT) into the output arguments
// U, s, VT using either Eigen's divide-and-conquer SVD ("bdc") or the
// slower but potentially more accurate Jacobi SVD ("jacobi").
// The outputs are const references by SWIG output-argument convention,
// hence the const_cast assignments.
template<typename D >
C1(void) PY3(tuple<MatrixXd, ArrayXd, MatrixXd>)
svd(C4(const MatrixXd &U, const ArrayXd &s, const MatrixXd &VT,) const MatrixBase<D> &M, const std::string &method="bdc") {
    if (method == "bdc") {
        BDCSVD<MatrixXd> svd(M, ComputeThinU | ComputeThinV);
        const_cast<MatrixXd&>(U) = svd.matrixU();
        const_cast<ArrayXd&>(s) = svd.singularValues();
        const_cast<MatrixXd&>(VT) = svd.matrixV().transpose();
    } else if (method == "jacobi") {
        JacobiSVD<MatrixXd> svd(M, ComputeThinU | ComputeThinV);
        const_cast<MatrixXd&>(U) = svd.matrixU();
        const_cast<ArrayXd&>(s) = svd.singularValues();
        const_cast<MatrixXd&>(VT) = svd.matrixV().transpose();
    }
    // NOTE(review): an unrecognized `method` silently leaves U, s and VT
    // unmodified -- consider throwing std::invalid_argument instead.
}
SWIGCODE(%template(svd) svd<MatrixMd >;)
SWIGCODE(%clear const MatrixXd &U, const ArrayXd &s, const MatrixXd &VT;)
/**
* Return the pseudo-inverse of the given matrix `M` computed according to the given `method` from {"Cholesky", "QR", "SVD" (default), "JacobiSVD"}.
*
* If `method` is "SVD" or "JacobiSVD", the classical pseudo-inverse is computed from the svd of `M`.
*
* If `method` is "QR", the pseudo-inverse is computed from the QR-factorization of `M`, which requires `M` to have full rank.
*
* If `method` is "Cholesky" or "LDLT", the pseudo-inverse is computed as \f$(M^\top M)^{-1} M^\top\f$ or \f$M^\top (M M^\top)^{-1}\f$ depending on the size of `M`, which requires `M` to have full rank.
*/
// Compute the pseudo-inverse of `M` by solving M * pinv(M) = I with solveOLS
// using the requested `method`.  The rank-requiring methods (Cholesky
// variants and QR) cannot handle a wide matrix directly, so in that case the
// transposed problem is solved instead.
template<typename D>
MatrixXd pinv(const MatrixBase<D> &M, const std::string &method = "SVD") {
    const bool needsFullRankSolver = (method == "LDLT" or method == "Cholesky" or
                                      method == "iCholesky" or method == "QR");
    const bool solveTransposed = needsFullRankSolver and (M.rows() < M.cols());
    // The identity has the size of the "long" dimension of the problem.
    const unsigned long identitySize = solveTransposed ? M.cols() : M.rows();
    MatrixXd pseudoInverse;
    solveOLS(pseudoInverse, M, MatrixXd::Identity(identitySize, identitySize),
             solveTransposed, method);
    return pseudoInverse;
}
SWIGCODE(%template(pinv) pinv<MatrixMd>;)
/**
* Compute in the arguments `B` and `A` (an approximation to) the best weighted rank-d approximation to `M` with element-wise weights `W` such that |`B` * `A` - `M`|_D(W) is minimized. This is computed iteratively starting from an initial approximation given by `B` * `A` using "alternating projections" solved via the given `method`. The termination of the iteration is controlled by the given `stopCondition`.
* \ifnot PY
* See `StopCondition`.
* \else
* See `tom.util.StopCondition`.
* \endif
*/
// Iteratively improve the weighted low-rank approximation B*A of M with
// element-wise weights W by alternating projections: each pass re-solves for
// B (transposed WLS) and then for A.  Iteration stops when `stopCondition`,
// fed the current weighted residual norm, is satisfied.  `stopCondition` and
// the factors are const references by SWIG in/out convention, hence the
// const_casts.
template<typename D1, typename D2, typename D3, typename D4>
void improveWLRA(const MatrixBase<D1> &B, const MatrixBase<D2> &A, const MatrixBase<D3> &M, const MatrixBase<D4> &W,
                 const StopCondition& stopCondition = StopCondition(50, 1e-5, 1e-12), const std::string &method = "Cholesky") {
    // Restart the stop-condition state before iterating.
    const_cast<StopCondition&>(stopCondition).reset();
    while (not const_cast<StopCondition&>(stopCondition)(weightedNorm(M - B * A, W))) {
        solveWLS(B, A, M, W, true, method);
        solveWLS(A, B, M, W, false, method);
    }
}
SWIGCODE(%template(improveWLRA) improveWLRA<MatrixMd, MatrixMd, MatrixXd, MatrixXd >;)
/**
* \ifnot PY
* Compute in the arguments `B` and `A`
* \else
* Return in a tuple [B,A]
* \endif
* (an approximation to) the best weighted rank-d approximation to `M` with element-wise weights `W` such that |`B` * `A` - `M`|_D(vect(W)) is minimized. This is computed iteratively starting from an initial approximation given by `B_init` using "alternating projections", which are in turn solved via the given `method`. The termination of the iteration is controlled by the given `stopCondition`.
* \ifnot PY
* See `StopCondition`.
* \else
* See `tom.util.StopCondition`.
* \endif
*/
SWIGCODE(%apply const MatrixBase<MatrixXd>& OUTPUT { const MatrixBase<MatrixXd> &B, const MatrixBase<MatrixXd> &A };)
// Compute (an approximation to) the best weighted low-rank factorization
// B*A of M with element-wise weights W, starting from the initial factor
// `B_init`: first solve for A given B = B_init, then refine both factors by
// alternating projections (improveWLRA).  Results are produced in the output
// arguments B and A (returned as a tuple in Python via C3/PY2).
template<typename D1, typename D2, typename D3, typename D4, typename D5>
C1(void) PY2(tuple<MatrixXd, MatrixXd>)
computeWLRA(C3(const MatrixBase<D1> &B, const MatrixBase<D2> &A,) const MatrixBase<D3> &M, const MatrixBase<D4> &W, const MatrixBase<D5> &B_init, const StopCondition& stopCondition = StopCondition(50, 1e-5, 1e-12), const std::string &method = "Cholesky") throw(std::invalid_argument) {
    // W must match M element-wise; B_init must be conformable with M and
    // must not request a rank larger than M.cols().
    if (W.rows() != M.rows() or W.cols() != M.cols() or B_init.rows() != M.rows() or B_init.cols() > M.cols()) {
        throw std::invalid_argument("size mismatch");
    }
    const_cast<MatrixBase<D1>&>(B).derived() = B_init;
    solveWLS(A, B, M, W, false, method);
    improveWLRA(B, A, M, W, stopCondition, method);
}
SWIGCODE(%template(computeWLRA) computeWLRA<MatrixXd, MatrixXd, MatrixXd, MatrixXd, MatrixXd >;)
SWIGCODE(%clear const MatrixBase<MatrixXd> &X, const MatrixBase<MatrixXd> &B, const MatrixBase<MatrixXd> &A;)
SWIGCODE(%clearkwargs;)
} // namespace tom
|
ex_static4.c | #include <stdio.h>
#include <unistd.h>
#include <omp.h>
/*
 * Demonstrate OpenMP loop scheduling: run the same loop under the default,
 * static (fixed chunk) and dynamic schedules, printing which thread handles
 * each iteration and timing the two scheduled variants.  Iteration i sleeps
 * for i seconds, so the work per iteration is deliberately unbalanced.
 */
int main()
{
    int chunk = 4;   /* chunk size for the static schedule */
    int t = 4;       /* number of threads */
    int n = 16;      /* number of loop iterations */
    double start, elapsed;

    /* Default schedule: just show the thread-to-iteration assignment. */
    #pragma omp parallel for num_threads(t)
    for (int i = 0; i < n; i++) {
        printf("Thread %d is doing iteration %d.\n", omp_get_thread_num(), i);
    }
    printf("------------------------------\n");

    /* Static schedule with a fixed chunk size. */
    start = omp_get_wtime();
    #pragma omp parallel for schedule(static,chunk) num_threads(t)
    for (int i = 0; i < n; i++) {
        sleep(i); /* wait for i seconds */
        printf("Thread %d has completed iteration %d.\n", omp_get_thread_num(), i);
    }
    elapsed = omp_get_wtime() - start;
    printf("Static Scheduling time needed- %f\n", elapsed);
    printf("------------------------------\n");

    /* Dynamic schedule: idle threads grab the next iteration. */
    start = omp_get_wtime();
    #pragma omp parallel for schedule(dynamic) num_threads(t)
    for (int i = 0; i < n; i++) {
        sleep(i); /* wait for i seconds */
        printf("Thread %d has completed iteration %d.\n", omp_get_thread_num(), i);
    }
    elapsed = omp_get_wtime() - start;
    printf("Dynamic Scheduling time needed- %f\n", elapsed);
    return 0;
}
|
GB_subref_phase0.c | //------------------------------------------------------------------------------
// GB_subref_phase0: find vectors of C = A(I,J) and determine I,J properties
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
#include "GB_subref.h"
#define GB_Ai(p) GB_UNFLIP (Ai [p])
//------------------------------------------------------------------------------
// GB_find_Ap_start_end
//------------------------------------------------------------------------------
// Find pA and pA_end so that Ai,Ax [pA:pA_end-1] contains the vector
// A(imin:imax,kA). If A(:,kA) is dense, [pA:pA_end-1] is the entire dense
// vector (it is not trimmed). Otherwise, if A(imin:imax,kA) is empty, then
// pA and pA_end are set to -1 to denote an empty list. The resulting pointers
// are then returned in Ap_start [kC] and Ap_end [kC].
// Find the range [pA:pA_end-1] of Ai,Ax holding the vector A(imin:imax,kA)
// and record it in Ap_start [kC] and Ap_end [kC].  A dense A(:,kA) is not
// trimmed; an empty intersection is flagged by pA = pA_end = -1.  Zombie
// indices in Ai are handled via GB_Ai / GB_BINARY_SPLIT_ZOMBIE.
static inline void GB_find_Ap_start_end
(
    // input, not modified
    const int64_t kA,
    const int64_t *restrict Ap,
    const int64_t *restrict Ai,
    const int64_t avlen,
    const int64_t imin,
    const int64_t imax,
    const int64_t kC,
    const int64_t nzombies,
    // output: Ap_start [kC] and Ap_end [kC]:
    int64_t *restrict Ap_start,
    int64_t *restrict Ap_end
)
{

    //--------------------------------------------------------------------------
    // get A(:,kA)
    //--------------------------------------------------------------------------

    int64_t pA = Ap [kA] ;
    int64_t pA_end = Ap [kA+1] ;
    int64_t ajnz = pA_end - pA ;

    //--------------------------------------------------------------------------
    // trim it to A(imin:imax,kA)
    //--------------------------------------------------------------------------

    if (ajnz == avlen)
    {

        //----------------------------------------------------------------------
        // A (:,kA) is dense; use pA and pA_end as-is
        //----------------------------------------------------------------------

        ;

    }
    else if (ajnz == 0 || GB_Ai (pA) > imax || GB_Ai (pA_end-1) < imin)
    {

        //----------------------------------------------------------------------
        // intersection of A(:,kA) and imin:imax is empty
        //----------------------------------------------------------------------

        pA = -1 ;
        pA_end = -1 ;

    }
    else
    {

        //----------------------------------------------------------------------
        // A (:,kA) is sparse, with at least one entry
        //----------------------------------------------------------------------

        // trim the leading part of A(:,kA): binary search for the first
        // entry with row index >= imin (zombie-aware)
        if (GB_Ai (pA) < imin)
        {
            bool found, is_zombie ;
            int64_t pright = pA_end - 1 ;
            GB_BINARY_SPLIT_ZOMBIE (imin, Ai, pA, pright, found, nzombies,
                is_zombie) ;
        }

        // trim the trailing part of A (:,kA)
        if (imin == imax)
        {
            // I is a single row index; at most one entry can match
            if (GB_Ai (pA) == imin)
            {
                // found the single entry A (i,kA)
                pA_end = pA + 1 ;
            }
            else
            {
                // A (i,kA) has not been found
                pA = -1 ;
                pA_end = -1 ;
            }
        }
        else if (imax < GB_Ai (pA_end-1))
        {
            // binary search for the last entry with row index <= imax
            bool found, is_zombie ;
            int64_t pleft = pA ;
            int64_t pright = pA_end - 1 ;
            GB_BINARY_SPLIT_ZOMBIE (imax, Ai, pleft, pright, found, nzombies,
                is_zombie) ;
            pA_end = (found) ? (pleft + 1) : pleft ;
        }

        #ifdef GB_DEBUG
        ajnz = pA_end - pA ;
        if (ajnz > 0)
        {
            // A(imin:imax,kA) is now in Ai [pA:pA_end-1]
            ASSERT (GB_IMPLIES (Ap [kA] < pA, GB_Ai (pA-1) < imin)) ;
            ASSERT (GB_IMPLIES (pA_end < Ap [kA+1], imax < GB_Ai (pA_end))) ;
            ASSERT (imin <= GB_Ai (pA)) ;
            ASSERT (GB_Ai (pA_end-1) <= imax) ;
        }
        #endif
    }

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    // The result [pA:pA_end-1] defines the range of entries that need to be
    // accessed for constructing C(:,kC).

    Ap_start [kC] = pA ;
    Ap_end   [kC] = pA_end ;
}
//------------------------------------------------------------------------------
// GB_subref_phase0
//------------------------------------------------------------------------------
#define GB_FREE_WORK \
GB_FREE_MEMORY (Count, max_ntasks+1, sizeof (int64_t)) ;
// Phase 0 of C = A(I,J): determine the vectors of C (Cnvec and the optional
// hyperlist Ch), the per-vector entry ranges Ap_start/Ap_end into A, and the
// properties of the index lists I and J.  No entries of C are computed here.
GrB_Info GB_subref_phase0
(
    // output
    int64_t *restrict *p_Ch,        // Ch = C->h hyperlist, or NULL standard
    int64_t *restrict *p_Ap_start,  // A(:,kA) starts at Ap_start [kC]
    int64_t *restrict *p_Ap_end,    // ... and ends at Ap_end [kC] - 1
    int64_t *p_Cnvec,               // # of vectors in C
    bool *p_need_qsort,             // true if C must be sorted
    int *p_Ikind,                   // kind of I
    int64_t *p_nI,                  // length of I
    int64_t Icolon [3],             // for GB_RANGE, GB_STRIDE
    int64_t *p_nJ,                  // length of J
    // input, not modified
    const GrB_Matrix A,
    const GrB_Index *I,             // index list for C = A(I,J), or GrB_ALL, etc.
    const int64_t ni,               // length of I, or special
    const GrB_Index *J,             // index list for C = A(I,J), or GrB_ALL, etc.
    const int64_t nj,               // length of J, or special
    const bool must_sort,           // true if C must be returned sorted
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (p_Ch != NULL) ;
    ASSERT (p_Ap_start != NULL) ;
    ASSERT (p_Ap_end != NULL) ;
    ASSERT (p_Cnvec != NULL) ;
    ASSERT (p_nJ != NULL) ;
    ASSERT (p_Ikind != NULL) ;
    ASSERT (p_nI != NULL) ;
    ASSERT (Icolon != NULL) ;
    ASSERT_OK (GB_check (A, "A for subref phase 0", GB0)) ;
    ASSERT (I != NULL) ;
    ASSERT (J != NULL) ;

    GrB_Info info ;

    // initialize all outputs so every early-return path is consistent
    (*p_Ch        ) = NULL ;
    (*p_Ap_start  ) = NULL ;
    (*p_Ap_end    ) = NULL ;
    (*p_Cnvec     ) = 0 ;
    (*p_need_qsort) = false ;
    (*p_Ikind     ) = 0 ;
    (*p_nI        ) = 0 ;
    (*p_nJ        ) = 0 ;

    //--------------------------------------------------------------------------
    // get A
    //--------------------------------------------------------------------------

    int64_t *restrict Ap = A->p ;   // Ap (but not A->p) may be trimmed
    int64_t *restrict Ah = A->h ;   // Ah (but not A->h) may be trimmed
    int64_t *restrict Ai = A->i ;
    int64_t anvec = A->nvec ;       // may be trimmed
    int64_t avlen = A->vlen ;
    int64_t avdim = A->vdim ;
    int64_t nzombies = A->nzombies ;

    //--------------------------------------------------------------------------
    // check the properties of I and J
    //--------------------------------------------------------------------------

    // C = A(I,J) so I is in range 0:avlen-1 and J is in range 0:avdim-1
    int64_t nI, nJ, Jcolon [3] ;
    int Ikind, Jkind ;
    GB_ijlength (I, ni, avlen, &nI, &Ikind, Icolon) ;
    GB_ijlength (J, nj, avdim, &nJ, &Jkind, Jcolon) ;

    bool I_unsorted, I_has_dupl, I_contig, J_unsorted, J_has_dupl, J_contig ;
    int64_t imin, imax, jmin, jmax ;

    info = GB_ijproperties (I, ni, nI, avlen, &Ikind, Icolon,
        &I_unsorted, &I_has_dupl, &I_contig, &imin, &imax, Context) ;
    if (info != GrB_SUCCESS)
    {
        // I invalid
        return (info) ;
    }

    info = GB_ijproperties (J, nj, nJ, avdim, &Jkind, Jcolon,
        &J_unsorted, &J_has_dupl, &J_contig, &jmin, &jmax, Context) ;
    if (info != GrB_SUCCESS)
    {
        // J invalid
        return (info) ;
    }

    bool need_qsort = I_unsorted ;

    // For the symbolic case, GB_subref must always return C sorted.  For the
    // numeric case, GB_subref may return C with jumbled indices in each
    // vector, if C will be transposed later by GB_accum_mask.
    if (must_sort == false)
    {
        // The caller does not need C to be returned with sorted vectors.
        need_qsort = false ;
    }

    //--------------------------------------------------------------------------
    // determine if C is empty
    //--------------------------------------------------------------------------

    bool C_empty = (nI == 0 || nJ == 0) ;

    //--------------------------------------------------------------------------
    // trim the hyperlist of A
    //--------------------------------------------------------------------------

    // Ah, Ap, and anvec are modified to include just the vectors in range
    // jmin:jmax, inclusive.  A itself is not modified, just the Ah and Ap
    // pointers, and the scalar anvec.  If J is ":", then jmin is zero and
    // jmax is avdim-1, so there is nothing to trim from Ah.  If C is empty,
    // then Ah and Ap will not be accessed at all, so this can be skipped.

    bool A_is_hyper = A->is_hyper ;

    if (A_is_hyper && !C_empty)
    {

        //----------------------------------------------------------------------
        // trim the leading end of Ah so that it starts with jmin:...
        //----------------------------------------------------------------------

        if (jmin > 0)
        {
            bool found ;
            int64_t kleft = 0 ;
            int64_t kright = anvec-1 ;
            GB_BINARY_SPLIT_SEARCH (jmin, Ah, kleft, kright, found) ;
            // advance the (aliased) pointers past the trimmed prefix
            Ah += kleft ;
            Ap += kleft ;
            anvec -= kleft ;
        }

        //----------------------------------------------------------------------
        // trim the trailing end of Ah so that it ends with ..:jmax
        //----------------------------------------------------------------------

        if (jmax < avdim-1)
        {
            bool found ;
            int64_t kleft = 0 ;
            int64_t kright = anvec-1 ;
            GB_BINARY_SPLIT_SEARCH (jmax, Ah, kleft, kright, found) ;
            anvec = (found) ? (kleft + 1) : kleft ;
        }

        // Ah has been trimmed
        ASSERT (GB_IMPLIES (anvec > 0, jmin <= Ah [0] && Ah [anvec-1] <= jmax));
    }

    // Ah may now be empty, after being trimmed
    C_empty = C_empty || (anvec == 0) ;

    //--------------------------------------------------------------------------
    // determine # of threads to use
    //--------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nthreads = 1, ntasks = 1 ;
    int max_ntasks = nthreads_max * 8 ;
    int64_t *restrict Count = NULL ;        // size max_ntasks+1

    #define GB_GET_NTHREADS_AND_NTASKS(work)                        \
    {                                                               \
        nthreads = GB_nthreads (work, chunk, nthreads_max) ;        \
        ntasks = (nthreads == 1) ? 1 : (8 * nthreads) ;             \
        ntasks = GB_IMIN (ntasks, work) ;                           \
        ntasks = GB_IMAX (ntasks, 1) ;                              \
    }

    //--------------------------------------------------------------------------
    // allocate workspace
    //--------------------------------------------------------------------------

    GB_CALLOC_MEMORY (Count, max_ntasks+1, sizeof (int64_t)) ;
    if (Count == NULL)
    {
        // out of memory
        return (GB_OUT_OF_MEMORY) ;
    }

    //--------------------------------------------------------------------------
    // compute Cnvec and determine the format of Ch
    //--------------------------------------------------------------------------

    // Ch is an explicit or implicit array of size Cnvec <= nJ.  jC = Ch [kC]
    // if C(:,jC) is the (kC)th vector of C.  If NULL, then C is standard, and
    // jC == kC.  jC is in the range 0 to nJ-1.

    int64_t *restrict Ch = NULL ;
    int64_t *restrict Ap_start = NULL ;
    int64_t *restrict Ap_end = NULL ;

    int64_t Cnvec = 0 ;

    int64_t jbegin = Jcolon [GxB_BEGIN] ;
    int64_t jinc   = Jcolon [GxB_INC  ] ;

    if (C_empty)
    {

        //----------------------------------------------------------------------
        // C is an empty hypersparse matrix
        //----------------------------------------------------------------------

        ;

    }
    else if (!A_is_hyper)
    {

        //----------------------------------------------------------------------
        // both C and A are standard matrices
        //----------------------------------------------------------------------

        Cnvec = nJ ;
        GB_GET_NTHREADS_AND_NTASKS (nJ) ;

    }
    else if (Jkind == GB_ALL || Jkind == GB_RANGE)
    {

        //----------------------------------------------------------------------
        // J is ":" or jbegin:jend
        //----------------------------------------------------------------------

        // Ch is a shifted copy of the trimmed Ah, of length Cnvec = anvec.
        // so kA = kC, and jC = Ch [kC] = jA - jmin.  Ap has also been trimmed.

        Cnvec = anvec ;
        ASSERT (Cnvec <= nJ) ;
        GB_GET_NTHREADS_AND_NTASKS (anvec) ;

    }
    else if (Jkind == GB_STRIDE && anvec < nJ * 64)
    {

        //----------------------------------------------------------------------
        // J is jbegin:jinc:jend, but J is large
        //----------------------------------------------------------------------

        // The case for Jkind == GB_STRIDE can be done by either this method,
        // or the one below.  This takes O(anvec) time, and the one below
        // takes O(nj*log2(anvec)), so use this method if anvec < nj * 64.

        // Ch is a list of length Cnvec, where Cnvec is the length of
        // the intersection of Ah and jbegin:jinc:jend.

        // count the length of Ch
        Cnvec = 0 ;
        GB_GET_NTHREADS_AND_NTASKS (anvec) ;

        // scan all of Ah and check each entry if it appears in J
        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
        for (int tid = 0 ; tid < ntasks ; tid++)
        {
            int64_t kA_start, kA_end, my_Cnvec = 0 ;
            // partition in reverse for a negative stride, so the per-task
            // counts in Count match the order Ch is filled in later
            GB_PARTITION (kA_start, kA_end, anvec,
                (jinc > 0) ? tid : (ntasks-tid-1), ntasks) ;
            for (int64_t kA = kA_start ; kA < kA_end ; kA++)
            {
                int64_t jA = Ah [kA] ;
                if (GB_ij_is_in_list (J, nJ, jA, GB_STRIDE, Jcolon))
                {
                    my_Cnvec++ ;
                }
            }
            Count [tid] = my_Cnvec ;
        }

        GB_cumsum (Count, ntasks, NULL, 1) ;
        Cnvec = Count [ntasks] ;

    }
    else // Jkind == GB_LIST or GB_STRIDE
    {

        //----------------------------------------------------------------------
        // J is an explicit list, or jbegin:jinc:end
        //----------------------------------------------------------------------

        // Ch is an explicit list: the intersection of Ah and J

        // count the length of Ch
        Cnvec = 0 ;
        GB_GET_NTHREADS_AND_NTASKS (nJ) ;

        // scan all of J and check each entry if it appears in Ah
        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
        for (int tid = 0 ; tid < ntasks ; tid++)
        {
            int64_t jC_start, jC_end, my_Cnvec = 0 ;
            GB_PARTITION (jC_start, jC_end, nJ, tid, ntasks) ;
            for (int64_t jC = jC_start ; jC < jC_end ; jC++)
            {
                int64_t jA = GB_ijlist (J, jC, Jkind, Jcolon) ;
                bool found ;
                int64_t kA = 0 ;
                int64_t kright = anvec-1 ;
                GB_BINARY_SEARCH (jA, Ah, kA, kright, found) ;
                if (found) my_Cnvec++ ;
            }
            Count [tid] = my_Cnvec ;
        }

        GB_cumsum (Count, ntasks, NULL, 1) ;
        Cnvec = Count [ntasks] ;
    }

    //--------------------------------------------------------------------------
    // allocate Ch, Ap_start, and Ap_end
    --------------------------------------------------------------------------

    C_empty = C_empty || (Cnvec == 0) ;

    // C is hypersparse if A is hypersparse, or if C is empty
    bool C_is_hyper = A_is_hyper || C_empty ;

    if (C_is_hyper)
    {
        GB_MALLOC_MEMORY (Ch, Cnvec, sizeof (int64_t)) ;
        if (Ch == NULL)
        {
            GB_FREE_WORK ;
            return (GB_OUT_OF_MEMORY) ;
        }
    }

    if (Cnvec > 0)
    {
        GB_MALLOC_MEMORY (Ap_start, Cnvec, sizeof (int64_t)) ;
        GB_MALLOC_MEMORY (Ap_end,   Cnvec, sizeof (int64_t)) ;
        if (Ap_start == NULL || Ap_end == NULL)
        {
            // out of memory
            GB_FREE_WORK ;
            GB_FREE_MEMORY (Ch,       Cnvec, sizeof (int64_t)) ;
            GB_FREE_MEMORY (Ap_start, Cnvec, sizeof (int64_t)) ;
            GB_FREE_MEMORY (Ap_end,   Cnvec, sizeof (int64_t)) ;
            return (GB_OUT_OF_MEMORY) ;
        }
    }

    //--------------------------------------------------------------------------
    // create Ch, Ap_start, and Ap_end
    //--------------------------------------------------------------------------

    // For the (kC)th vector of C, which corresponds to the (kA)th vector of A,
    // pA = Ap_start [kC] and pA_end = Ap_end [kC] are pointers to the range
    // of entries in A(imin:imax,kA).

    if (C_empty)
    {

        //----------------------------------------------------------------------
        // C is an empty hypersparse matrix
        //----------------------------------------------------------------------

        ;

    }
    else if (!A_is_hyper)
    {

        //----------------------------------------------------------------------
        // both C and A are standard matrices
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (int64_t jC = 0 ; jC < nJ ; jC++)
        {
            int64_t jA = GB_ijlist (J, jC, Jkind, Jcolon) ;
            GB_find_Ap_start_end (jA, Ap, Ai, avlen, imin, imax,
                jC, nzombies, Ap_start, Ap_end) ;
        }

    }
    else if (Jkind == GB_ALL || Jkind == GB_RANGE)
    {

        //----------------------------------------------------------------------
        // J is ":" or jbegin:jend
        //----------------------------------------------------------------------

        // C and A are both hypersparse.  Ch is a shifted copy of the trimmed
        // Ah, of length Cnvec = anvec.  so kA = kC.  Ap has also been trimmed.

        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (int64_t kC = 0 ; kC < Cnvec ; kC++)
        {
            int64_t kA = kC ;
            int64_t jA = Ah [kA] ;
            int64_t jC = jA - jmin ;
            Ch [kC] = jC ;
            GB_find_Ap_start_end (kA, Ap, Ai, avlen, imin, imax,
                kC, nzombies, Ap_start, Ap_end) ;
        }

    }
    else if (Jkind == GB_STRIDE && anvec < nJ * 64)
    {

        //----------------------------------------------------------------------
        // J is jbegin:jinc:jend where jinc may be positive or negative
        //----------------------------------------------------------------------

        // C and A are both hypersparse.  Ch is constructed by scanning all
        // vectors in Ah [0..anvec-1] and checking if they appear in the
        // jbegin:jinc:jend sequence.  Count [tid] (cumulative sums from the
        // counting pass above) gives each task its starting position in Ch.

        if (jinc > 0)
        {
            #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
            for (int tid = 0 ; tid < ntasks ; tid++)
            {
                int64_t kA_start, kA_end ;
                GB_PARTITION (kA_start, kA_end, anvec, tid, ntasks) ;
                int64_t kC = Count [tid] ;
                for (int64_t kA = kA_start ; kA < kA_end ; kA++)
                {
                    int64_t jA = Ah [kA] ;
                    if (GB_ij_is_in_list (J, nJ, jA, GB_STRIDE, Jcolon))
                    {
                        int64_t jC = (jA - jbegin) / jinc ;
                        Ch [kC] = jC ;
                        GB_find_Ap_start_end (kA, Ap, Ai, avlen, imin, imax,
                            kC, nzombies, Ap_start, Ap_end) ;
                        kC++ ;
                    }
                }
            }
        }
        else
        {
            // negative stride: scan Ah in reverse so Ch is built in order
            #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
            for (int tid = 0 ; tid < ntasks ; tid++)
            {
                int64_t kA_start, kA_end ;
                GB_PARTITION (kA_start, kA_end, anvec, ntasks-tid-1, ntasks) ;
                int64_t kC = Count [tid] ;
                for (int64_t kA = kA_end-1 ; kA >= kA_start ; kA--)
                {
                    int64_t jA = Ah [kA] ;
                    if (GB_ij_is_in_list (J, nJ, jA, GB_STRIDE, Jcolon))
                    {
                        int64_t jC = (jA - jbegin) / jinc ;
                        Ch [kC] = jC ;
                        GB_find_Ap_start_end (kA, Ap, Ai, avlen, imin, imax,
                            kC, nzombies, Ap_start, Ap_end) ;
                        kC++ ;
                    }
                }
            }
        }

    }
    else // Jkind == GB_LIST or GB_STRIDE
    {

        //----------------------------------------------------------------------
        // J is an explicit list, or jbegin:jinc:jend
        //----------------------------------------------------------------------

        // C and A are both hypersparse.  Ch is constructed by scanning the
        // list J, or the entire jbegin:jinc:jend sequence.  Each vector is
        // then found in Ah, via binary search.

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
        for (int tid = 0 ; tid < ntasks ; tid++)
        {
            int64_t jC_start, jC_end ;
            GB_PARTITION (jC_start, jC_end, nJ, tid, ntasks) ;
            int64_t kC = Count [tid] ;
            for (int64_t jC = jC_start ; jC < jC_end ; jC++)
            {
                int64_t jA = GB_ijlist (J, jC, Jkind, Jcolon) ;
                bool found ;
                int64_t kA = 0 ;
                int64_t kright = anvec-1 ;
                GB_BINARY_SEARCH (jA, Ah, kA, kright, found) ;
                if (found)
                {
                    ASSERT (jA == Ah [kA]) ;
                    Ch [kC] = jC ;
                    GB_find_Ap_start_end (kA, Ap, Ai, avlen, imin, imax,
                        kC, nzombies, Ap_start, Ap_end) ;
                    kC++ ;
                }
            }
        }
    }

    //--------------------------------------------------------------------------
    // check result
    //--------------------------------------------------------------------------

    #ifdef GB_DEBUG
    for (int64_t kC = 0 ; kC < Cnvec ; kC++)
    {
        // jC is the (kC)th vector of C = A(I,J)
        int64_t jC = (Ch == NULL) ? kC : Ch [kC] ;
        int64_t jA = GB_ijlist (J, jC, Jkind, Jcolon) ;
        // jA is the corresponding (kA)th vector of A.
        int64_t kA = 0 ;
        int64_t pright = A->nvec - 1 ;
        int64_t pA_start_all, pA_end_all ;
        bool found = GB_lookup (A->is_hyper, A->h, A->p, &kA, pright, jA,
            &pA_start_all, &pA_end_all) ;
        if (found && A->is_hyper)
        {
            ASSERT (jA == A->h [kA]) ;
        }
        int64_t pA      = Ap_start [kC] ;
        int64_t pA_end  = Ap_end   [kC] ;
        int64_t ajnz = pA_end - pA ;
        if (ajnz == avlen)
        {
            // A(:,kA) is dense; Ai [pA:pA_end-1] is the entire vector.
            // C(:,kC) will have exactly nI entries.
            ASSERT (pA     == pA_start_all) ;
            ASSERT (pA_end == pA_end_all  ) ;
            ;
        }
        else if (ajnz > 0)
        {
            // A(imin:imax,kA) has at least one entry, in Ai [pA:pA_end-1]
            ASSERT (imin <= GB_Ai (pA)) ;
            ASSERT (GB_Ai (pA_end-1) <= imax) ;
            ASSERT (pA_start_all <= pA && pA < pA_end && pA_end <= pA_end_all) ;
        }
        else
        {
            // A(imin:imax,kA) and C(:,kC) are empty
            ;
        }
    }
    #endif

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    GB_FREE_WORK ;
    (*p_Ch        ) = Ch ;
    (*p_Ap_start  ) = Ap_start ;
    (*p_Ap_end    ) = Ap_end ;
    (*p_Cnvec     ) = Cnvec ;
    (*p_need_qsort) = need_qsort ;
    (*p_Ikind     ) = Ikind ;
    (*p_nI        ) = nI ;
    (*p_nJ        ) = nJ ;
    return (GrB_SUCCESS) ;
}
|
dpapimk_fmt_plug.c | /* DPAPI masterkey file version 1 and 2 cracker by
* Fist0urs <jean-christophe.delaunay at synacktiv.com>
*
* This software is Copyright (c) 2017
* Fist0urs <jean-christophe.delaunay at synacktiv.com>,
* and it is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* All credits for the algorithm go to "dpapick" project,
* https://bitbucket.org/jmichel/dpapick
* and Dhiru Kholia <dhiru.kholia at gmail.com> for his
* work on the DPAPI masterkey file version 1 implementation.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_DPAPImk;
#elif FMT_REGISTERS_H
john_register_one(&fmt_DPAPImk);
#else
#include <string.h>
#include <openssl/des.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 4
#endif
#endif
#include "arch.h"
#include "misc.h"
#include "memory.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "unicode.h"
#include "aes.h"
#include "sha.h"
#include "md4.h"
#include "hmac_sha.h"
#include "memdbg.h"
#define DPAPI_CRAP_LOGIC
#include "pbkdf2_hmac_sha512.h"
#include "pbkdf2_hmac_sha1.h"
#define FORMAT_LABEL "DPAPImk"
#define FORMAT_TAG "$DPAPImk$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG) - 1)
#define FORMAT_NAME "DPAPI masterkey file v1 and v2"
#if defined(SIMD_COEF_64) && defined(SIMD_COEF_32)
#define ALGORITHM_NAME "SHA1/MD4 PBKDF2-(SHA1/SHA512)-DPAPI-variant 3DES/AES256 " SHA1_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "SHA1/MD4 PBKDF2-(SHA1/SHA512)-DPAPI-variant 3DES/AES256 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 0
#define PLAINTEXT_LENGTH 125
#define MAX_CT_LEN 4096
#define MAX_SID_LEN 1024
#define SALT_SIZE sizeof(*cur_salt)
#define BINARY_ALIGN 1
#define SALT_ALIGN sizeof(int)
#define MAX_IV_LEN 16
#define KEY_LEN2 32
#define IV_LEN2 16
#define DIGEST_LEN2 64
#define KEY_LEN1 24
#define IV_LEN1 8
#define DIGEST_LEN1 20
#if defined(SIMD_COEF_64) && defined(SIMD_COEF_32)
#define MIN_KEYS_PER_CRYPT (SSE_GROUP_SZ_SHA1 * SSE_GROUP_SZ_SHA512)
#define MAX_KEYS_PER_CRYPT (SSE_GROUP_SZ_SHA1 * SSE_GROUP_SZ_SHA512)
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
/*
 * Self-test vectors. Hash layout:
 *   $DPAPImk$version*credtype*SID*cipher*hash*iterations*iv*ct_len*ct
 * version 1 = 3DES/SHA1, version 2 = AES256/SHA512;
 * credtype 1 = local credentials, 2 = domain credentials.
 */
static struct fmt_tests dpapimk_tests[] = {
	/* new samples, including other Windows versions and both local and domain credentials */
	{"$DPAPImk$1*1*S-15-21-447321867-460417387-480872410-1240*des3*sha1*24000*9b49e2d3b25103d03e936fdf66b94d26*208*ec96025ed4b023ebfa52bdfd91dfeb64edf3f3970b347ee8bb8adfb2a686a0a34792d40074edd372f346da8fcd02cc5d4182c2fd09f4549ec106273926edd05c42e4b5fc8b8758a7c48f6ddae273f357bcb645c8ad16e3161e8a9dbb5002454f4db5ef0d5d7a93ac", "bouledepetanque"},
	{"$DPAPImk$1*2*S-15-21-458698633-447735394-485599537-1788*des3*sha1*24000*96b957d9bf0f8846399e70a84431b595*208*0ee9fa2baf2cf0efda81514376aef853c6c93a5776fa6af66a869f44c50ac80148b7488f52b4c52c305e89a497a583e17cca4a9bab580668a8a5ce2eee083382c98049e481e47629b5815fb16247e3bbfa62c454585aaaf51ef15555a355fcf925cff16c0bb006f8", "jordifirstcredit"},
	{"$DPAPImk$2*1*S-15-21-417226446-481759312-475941522-1494*aes256*sha512*8000*1e6b7a71a079bc12e71c75a6bcfd865c*288*5b5d651e538e5185f7d6939ba235ca2d8a2b9726a6e95b59844320ba1d1f22282527210bc784d22075e596d113927761a644ad4057cb4dbb497bd64ee6c630930a4ba388eadb59484ec2be7fb4cc79299a87f341d002d25b5b187c71fa19417ec9d1b65568a79c962cb3b5bcb1b8df5f968669af35eec5a24ed5dcee46deef42bfee5ad665dd4de56ccd9c6ba26b2acd", "PaulSmiteSuper160"},
	{"$DPAPImk$2*2*S-15-21-402916398-457068774-444912990-1699*aes256*sha512*17000*4c51109a901e4be7f1e208f82a56e690*288*bb80d538ac4185eb0382080fda0d405bb74da3a6b98e96f222292b819fa9168cf1571e9bc9c698ad10daf850ab34de1a1765cfd5c0fb8a63a413a767d241dfe6355804af259d24f6be7282daac0a9e02d7fbe20675afb3733141995990a6d11012edfb7e81b49c0e1132dbc4503dd2206489e4f512e4fe9d573566c9d8973188b8d1a87610b8bef09e971270a376a52b", "Juan-Carlos"},
	/* old samples, with less iterations, preserved for backward compatibiliy */
	{"$DPAPImk$1*1*S-1-5-21-1482476501-1659004503-725345543-1003*des3*sha1*4000*b3d62a0b06cecc236fe3200460426a13*208*d3841257348221cd92caf4427a59d785ed1474cab3d0101fc8d37137dbb598ff1fd2455826128b2594b846934c073528f8648d750d3c8e6621e6f706d79b18c22f172c0930d9a934de73ea2eb63b7b44810d332f7d03f14d1c153de16070a5cab9324da87405c1c0", "openwall"},
	{"$DPAPImk$1*1*S-1-5-21-1482476501-1659004503-725345543-1005*des3*sha1*4000*c9cbd491f78ea6d512276b33f025bce8*208*091a13443cfc2ddb16dcf256ab2a6707a27aa22b49a9a9011ebf3bb778d0088c2896de31de67241d91df75306e56f835337c89cfb2f9afa940b4e7e019ead2737145032fac0bb34587a707d42da7e00b72601a730f5c848094d54c47c622e2f8c8d204c80ad061be", "JtRisthebest"},
	{NULL}
};
/* Candidate passwords stored as UTF-16LE, one slot per key index. */
static UTF16 (*saved_key)[PLAINTEXT_LENGTH + 1];
/* Per-index crack flags, written by crypt_all(), read by cmp_*(). */
static int *cracked;

/* Parsed form of one $DPAPImk$ hash (see valid() for the text layout). */
static struct custom_salt {
	uint32_t version;        /* 1 = 3DES/SHA1 path, 2 = AES256/SHA512 path */
	uint32_t cred_type;      /* 1 = local credentials, 2 = domain credentials */
	UTF16 SID[MAX_SID_LEN + 1];  /* user SID as UTF-16; HMAC'd with the password hash */
	//unsigned char cipher_algo[20]; /* here only for possible other algorithms */
	//unsigned char hash_algo[20]; /* same */
	uint32_t pbkdf2_iterations;
	unsigned char iv[16];    /* 16 bytes; doubles as the PBKDF2 salt */
	uint32_t encrypted_len;  /* number of bytes used in encrypted[] */
	unsigned char encrypted[512];  /* raw encrypted masterkey blob */
} *cur_salt;
/*
 * One-time format setup: scale the key batch for OpenMP and allocate the
 * per-candidate buffers.
 */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int threads = omp_get_max_threads();

	if (threads > 1) {
		/* min scales by thread count, max additionally by OMP_SCALE */
		self->params.min_keys_per_crypt *= threads;
		self->params.max_keys_per_crypt *= threads * OMP_SCALE;
	}
#endif
	/* Sized after any OpenMP scaling above; zero-initialized. */
	saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key));
	cracked = mem_calloc(self->params.max_keys_per_crypt, sizeof(*cracked));
}
/* Tear-down: release the buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(cracked);
}
/*
 * Structural validation of a candidate hash line:
 *   $DPAPImk$version*credtype*SID*cipher*hash*iterations*iv*ct_len*ct
 * Returns 1 when the line is well formed, 0 otherwise. No crypto here.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p;
	char *ctcopy;
	char *keeptr;
	int length1, length2, extra;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
		return 0;
	/* strtokm() writes into its argument, so tokenize a private copy */
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += FORMAT_TAG_LEN; /* skip over "$DPAPImk$" */
	if ((p = strtokm(ctcopy, "*")) == NULL) /* version */
		goto err;
	if (!isdec(p))
		goto err;
	if (!atoi(p)) /* version 0 is never valid */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* credential type */
		goto err;
	if (!isdec(p))
		goto err;
	if (!atoi(p))
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* SID */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* cipher algorithm */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* hash algorithm */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* iterations */
		goto err;
	if (!isdec(p))
		goto err;
	if (!atoi(p))
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* IV */
		goto err;
	if (strlen(p) != 32 || !ishexlc(p)) /* 32 hex digits = 16-byte IV */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* encrypted length */
		goto err;
	if (!isdec(p))
		goto err;
	length1 = atoi(p);
	if ((p = strtokm(NULL, "*")) == NULL) /* encrypted part */
		goto err;
	/* declared length (in hex digits) must match the ciphertext and
	 * fit cur_salt->encrypted[512]; extra flags trailing garbage */
	length2 = hexlenl(p, &extra);
	if (length2 < 64 * 2 || length2 > 512 * 2 || (length1 != length2) || extra)
		goto err;
	MEM_FREE(keeptr);
	return 1;
err:
	MEM_FREE(keeptr);
	return 0;
}
/*
 * Parse a ciphertext (already accepted by valid()) into a struct custom_salt.
 * Returns a pointer to a static instance; the framework copies SALT_SIZE
 * bytes out of it, so the static lifetime is intentional.
 */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	char *p;
	int i, SID_size;
	static struct custom_salt cs;

	memset(&cs, 0, sizeof(cs));
	ctcopy += FORMAT_TAG_LEN; /* skip over "$DPAPImk$" */
	p = strtokm(ctcopy, "*");
	cs.version = atoi(p); /* version */
	p = strtokm(NULL, "*");
	cs.cred_type = atoi(p); /* credential type */
	p = strtokm(NULL, "*"); /* SID */
	/*
	 * strtokm() NUL-terminates the token in place, so it can be converted
	 * directly. The previous code made an intermediate copy through an
	 * UNCHECKED bare malloc() (NULL dereference on OOM) and released it
	 * with free() instead of MEM_FREE; the copy was entirely redundant.
	 */
	SID_size = enc_to_utf16(cs.SID, PLAINTEXT_LENGTH, (UTF8 *) p, strlen(p) + 1);
	if (SID_size < 0) {
		error_msg("SID_size < 0 !");
	}
	p = strtokm(NULL, "*"); /* cipher algorithm (informational only) */
	p = strtokm(NULL, "*"); /* hash algorithm (informational only) */
	p = strtokm(NULL, "*"); /* pbkdf2 iterations */
	cs.pbkdf2_iterations = (uint32_t) atoi(p);
	p = strtokm(NULL, "*"); /* iv: 32 hex digits -> 16 bytes */
	for (i = 0; i < 16; i++)
		cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "*"); /* encrypted length, counted in hex digits */
	cs.encrypted_len = (uint32_t) atoi(p) / 2;
	p = strtokm(NULL, "*"); /* encrypted masterkey blob */
	for (i = 0; i < cs.encrypted_len; i++)
		cs.encrypted[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)&cs;
}
/* Select the salt (from get_salt()) used by subsequent crypt_all() calls. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
/*
 * Verify a version 1 masterkey blob (3DES-EDE-CBC + HMAC-SHA1).
 * key:     24-byte 3DES key followed by the 8-byte IV (derived via PBKDF2)
 * pwdhash: password hash; 32 bytes are read (the caller zero-pads the
 *          20-byte SHA1 digest up to 32, see crypt_all())
 * data:    cur_salt->encrypted, cur_salt->encrypted_len bytes
 * Returns 0 (memcmp equal) when the password is correct.
 */
static int decrypt_v1(unsigned char *key, unsigned char *iv, unsigned char *pwdhash, unsigned char *data)
{
	unsigned char out[MAX_CT_LEN+16];
	unsigned char *last_key;
	unsigned char *hmacSalt;
	unsigned char *expected_hmac;
	unsigned char computed_hmac[DIGEST_LEN1];
	unsigned char encKey[DIGEST_LEN1];
	DES_cblock ivec;
	DES_key_schedule ks1, ks2, ks3;

	memset(out, 0, sizeof(out));
	/* split the 24-byte key into the three single-DES subkeys */
	DES_set_key((DES_cblock *) key, &ks1);
	DES_set_key((DES_cblock *) (key + 8), &ks2);
	DES_set_key((DES_cblock *) (key + 16), &ks3);
	memcpy(ivec, iv, 8);
	DES_ede3_cbc_encrypt(data, out, cur_salt->encrypted_len, &ks1, &ks2, &ks3, &ivec, DES_DECRYPT);
	/* decrypted layout: [0..15] HMAC salt, [16..] stored HMAC,
	 * last 64 bytes = masterkey material */
	hmacSalt = out;
	expected_hmac = out + 16;
	last_key = out + cur_salt->encrypted_len - 64;
	/* recompute HMAC(masterkey) keyed by HMAC(pwdhash, hmacSalt) */
	hmac_sha1(pwdhash, 32, hmacSalt, 16, encKey, DIGEST_LEN1);
	hmac_sha1(encKey, DIGEST_LEN1, last_key, 64, computed_hmac, DIGEST_LEN1);
	return memcmp(expected_hmac, computed_hmac, DIGEST_LEN1);
}
/*
 * Verify a version 2 masterkey blob (AES-256-CBC + HMAC-SHA512).
 * key:     32-byte AES key followed by the 16-byte IV (derived via PBKDF2)
 * pwdhash: password hash; only the first 20 bytes are used here
 * data:    cur_salt->encrypted, cur_salt->encrypted_len bytes
 * Returns 0 (memcmp equal) when the password is correct.
 */
static int decrypt_v2(unsigned char *key, unsigned char *iv, unsigned char *pwdhash, unsigned char *data)
{
	unsigned char out[MAX_CT_LEN+16];
	unsigned char *last_key;
	unsigned char *hmacSalt;
	unsigned char *expected_hmac;
	unsigned char hmacComputed[DIGEST_LEN2];
	unsigned char encKey[DIGEST_LEN2];
	AES_KEY aeskey;

	AES_set_decrypt_key(key, KEY_LEN2 * 8, &aeskey);
	/* NOTE: AES_cbc_encrypt advances iv in place; iv points into the
	 * caller's derived-key buffer, which is not reused afterwards */
	AES_cbc_encrypt(data, out, cur_salt->encrypted_len, &aeskey, iv, AES_DECRYPT);
	/* same blob layout as v1: [0..15] HMAC salt, [16..] stored HMAC,
	 * last 64 bytes = masterkey material */
	hmacSalt = out;
	expected_hmac = out + 16;
	last_key = out + cur_salt->encrypted_len - 64;
	hmac_sha512(pwdhash, 20, hmacSalt, IV_LEN2, encKey, DIGEST_LEN2);
	hmac_sha512(encKey, DIGEST_LEN2, last_key, 64, hmacComputed, DIGEST_LEN2);
	return memcmp(expected_hmac, hmacComputed, DIGEST_LEN2);
}
/*
 * Main work loop. For each candidate password:
 *   1. hash it: SHA1 for local credentials, MD4 (NTLM) for domain ones;
 *   2. HMAC-SHA1 the UTF-16 SID with that hash -> PBKDF2 password (20 bytes);
 *   3. PBKDF2 (SHA1 for v1, SHA512 for v2) with the 16-byte IV as salt,
 *      producing cipher key + IV;
 *   4. decrypt_v1()/decrypt_v2() verifies the masterkey blob.
 * Results land in cracked[]; returns the candidate count unchanged.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) {
		unsigned char *passwordBuf;
		int passwordBufSize, digestlen = 20;
		/* out: password hash, later the derived key+IV; out2: PBKDF2 password */
		unsigned char out[MAX_KEYS_PER_CRYPT][KEY_LEN2 + IV_LEN2];
		unsigned char out2[MAX_KEYS_PER_CRYPT][KEY_LEN2 + IV_LEN2];
		SHA_CTX ctx;
		MD4_CTX ctx2;
		int i;
#if defined(SIMD_COEF_64) && defined(SIMD_COEF_32)
		int lens[MAX_KEYS_PER_CRYPT];
		unsigned char *pin[MAX_KEYS_PER_CRYPT];
		union {
			unsigned char *pout[MAX_KEYS_PER_CRYPT];
			unsigned char *poutc;
		} x;
		/* number of SIMD batches needed for this block of keys */
		int loops = MAX_KEYS_PER_CRYPT;

		if (cur_salt->version == 1)
			loops = MAX_KEYS_PER_CRYPT / SSE_GROUP_SZ_SHA1;
		else if (cur_salt->version == 2)
			loops = MAX_KEYS_PER_CRYPT / SSE_GROUP_SZ_SHA512;
#endif
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			passwordBuf = (unsigned char*)saved_key[index+i];
			passwordBufSize = strlen16((UTF16*)passwordBuf) * 2;
			/* local credentials */
			if (cur_salt->cred_type == 1) {
				SHA1_Init(&ctx);
				SHA1_Update(&ctx, passwordBuf, passwordBufSize);
				SHA1_Final(out[i], &ctx);
				digestlen = 20;
			}
			/* domain credentials */
			else if (cur_salt->cred_type == 2) {
				MD4_Init(&ctx2);
				MD4_Update(&ctx2, passwordBuf, passwordBufSize);
				MD4_Final(out[i], &ctx2);
				digestlen = 16;
			}
			/* HMAC the SID, including its UTF-16 NUL terminator */
			passwordBuf = (unsigned char*)cur_salt->SID;
			passwordBufSize = (strlen16(cur_salt->SID) + 1) * 2;
			hmac_sha1(out[i], digestlen, passwordBuf, passwordBufSize, out2[i], 20);
#if defined(SIMD_COEF_64) && defined(SIMD_COEF_32)
			lens[i] = 20;
			pin[i] = (unsigned char*)out2[i];
			x.pout[i] = out[i];
#endif
		}
		/* PBKDF2 stage: v1 derives 3DES key+IV, v2 derives AES key+IV */
#if defined(SIMD_COEF_64) && defined(SIMD_COEF_32)
		if (cur_salt->version == 1)
			for (i = 0; i < loops; i++)
				pbkdf2_sha1_sse((const unsigned char**)(pin + i * SSE_GROUP_SZ_SHA1), &lens[i * SSE_GROUP_SZ_SHA1], cur_salt->iv, MAX_IV_LEN, cur_salt->pbkdf2_iterations, x.pout + (i * SSE_GROUP_SZ_SHA1), KEY_LEN1 + IV_LEN1, 0);
		else if (cur_salt->version == 2)
			for (i = 0; i < loops; i++)
				pbkdf2_sha512_sse((const unsigned char**)(pin + i * SSE_GROUP_SZ_SHA512), &lens[i * SSE_GROUP_SZ_SHA512], cur_salt->iv, MAX_IV_LEN, cur_salt->pbkdf2_iterations, x.pout + (i * SSE_GROUP_SZ_SHA512), KEY_LEN2 + IV_LEN2, 0);
#else
		if (cur_salt->version == 1)
			pbkdf2_sha1(out2[0], 20, cur_salt->iv, MAX_IV_LEN, cur_salt->pbkdf2_iterations, out[0], KEY_LEN1 + IV_LEN1, 0);
		else if (cur_salt->version == 2)
			pbkdf2_sha512(out2[0], 20, cur_salt->iv, MAX_IV_LEN, cur_salt->pbkdf2_iterations, out[0], KEY_LEN2 + IV_LEN2, 0);
#endif
		if (cur_salt->version == 1) {
			/* decrypt will use 32 bytes, we only initialized 20 so far */
			for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
				memset(out2[i] + 20, 0, 32 - 20);
				if (decrypt_v1(out[i], out[i] + KEY_LEN1, out2[i], cur_salt->encrypted) == 0)
					cracked[index+i] = 1;
				else
					cracked[index+i] = 0;
			}
		}
		else if (cur_salt->version == 2) {
			for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
				if (decrypt_v2(out[i], out[i] + KEY_LEN2, out2[i], cur_salt->encrypted) == 0)
					cracked[index+i] = 1;
				else
					cracked[index+i] = 0;
			}
		}
	}
	return count;
}
/* Batch check: did crypt_all() crack any candidate in [0, count)? */
static int cmp_all(void *binary, int count)
{
	int i;

	for (i = count - 1; i >= 0; i--) {
		if (cracked[i])
			return 1;
	}
	return 0;
}
/* Per-index check: nonzero iff crypt_all() marked this candidate cracked. */
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}
/* Full verification already happened in crypt_all(); nothing left to do. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Store a candidate password for the given index, converted to UTF-16LE. */
static void dpapimk_set_key(char *key, int index)
{
	/* Convert key to UTF-16LE (--encoding aware) */
	enc_to_utf16(saved_key[index], PLAINTEXT_LENGTH, (UTF8*)key, strlen(key));
}
/* Return the stored candidate converted back to the configured encoding. */
static char *get_key(int index)
{
	return (char*)utf16_to_enc(saved_key[index]);
}
static unsigned int iteration_count(void *salt)
{
struct custom_salt *my_salt;
my_salt = salt;
return (unsigned int) my_salt->pbkdf2_iterations;
}
/* John the Ripper format descriptor wiring the callbacks above together. */
struct fmt_main fmt_DPAPImk = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_UNICODE | FMT_UTF8,
		{
			/* tunable cost name, paired with iteration_count below */
			"iteration count",
		},
		{ FORMAT_TAG },
		dpapimk_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		/* no binary: verification is done entirely inside crypt_all() */
		fmt_default_binary,
		get_salt,
		{
			iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		dpapimk_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
parallel_variable_transfer_utility.h | /*
==============================================================================
KratosStructuralApplication
A library based on:
Kratos
A General Purpose Software for Multi-Physics Finite Element Analysis
Version 1.0 (Released on march 05, 2007).
Copyright 2007
Pooyan Dadvand, Riccardo Rossi, Janosch Stascheit, Felix Nagel
pooyan@cimne.upc.edu
rrossi@cimne.upc.edu
janosch.stascheit@rub.de
nagel@sd.rub.de
- CIMNE (International Center for Numerical Methods in Engineering),
Gran Capita' s/n, 08034 Barcelona, Spain
- Ruhr-University Bochum, Institute for Structural Mechanics, Germany
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following condition:
Distribution of this code for any commercial purpose is permissible
ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNERS.
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
==============================================================================
*/
/* *********************************************************
*
* Last Modified by: $Author: nelson $
* Date: $Date: 2008-12-09 15:23:36 $
* Revision: $Revision: 1.2 $
*
* ***********************************************************/
#if !defined(KRATOS_PARALLEL_VARIABLE_TRANSFER_UTILITY_INCLUDED )
#define KRATOS_PARALLEL_VARIABLE_TRANSFER_UTILITY_INCLUDED
//System includes
//External includes
#include "boost/smart_ptr.hpp"
//Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/variables.h"
#include "containers/array_1d.h"
#include "includes/element.h"
#include "integration/integration_point.h"
#include "geometries/geometry.h"
#include "linear_solvers/skyline_lu_factorization_solver.h"
#include "spaces/ublas_space.h"
#include "spaces/parallel_ublas_space.h"
#include "geometries/hexahedra_3d_8.h"
#include "geometries/tetrahedra_3d_4.h"
namespace Kratos
{
class ParallelVariableTransferUtility
{
public:
    // Convenience aliases used throughout the transfer routines.
    typedef Dof<double> TDofType;
    typedef PointerVectorSet<TDofType, IndexedObject> DofsArrayType;
    typedef ModelPart::ElementsContainerType ElementsArrayType;
    // NOTE(review): raw double* as "container" looks legacy — confirm users.
    typedef double* ContainerType;
    typedef Element::DofsVectorType DofsVectorType;
    typedef Geometry<Node<3> >::IntegrationPointsArrayType IntegrationPointsArrayType;
    typedef Geometry<Node<3> >::GeometryType GeometryType;
    typedef Geometry<Node<3> >::CoordinatesArrayType CoordinatesArrayType;
    typedef ParallelUblasSpace<double, CompressedMatrix, Vector> SpaceType;
    typedef UblasSpace<double, Matrix, Vector> ParallelLocalSpaceType;
    /**
     * Constructor. Stateless; only logs its creation.
     */
    ParallelVariableTransferUtility()
    {
        std::cout << "ParallelVariableTransferUtility created" << std::endl;
    }

    /**
     * Destructor. Virtual so the class can be safely subclassed.
     */
    virtual ~ParallelVariableTransferUtility()
    {}
/**
* Initializes elements of target model part.
* @param rTarget new/target model part
* KLUDGE: new model part instance is not automatically initialized
*/
void InitializeModelPart( ModelPart& rTarget )
{
for( ModelPart::ElementIterator it = rTarget.ElementsBegin();
it!= rTarget.ElementsEnd(); it++ )
{
(*it).Initialize();
}
}
    /**
     * Transfer of nodal solution step variables.
     * This Transfers all solution step variables from r_old_model_part
     * to r_new_model_part.
     * To cope with moved meshes, the source model_part is resetted to its
     * reference configuration temporarily!
     * @param rSource source model_part
     * @param rTarget target model_part
     * TODO: find more elegant way to check existence of variables in each node
     */
    void TransferNodalVariables(ModelPart& rSource, ModelPart& rTarget)
    {
        //reset source model part to reference configuration
        for( ModelPart::NodeIterator it = rSource.NodesBegin() ;
                it != rSource.NodesEnd(); it++ )
        {
            (*it).X() = (*it).X0();
            (*it).Y() = (*it).Y0();
            (*it).Z() = (*it).Z0();
        }
        //reset target model part to reference configuration
        for( ModelPart::NodeIterator it = rTarget.NodesBegin() ;
                it != rTarget.NodesEnd(); it++ )
        {
            (*it).X() = (*it).X0();
            (*it).Y() = (*it).Y0();
            (*it).Z() = (*it).Z0();
        }
        //time_target= time_source
        ProcessInfo SourceCurrentProcessInfo= rSource.GetProcessInfo();
        rTarget.CloneTimeStep(SourceCurrentProcessInfo[TIME]);
        ElementsArrayType& OldMeshElementsArray= rSource.Elements();
        Element correspondingElement;
//			  FixDataValueContainer newNodalValues;
//			  FixDataValueContainer oldNodalValues;
        Point<3>  localPoint;
        // For every target node, locate the source element containing it and
        // interpolate the nodal history variables at that local position.
        for(ModelPart::NodeIterator it = rTarget.NodesBegin() ;
                it != rTarget.NodesEnd() ; it++)
        {
            if(FindPartnerElement(*(it), OldMeshElementsArray,
                                  correspondingElement,localPoint))
            {
                //TransferVariables from Old Mesh to new Node
                if(it->HasDofFor(DISPLACEMENT_X)
                        || it->HasDofFor(DISPLACEMENT_Y)
                        || it->HasDofFor(DISPLACEMENT_Z))
                {
                    noalias(it->GetSolutionStepValue(DISPLACEMENT_NULL))=
                        MappedValue(correspondingElement,
                                    localPoint,DISPLACEMENT_NULL );
                    noalias(it->GetSolutionStepValue(DISPLACEMENT_EINS))=
                        MappedValue(correspondingElement,
                                    localPoint,DISPLACEMENT_EINS );
                    noalias(it->GetSolutionStepValue(DISPLACEMENT_NULL_DT))=
                        MappedValue(correspondingElement,
                                    localPoint,DISPLACEMENT_NULL_DT );
                    noalias(it->GetSolutionStepValue(ACCELERATION_NULL))=
                        MappedValue(correspondingElement,
                                    localPoint,ACCELERATION_NULL );
                    noalias(it->GetSolutionStepValue(DISPLACEMENT_OLD))=
                        MappedValue(correspondingElement,
                                    localPoint,DISPLACEMENT_OLD );
                }
                if(it->HasDofFor(WATER_PRESSURE))
                {
                    it->GetSolutionStepValue(WATER_PRESSURE_NULL)=
                        MappedValuePressure(correspondingElement, localPoint,
                                            WATER_PRESSURE_NULL);
                    it->GetSolutionStepValue(WATER_PRESSURE_EINS)=
                        MappedValuePressure(correspondingElement, localPoint,
                                            WATER_PRESSURE_EINS);
                    it->GetSolutionStepValue(WATER_PRESSURE_NULL_DT)=
                        MappedValuePressure(correspondingElement, localPoint,
                                            WATER_PRESSURE_NULL_DT);
                    it->GetSolutionStepValue(WATER_PRESSURE_NULL_ACCELERATION)=
                        MappedValuePressure(correspondingElement, localPoint,
                                            WATER_PRESSURE_NULL_ACCELERATION);
                }
                if(it->HasDofFor(AIR_PRESSURE))
                {
                    it->GetSolutionStepValue(AIR_PRESSURE_NULL)=
                        MappedValuePressure(correspondingElement, localPoint,
                                            AIR_PRESSURE_NULL);
                    it->GetSolutionStepValue(AIR_PRESSURE_EINS)=
                        MappedValuePressure(correspondingElement, localPoint,
                                            AIR_PRESSURE_EINS);
                    it->GetSolutionStepValue(AIR_PRESSURE_NULL_DT)=
                        MappedValuePressure(correspondingElement, localPoint,
                                            AIR_PRESSURE_NULL_DT);
                    it->GetSolutionStepValue(AIR_PRESSURE_NULL_ACCELERATION)=
                        MappedValuePressure(correspondingElement, localPoint,
                                            AIR_PRESSURE_NULL_ACCELERATION);
                }
                std::cout <<"VARIABLES TRANSFERRED" << std::endl;
            }
            else
            {
                std::cout<<"###### NO PARTNER FOUND IN OLD MESH : TransferNodalVariables(...)#####"<<std::endl;
            }
        }
        //restore source model_part
        for( ModelPart::NodeIterator it = rSource.NodesBegin() ;
                it != rSource.NodesEnd(); it++ )
        {
            (*it).X() = (*it).X0()+(*it).GetSolutionStepValue( DISPLACEMENT_X );
            (*it).Y() = (*it).Y0()+(*it).GetSolutionStepValue( DISPLACEMENT_Y );
            (*it).Z() = (*it).Z0()+(*it).GetSolutionStepValue( DISPLACEMENT_Z );
        }
        //restore target model_part
        for( ModelPart::NodeIterator it = rTarget.NodesBegin() ;
                it != rTarget.NodesEnd(); it++ )
        {
            (*it).X() = (*it).X0()+(*it).GetSolutionStepValue( DISPLACEMENT_X );
            (*it).Y() = (*it).Y0()+(*it).GetSolutionStepValue( DISPLACEMENT_Y );
            (*it).Z() = (*it).Z0()+(*it).GetSolutionStepValue( DISPLACEMENT_Z );
        }
    }
    /**
     * Transfer of INSITU_STRESS.
     * This transfers the in-situ stress from rSource to rTarget.
     * Both meshes are moved to their reference configuration for the
     * mapping and moved back (X0 + DISPLACEMENT) afterwards.
     * @param rSource the source model part
     * @param rTarget the target model part
     */
    void TransferInSituStress( ModelPart& rSource, ModelPart& rTarget )
    {
        //reset original model part to reference configuration
        for( ModelPart::NodeIterator it = rSource.NodesBegin() ;
                it != rSource.NodesEnd(); it++ )
        {
            (*it).X() = (*it).X0();
            (*it).Y() = (*it).Y0();
            (*it).Z() = (*it).Z0();
        }
        for( ModelPart::NodeIterator it = rTarget.NodesBegin() ;
                it != rTarget.NodesEnd(); it++ )
        {
            (*it).X() = (*it).X0();
            (*it).Y() = (*it).Y0();
            (*it).Z() = (*it).Z0();
        }
        // first smooth the Gauss-point values onto the source nodes,
        // then interpolate them at the target integration points
        TransferVariablesToNodes(rSource, INSITU_STRESS);
//             TransferVariablesBetweenMeshes(rSource, rTarget,INSITU_STRESS);
//             TransferVariablesToGaussPoints(rTarget, INSITU_STRESS);
        TransferVariablesToGaussPoints(rSource, rTarget, INSITU_STRESS);
        //restore model_part
        for( ModelPart::NodeIterator it = rSource.NodesBegin() ;
                it != rSource.NodesEnd(); it++ )
        {
            (*it).X() = (*it).X0()+(*it).GetSolutionStepValue( DISPLACEMENT_X );
            (*it).Y() = (*it).Y0()+(*it).GetSolutionStepValue( DISPLACEMENT_Y );
            (*it).Z() = (*it).Z0()+(*it).GetSolutionStepValue( DISPLACEMENT_Z );
        }
        for( ModelPart::NodeIterator it = rTarget.NodesBegin() ;
                it != rTarget.NodesEnd(); it++ )
        {
            (*it).X() = (*it).X0()+(*it).GetSolutionStepValue( DISPLACEMENT_X );
            (*it).Y() = (*it).Y0()+(*it).GetSolutionStepValue( DISPLACEMENT_Y );
            (*it).Z() = (*it).Z0()+(*it).GetSolutionStepValue( DISPLACEMENT_Z );
        }
    }
    /**
     * Transfer of integration point variables.
     * This Transfers all variables on integration points from r_old_model_part
     * to r_new_model_part.
     * To cope with moved meshes, the source model_part is resetted to its
     * reference configuration temporarily!
     * @param rSource source model_part
     * @param rTarget target model_part
     * TODO: find more elegant way to check existence of variables in each node
     * CAUTION: THIS MAY CREATE VARIABLES ON NODES THAT MIGHT CAUSE A SEGMENTATION
     * FAULT ON RUNTIME
     */
    void TransferConstitutiveLawVariables(ModelPart& rSource, ModelPart& rTarget)
    {
        //reset source model part to reference configuration
        for( ModelPart::NodeIterator it = rSource.NodesBegin() ;
                it != rSource.NodesEnd(); it++ )
        {
            (*it).X() = (*it).X0();
            (*it).Y() = (*it).Y0();
            (*it).Z() = (*it).Z0();
        }
        //reset target model part to reference configuration
        for( ModelPart::NodeIterator it = rTarget.NodesBegin() ;
                it != rTarget.NodesEnd(); it++ )
        {
            (*it).X() = (*it).X0();
            (*it).Y() = (*it).Y0();
            (*it).Z() = (*it).Z0();
        }
        // smooth Gauss-point tensor to nodes, then re-sample on target mesh
        TransferVariablesToNodes(rSource, ELASTIC_LEFT_CAUCHY_GREEN_OLD);
//             TransferVariablesBetweenMeshes(rSource, rTarget,ELASTIC_LEFT_CAUCHY_GREEN_OLD);
//
//             TransferVariablesToGaussPoints(rTarget, ELASTIC_LEFT_CAUCHY_GREEN_OLD);
        TransferVariablesToGaussPoints( rSource, rTarget, ELASTIC_LEFT_CAUCHY_GREEN_OLD);
        // restore source model_part
        for( ModelPart::NodeIterator it = rSource.NodesBegin() ;
                it != rSource.NodesEnd(); it++ )
        {
            (*it).X() = (*it).X0()+(*it).GetSolutionStepValue( DISPLACEMENT_X );
            (*it).Y() = (*it).Y0()+(*it).GetSolutionStepValue( DISPLACEMENT_Y );
            (*it).Z() = (*it).Z0()+(*it).GetSolutionStepValue( DISPLACEMENT_Z );
        }
        // restore target model_part
        for( ModelPart::NodeIterator it = rTarget.NodesBegin() ;
                it != rTarget.NodesEnd(); it++ )
        {
            (*it).X() = (*it).X0()+(*it).GetSolutionStepValue( DISPLACEMENT_X );
            (*it).Y() = (*it).Y0()+(*it).GetSolutionStepValue( DISPLACEMENT_Y );
            (*it).Z() = (*it).Z0()+(*it).GetSolutionStepValue( DISPLACEMENT_Z );
        }
    }
    /**
     * Transfer of rThisVariable stored on nodes to integration point via
     * approximation by shape functions
     * @param model_part model_part on which the transfer should be done
     * @param rThisVariable Matrix-Variable which should be transferred
     * @see TransferVariablesToGaussPoints(ModelPart& model_part,
    Variable<Kratos::Vector>& rThisVariable)
     * @see TransferVariablesToGaussPoints(ModelPart& model_part,
    Variable<double>& rThisVariable)
     */
    void TransferVariablesToGaussPoints(ModelPart& model_part,
                                        Variable<Kratos::Matrix>& rThisVariable)
    {
        ElementsArrayType& ElementsArray= model_part.Elements();
        // Split the element container into one contiguous chunk per thread.
        int number_of_threads = omp_get_max_threads();
        vector<unsigned int> element_partition;
        CreatePartition(number_of_threads, ElementsArray.size(), element_partition);
        KRATOS_WATCH( number_of_threads );
        KRATOS_WATCH( element_partition );
        double start_prod = omp_get_wtime();
        #pragma omp parallel for
        for(int k=0; k<number_of_threads; k++)
        {
            typename ElementsArrayType::ptr_iterator it_begin=ElementsArray.ptr_begin()+element_partition[k];
            typename ElementsArrayType::ptr_iterator it_end=ElementsArray.ptr_begin()+element_partition[k+1];
            for( ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it )
            {
                const IntegrationPointsArrayType& integration_points =
                    (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());
                std::vector<Matrix> ValuesOnIntPoint(integration_points.size());
                const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod());
                // value at each Gauss point = sum_node N(node) * nodal value;
                // hard-coded 3x3 matrix variable
                for( unsigned int PointNumber = 0;
                        PointNumber<integration_points.size();
                        PointNumber++)
                {
                    ValuesOnIntPoint[PointNumber].resize(3,3,false);
                    noalias(ValuesOnIntPoint[PointNumber])= ZeroMatrix(3,3);
                    for(unsigned int node= 0; node< (*it)->GetGeometry().size(); node++)
                    {
                        ValuesOnIntPoint[PointNumber]
                        +=Ncontainer(PointNumber, node)*
                          (*it)->GetGeometry()[node].GetSolutionStepValue(rThisVariable);
                    }
                }
                (*it)->SetValueOnIntegrationPoints( rThisVariable, ValuesOnIntPoint,
                                                    model_part.GetProcessInfo());
            }
        }
        double stop_prod = omp_get_wtime();
        std::cout << "transfer time: " << stop_prod - start_prod << std::endl;
    }
    /**
     * Transfer of rThisVariable stored on nodes to integration point via
     * approximation by shape functions
     * @param model_part model_part on which the transfer should be done
     * @param rThisVariable Vector-Variable which should be transferred
     * @see TransferVariablesToGaussPoints(ModelPart& model_part,
    Variable<Kratos::Matrix>& rThisVariable)
     * @see TransferVariablesToGaussPoints(ModelPart& model_part,
    Variable<double>& rThisVariable)
     */
    void TransferVariablesToGaussPoints(ModelPart& model_part,
                                        Variable<Kratos::Vector>& rThisVariable)
    {
        ElementsArrayType& ElementsArray= model_part.Elements();
        // Split the element container into one contiguous chunk per thread.
        int number_of_threads = omp_get_max_threads();
        vector<unsigned int> element_partition;
        CreatePartition(number_of_threads, ElementsArray.size(), element_partition);
        KRATOS_WATCH( number_of_threads );
        KRATOS_WATCH( element_partition );
        double start_prod = omp_get_wtime();
        #pragma omp parallel for
        for(int k=0; k<number_of_threads; k++)
        {
            typename ElementsArrayType::ptr_iterator it_begin=ElementsArray.ptr_begin()+element_partition[k];
            typename ElementsArrayType::ptr_iterator it_end=ElementsArray.ptr_begin()+element_partition[k+1];
            for( ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it )
            {
                unsigned int NodesDispMin= 1;
                unsigned int NodesDispMax= (*it)->GetGeometry().size();
                const IntegrationPointsArrayType& integration_points =
                    (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());
                std::vector<Vector> ValuesOnIntPoint(integration_points.size());
                const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod());
                // value at each Gauss point = sum_node N(node) * nodal value;
                // hard-coded 6-component vector (Voigt-sized)
                for( unsigned int PointNumber = 0;
                        PointNumber<integration_points.size();
                        PointNumber++)
                {
                    ValuesOnIntPoint[PointNumber].resize(6,false);
                    noalias(ValuesOnIntPoint[PointNumber])= ZeroVector(6);
                    for(unsigned int node= NodesDispMin-1; node< NodesDispMax; node++)
                    {
                        ValuesOnIntPoint[PointNumber]
                        +=Ncontainer(PointNumber, node)*
                          (*it)->GetGeometry()[node].GetSolutionStepValue(rThisVariable);
                    }
                }
                (*it)->SetValueOnIntegrationPoints( rThisVariable, ValuesOnIntPoint,
                                                    model_part.GetProcessInfo());
            }
        }
        double stop_prod = omp_get_wtime();
        std::cout << "transfer time: " << stop_prod - start_prod << std::endl;
    }
    /**
     * Transfer of rThisVariable stored on nodes to integration point via
     * approximation by shape functions
     * @param model_part model_part on which the transfer should be done
     * @param rThisVariable double-Variable which should be transferred
     * @see TransferVariablesToGaussPoints(ModelPart& model_part,
    Variable<Kratos::Matrix>& rThisVariable)
     * @see TransferVariablesToGaussPoints(ModelPart& model_part,
    Variable<Kratos::Vector>& rThisVariable)
     */
    void TransferVariablesToGaussPoints(ModelPart& model_part,
                                        Variable<double>& rThisVariable)
    {
        ElementsArrayType& ElementsArray= model_part.Elements();
        // Split the element container into one contiguous chunk per thread.
        int number_of_threads = omp_get_max_threads();
        vector<unsigned int> element_partition;
        CreatePartition(number_of_threads, ElementsArray.size(), element_partition);
        KRATOS_WATCH( number_of_threads );
        KRATOS_WATCH( element_partition );
        double start_prod = omp_get_wtime();
        #pragma omp parallel for
        for(int k=0; k<number_of_threads; k++)
        {
            typename ElementsArrayType::ptr_iterator it_begin=ElementsArray.ptr_begin()+element_partition[k];
            typename ElementsArrayType::ptr_iterator it_end=ElementsArray.ptr_begin()+element_partition[k+1];
            for( ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it )
            {
                unsigned int NodesDispMin= 1;
                unsigned int NodesDispMax= (*it)->GetGeometry().size();
                const IntegrationPointsArrayType& integration_points =
                    (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());
                std::vector<double> ValuesOnIntPoint(integration_points.size());
                const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod());
                // value at each Gauss point = sum_node N(node) * nodal value
                for( unsigned int PointNumber = 0;
                        PointNumber<integration_points.size();
                        PointNumber++)
                {
                    ValuesOnIntPoint[PointNumber]= 0.0;
                    for(unsigned int node= NodesDispMin-1; node< NodesDispMax; node++)
                    {
                        ValuesOnIntPoint[PointNumber]
                        +=Ncontainer(PointNumber, node)*
                          (*it)->GetGeometry()[node].GetSolutionStepValue(rThisVariable);
                    }
                }
                (*it)->SetValueOnIntegrationPoints( rThisVariable, ValuesOnIntPoint,
                                                    model_part.GetProcessInfo());
            }
        }
        double stop_prod = omp_get_wtime();
        std::cout << "transfer time: " << stop_prod - start_prod << std::endl;
    }
    /**
     * Transfer of rThisVariable stored on nodes in source mesh to integration point of target
     * mesh via approximation by shape functions
     * @param rSource
     * @param rTarget
     * @param rThisVariable Vector-Variable which should be transferred
     * @see TransferVariablesToGaussPoints(ModelPart& source_model_part, ModelPart&
    source_model_part, Variable<Kratos::Matrix>& rThisVariable)
     * @see TransferVariablesToGaussPoints(ModelPart& source_model_part, ModelPart&
    source_model_part, Variable<double>& rThisVariable)
     */
    void TransferVariablesToGaussPoints(ModelPart& rSource, ModelPart& rTarget,
                                        Variable<Kratos::Vector>& rThisVariable)
    {
        ElementsArrayType& SourceMeshElementsArray= rSource.Elements();
        ElementsArrayType& TargetMeshElementsArray= rTarget.Elements();
        // Partition the TARGET elements over the available threads.
        int number_of_threads = omp_get_max_threads();
        vector<unsigned int> element_partition;
        CreatePartition(number_of_threads, TargetMeshElementsArray.size(), element_partition);
        KRATOS_WATCH( number_of_threads );
        KRATOS_WATCH( element_partition );
        double start_prod = omp_get_wtime();
        #pragma omp parallel for
        for(int k=0; k<number_of_threads; k++)
        {
            typename ElementsArrayType::ptr_iterator it_begin=TargetMeshElementsArray.ptr_begin()+element_partition[k];
            typename ElementsArrayType::ptr_iterator it_end=TargetMeshElementsArray.ptr_begin()+element_partition[k+1];
            for( ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it )
            {
                const IntegrationPointsArrayType& integration_points
                = (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());
                std::vector<Vector> ValuesOnIntPoint(integration_points.size());
                for(unsigned int point=0; point< integration_points.size(); point++)
                {
                    Point<3>  sourceLocalPoint;
                    Point<3>  targetLocalPoint;
                    noalias(targetLocalPoint)= integration_points[point];
                    Point<3>  targetGlobalPoint;
                    // map the target Gauss point to global coordinates, then
                    // find the source element containing that point
                    (*it)->GetGeometry().GlobalCoordinates(targetGlobalPoint,targetLocalPoint);
                    Element sourceElement;
                    //Calculate Value of rVariable(firstvalue, secondvalue) in OldMesh
                    ValuesOnIntPoint[point].resize(6,false);
                    // NOTE(review): when no partner element is found the resized
                    // vector is left uninitialized — confirm intended fallback.
                    if(FindPartnerElement(targetGlobalPoint, SourceMeshElementsArray,
                                          sourceElement,sourceLocalPoint))
                    {
                        noalias(ValuesOnIntPoint[point])=
                            ValueVectorInOldMesh(sourceElement, sourceLocalPoint, rThisVariable );
                    }
                }
                (*it)->SetValueOnIntegrationPoints( rThisVariable, ValuesOnIntPoint,
                                                    rTarget.GetProcessInfo());
            }
        }
        double stop_prod = omp_get_wtime();
        std::cout << "transfer time: " << stop_prod - start_prod << std::endl;
    }
/**
* Transfer of rThisVariable stored on nodes in source mesh to integration point of target
* mesh via approximation by shape functions
* @param rSource
* @param rTarget
* @param rThisVariable Matrix-Variable which should be transferred
* @see TransferVariablesToGaussPoints(ModelPart& source_model_part,
* ModelPart& source_model_part, Variable<Kratos::Vector>& rThisVariable)
* @see TransferVariablesToGaussPoints(ModelPart& source_model_part,
* ModelPart& source_model_part, Variable<double>& rThisVariable)
*/
void TransferVariablesToGaussPoints(ModelPart& rSource,
ModelPart& rTarget,
Variable<Matrix> const& rThisVariable)
{
    ElementsArrayType const& SourceMeshElementsArray= rSource.Elements();
    ElementsArrayType& TargetMeshElementsArray= rTarget.Elements();

    // Partition the target elements over the available OpenMP threads.
    // This loop is race-free: each thread writes only to its own elements.
    int number_of_threads = omp_get_max_threads();
    vector<unsigned int> element_partition;
    CreatePartition(number_of_threads, TargetMeshElementsArray.size(), element_partition);
    KRATOS_WATCH( number_of_threads );
    KRATOS_WATCH( element_partition );
    double start_prod = omp_get_wtime();
    #pragma omp parallel for
    for(int k=0; k<number_of_threads; k++)
    {
        typename ElementsArrayType::ptr_iterator it_begin=TargetMeshElementsArray.ptr_begin()+element_partition[k];
        typename ElementsArrayType::ptr_iterator it_end=TargetMeshElementsArray.ptr_begin()+element_partition[k+1];
        for( ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it )
        {
            const IntegrationPointsArrayType& integration_points
                = (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());
            std::vector<Matrix> ValuesOnIntPoint(integration_points.size());
            for(unsigned int point=0; point< integration_points.size(); point++)
            {
                Point<3> sourceLocalPoint;
                Point<3> targetLocalPoint;
                noalias(targetLocalPoint)= integration_points[point];
                Point<3> targetGlobalPoint;
                (*it)->GetGeometry().GlobalCoordinates(targetGlobalPoint,targetLocalPoint);
                Element sourceElement;
                // BUGFIX: size and zero the matrix unconditionally. Previously,
                // when no partner element was found the entry stayed an empty
                // 0x0 matrix; now it is a zeroed 3x3, consistent with the
                // Vector overload. (The former "resize; =ZeroMatrix; =Value..."
                // sequence also performed a redundant intermediate assignment.)
                ValuesOnIntPoint[point].resize(3,3,false);
                noalias(ValuesOnIntPoint[point]) = ZeroMatrix(3,3);
                // Interpolate rThisVariable of the source element at this point.
                if(FindPartnerElement(targetGlobalPoint, SourceMeshElementsArray,
                                      sourceElement, sourceLocalPoint))
                {
                    ValuesOnIntPoint[point] = ValueMatrixInOldMesh(sourceElement, sourceLocalPoint, rThisVariable );
                }
            }
            (*it)->SetValueOnIntegrationPoints( rThisVariable, ValuesOnIntPoint,
                                                rTarget.GetProcessInfo());
        }
    }
    double stop_prod = omp_get_wtime();
    std::cout << "transfer time: " << stop_prod - start_prod << std::endl;
}
/**
* Transfer of rThisVariable stored on nodes in source mesh to integration point of target
* mesh via approximation by shape functions
* @param rSource
* @param rTarget
* @param rThisVariable double-Variable which should be transferred
* @see TransferVariablesToGaussPoints(ModelPart& source_model_part, ModelPart&
source_model_part, Variable<Kratos::Matrix>& rThisVariable)
* @see TransferVariablesToGaussPoints(ModelPart& source_model_part, ModelPart&
source_model_part, Variable<Kratos::Vector>& rThisVariable)
*/
void TransferVariablesToGaussPoints(ModelPart& rSource, ModelPart& rTarget,
Variable<double>& rThisVariable)
{
    ElementsArrayType& SourceMeshElementsArray= rSource.Elements();
    ElementsArrayType& TargetMeshElementsArray= rTarget.Elements();

    // Split the target elements among the available OpenMP threads;
    // each thread owns a contiguous slice of the element array.
    int number_of_threads = omp_get_max_threads();
    vector<unsigned int> element_partition;
    CreatePartition(number_of_threads, TargetMeshElementsArray.size(), element_partition);
    KRATOS_WATCH( number_of_threads );
    KRATOS_WATCH( element_partition );

    double start_prod = omp_get_wtime();
    #pragma omp parallel for
    for(int thread_id = 0; thread_id < number_of_threads; thread_id++)
    {
        typename ElementsArrayType::ptr_iterator slice_begin =
            TargetMeshElementsArray.ptr_begin() + element_partition[thread_id];
        typename ElementsArrayType::ptr_iterator slice_end =
            TargetMeshElementsArray.ptr_begin() + element_partition[thread_id + 1];
        for( ElementsArrayType::ptr_iterator elem = slice_begin; elem != slice_end; ++elem )
        {
            const IntegrationPointsArrayType& integration_points =
                (*elem)->GetGeometry().IntegrationPoints((*elem)->GetIntegrationMethod());
            // One mapped value per integration point; entries stay 0.0 when
            // no partner element exists in the source mesh.
            std::vector<double> ValuesOnIntPoint(integration_points.size());
            for(unsigned int ipoint = 0; ipoint < integration_points.size(); ipoint++)
            {
                Point<3> local_in_source;
                Point<3> local_in_target;
                noalias(local_in_target) = integration_points[ipoint];
                Point<3> global_point;
                (*elem)->GetGeometry().GlobalCoordinates(global_point, local_in_target);
                Element partner;
                // Locate the source element containing this Gauss point and
                // interpolate the nodal variable there.
                if(FindPartnerElement(global_point, SourceMeshElementsArray,
                                      partner, local_in_source))
                {
                    ValuesOnIntPoint[ipoint] =
                        MappedValue(partner, local_in_source, rThisVariable );
                }
            }
            (*elem)->SetValueOnIntegrationPoints( rThisVariable, ValuesOnIntPoint,
                                                  rTarget.GetProcessInfo());
        }
    }
    double stop_prod = omp_get_wtime();
    std::cout << "transfer time: " << stop_prod - start_prod << std::endl;
}
/**
* Transfer of rThisVariable defined on integration points to corresponding
* nodal values. The transformation is done in a form that ensures a minimization
* of L_2-norm error (/sum{rThisVariable- f(x)) whereas
* f(x)= /sum{shape_func_i*rThisVariable_i}
* @param model_part model_part on which the transfer should be done
* @param rThisVariable Matrix-Variable which should be transferred
* @see TransferVariablesToNodes(ModelPart& model_part, Variable<Kratos::Vector>& rThisVariable)
* @see TransferVariablesToNodes(ModelPart& model_part, Variable<double>& rThisVariable)
* @ref Jiao&Heath: "Common-refinement-based data transfer...", Int.
* Journal for numer. meth. in eng. 61 (2004) 2402--2427
* WARNING: this may cause segmentation faults as the respective variables
* will be created on nodal level while they are originally intended to be
* stored on integration points!
*/
void TransferVariablesToNodes(ModelPart& model_part, Variable<Kratos::Matrix>& rThisVariable)
{
    ElementsArrayType& ElementsArray= model_part.Elements();

    // Reset the nodal variable before the transfer.
    for(ModelPart::NodeIterator it = model_part.NodesBegin();
            it != model_part.NodesEnd() ; it++)
    {
        it->GetSolutionStepValue(rThisVariable)
            = ZeroMatrix(3,3);
    }

    // Set up the global L_2-projection system M*g = b.
    // NOTE(review): row/column indices are (node Id - 1), so node Ids are
    // assumed contiguous and 1-based -- verify for renumbered meshes.
    SpaceType::MatrixType M(model_part.NumberOfNodes(),model_part.NumberOfNodes());
    SpaceType::VectorType g(model_part.NumberOfNodes());
    SpaceType::VectorType b(model_part.NumberOfNodes());
    noalias(M)= ZeroMatrix(model_part.NumberOfNodes(),model_part.NumberOfNodes());

    // Create a partition of the element array for the threaded assembly.
    int number_of_threads = omp_get_max_threads();
    vector<unsigned int> element_partition;
    CreatePartition(number_of_threads, ElementsArray.size(), element_partition);
    KRATOS_WATCH( number_of_threads );
    KRATOS_WATCH( element_partition );
    double start_prod = omp_get_wtime();
    #pragma omp parallel for
    for(int k=0; k<number_of_threads; k++)
    {
        typename ElementsArrayType::ptr_iterator it_begin=ElementsArray.ptr_begin()+element_partition[k];
        typename ElementsArrayType::ptr_iterator it_end=ElementsArray.ptr_begin()+element_partition[k+1];
        for( ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it )
        {
            const IntegrationPointsArrayType& integration_points
                = (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());
            GeometryType::JacobiansType J(integration_points.size());
            J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod());
            const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod());
            Matrix InvJ(3,3);
            double DetJ;
            for(unsigned int point=0; point< integration_points.size(); point++)
            {
                MathUtils<double>::InvertMatrix(J[point],InvJ,DetJ);
                double dV= DetJ*integration_points[point].Weight();
                // BUGFIX: elements in different partitions share nodes, so the
                // unsynchronized "+=" on shared entries of M was a data race.
                // The scatter into the global matrix must be serialized.
                #pragma omp critical
                {
                    for(unsigned int prim=0; prim<(*it)->GetGeometry().size() ; prim++)
                    {
                        for(unsigned int sec=0; sec<(*it)->GetGeometry().size() ; sec++)
                        {
                            M(((*it)->GetGeometry()[prim].Id()-1),
                              ((*it)->GetGeometry()[sec].Id()-1))+=
                                Ncontainer(point, prim)*Ncontainer(point, sec)*dV;
                        }
                    }
                }
            }
        }
    }
    double stop_prod = omp_get_wtime();
    std::cout << "assembling time: " << stop_prod - start_prod << std::endl;

    // Solve one projection per matrix component (3x3 components).
    for(unsigned int firstvalue=0; firstvalue<3; firstvalue++)
    {
        for(unsigned int secondvalue=0; secondvalue<3; secondvalue++)
        {
            noalias(g)= ZeroVector(model_part.NumberOfNodes());
            noalias(b)= ZeroVector(model_part.NumberOfNodes());
            // Transfer of Gauss-point variables to nodal variables via
            // L_2-minimization; see Jiao + Heath, "Common-refinement-based
            // data transfer ...", IJNME 61 (2004) 2402--2427.
            for( ElementsArrayType::ptr_iterator it = ElementsArray.ptr_begin();
                    it != ElementsArray.ptr_end();
                    ++it )
            {
                const IntegrationPointsArrayType& integration_points
                    = (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());
                GeometryType::JacobiansType J(integration_points.size());
                J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod());
                std::vector<Matrix> ValuesOnIntPoint(integration_points.size());
                (*it)->GetValueOnIntegrationPoints(rThisVariable, ValuesOnIntPoint, model_part.GetProcessInfo());
                const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod());
                Matrix InvJ(3,3);
                double DetJ;
                for(unsigned int point=0; point< integration_points.size(); point++)
                {
                    MathUtils<double>::InvertMatrix(J[point],InvJ,DetJ);
                    double dV= DetJ*integration_points[point].Weight();
                    for(unsigned int prim=0 ; prim<(*it)->GetGeometry().size(); prim++)
                    {
                        b(((*it)->GetGeometry()[prim].Id()-1))
                            +=(ValuesOnIntPoint[point](firstvalue,secondvalue))
                              *Ncontainer(point, prim)*dV;
                    }
                }
            }
            double start_solve = omp_get_wtime();
            SkylineLUFactorizationSolver<SpaceType, SpaceType>().Solve(M, g, b);
            double stop_solve = omp_get_wtime();
            std::cout << "solving time: " << stop_solve - start_solve << std::endl;
            // Scatter the solved nodal coefficients back to the nodes.
            for(ModelPart::NodeIterator it = model_part.NodesBegin() ;
                    it != model_part.NodesEnd() ; it++)
            {
                it->GetSolutionStepValue(rThisVariable)(firstvalue,secondvalue)
                    = g((it->Id()-1));
            }
        }//END secondvalue
    }//END firstvalue
}
/**
* Transfer of rThisVariable defined on integration points to corresponding
* nodal values. The transformation is done in a form that ensures a minimization
* of L_2-norm error (/sum{rThisVariable- f(x)) whereas
* f(x)= /sum{shape_func_i*rThisVariable_i}
* @param model_part model_part on which the transfer should be done
* @param rThisVariable Vector-Variable which should be transferred
* @see TransferVariablesToNodes(ModelPart& model_part, Variable<Kratos::Matrix>& rThisVariable)
* @see TransferVariablesToNodes(ModelPart& model_part, Variable<double>& rThisVariable)
* @ref Jiao&Heath: "Common-refinement-based data transfer...", Int.
* Journal for numer. meth. in eng. 61 (2004) 2402--2427
* WARNING: this may cause segmentation faults as the respective variables
* will be created on nodal level while they are originally intended to be
* stored on integration points!
*/
void TransferVariablesToNodes(ModelPart& model_part, Variable<Kratos::Vector>& rThisVariable)
{
    ElementsArrayType& ElementsArray= model_part.Elements();

    // Reset the nodal variable before the transfer.
    for(ModelPart::NodeIterator it = model_part.NodesBegin();
            it != model_part.NodesEnd() ; it++)
    {
        it->GetSolutionStepValue(rThisVariable)
            = ZeroVector(6);
    }

    // Set up the global L_2-projection system M*g = b.
    // NOTE(review): row/column indices are (node Id - 1), so node Ids are
    // assumed contiguous and 1-based -- verify for renumbered meshes.
    SpaceType::MatrixType M(model_part.NumberOfNodes(),model_part.NumberOfNodes());
    SpaceType::VectorType g(model_part.NumberOfNodes());
    SpaceType::VectorType b(model_part.NumberOfNodes());
    noalias(M)= ZeroMatrix(model_part.NumberOfNodes(),model_part.NumberOfNodes());

    // Create a partition of the element array for the threaded assembly.
    int number_of_threads = omp_get_max_threads();
    vector<unsigned int> element_partition;
    CreatePartition(number_of_threads, ElementsArray.size(), element_partition);
    KRATOS_WATCH( number_of_threads );
    KRATOS_WATCH( element_partition );
    double start_prod = omp_get_wtime();
    #pragma omp parallel for
    for(int k=0; k<number_of_threads; k++)
    {
        typename ElementsArrayType::ptr_iterator it_begin=ElementsArray.ptr_begin()+element_partition[k];
        typename ElementsArrayType::ptr_iterator it_end=ElementsArray.ptr_begin()+element_partition[k+1];
        for( ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it )
        {
            const IntegrationPointsArrayType& integration_points
                = (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());
            GeometryType::JacobiansType J(integration_points.size());
            J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod());
            const Matrix& Ncontainer =
                (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod());
            Matrix InvJ(3,3);
            double DetJ;
            for(unsigned int point=0; point< integration_points.size(); point++)
            {
                MathUtils<double>::InvertMatrix(J[point],InvJ,DetJ);
                double dV= DetJ*integration_points[point].Weight();
                // BUGFIX: elements in different partitions share nodes, so the
                // unsynchronized "+=" on shared entries of M was a data race.
                // The scatter into the global matrix must be serialized.
                #pragma omp critical
                {
                    for(unsigned int prim=0 ; prim<(*it)->GetGeometry().size(); prim++)
                    {
                        for(unsigned int sec=0 ; sec<(*it)->GetGeometry().size(); sec++)
                        {
                            M(((*it)->GetGeometry()[prim].Id()-1),
                              ((*it)->GetGeometry()[sec].Id()-1))+=
                                Ncontainer(point, prim)*Ncontainer(point, sec)*dV;
                        }
                    }
                }
            }
        }
    }
    double stop_prod = omp_get_wtime();
    std::cout << "assembling time: " << stop_prod - start_prod << std::endl;

    // Solve one projection per vector component (6 components).
    for(unsigned int firstvalue=0; firstvalue<6; firstvalue++)
    {
        noalias(g)= ZeroVector(model_part.NumberOfNodes());
        noalias(b)= ZeroVector(model_part.NumberOfNodes());
        // Transfer of Gauss-point variables to nodal variables via
        // L_2-minimization; see Jiao + Heath, "Common-refinement-based
        // data transfer ...", IJNME 61 (2004) 2402--2427.
        for( ElementsArrayType::ptr_iterator it = ElementsArray.ptr_begin();
                it != ElementsArray.ptr_end();
                ++it )
        {
            const IntegrationPointsArrayType& integration_points
                = (*it)->GetGeometry().IntegrationPoints( (*it)->GetIntegrationMethod());
            GeometryType::JacobiansType J(integration_points.size());
            J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod());
            std::vector<Vector> ValuesOnIntPoint(integration_points.size());
            (*it)->GetValueOnIntegrationPoints(rThisVariable, ValuesOnIntPoint, model_part.GetProcessInfo());
            const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod());
            Matrix InvJ(3,3);
            double DetJ;
            for(unsigned int point=0; point< integration_points.size(); point++)
            {
                MathUtils<double>::InvertMatrix(J[point],InvJ,DetJ);
                double dV= DetJ*integration_points[point].Weight();
                for(unsigned int prim=0 ; prim<(*it)->GetGeometry().size(); prim++)
                {
                    b(((*it)->GetGeometry()[prim].Id()-1))
                        +=(ValuesOnIntPoint[point](firstvalue))
                          *Ncontainer(point, prim)*dV;
                }
            }
        }
        double start_solve = omp_get_wtime();
        SkylineLUFactorizationSolver<SpaceType, SpaceType>().Solve(M, g, b);
        double stop_solve = omp_get_wtime();
        std::cout << "solving time: " << stop_solve - start_solve << std::endl;
        // Scatter the solved nodal coefficients back to the nodes.
        for(ModelPart::NodeIterator it = model_part.NodesBegin() ;
                it != model_part.NodesEnd() ; it++)
        {
            it->GetSolutionStepValue(rThisVariable)(firstvalue)
                = g((it->Id()-1));
        }
    }//END firstvalue
}
/**
* Transfer of rThisVariable defined on integration points to corresponding
* nodal values. The transformation is done in a form that ensures a minimization
* of L_2-norm error (/sum{rThisVariable- f(x)) whereas
* f(x)= /sum{shape_func_i*rThisVariable_i}
* @param model_part model_part on which the transfer should be done
* @param rThisVariable double-Variable which should be transferred
* @see TransferVariablesToNodes(ModelPart& model_part, Variable<Kratos::Matrix>& rThisVariable)
* @see TransferVariablesToNodes(ModelPart& model_part, Variable<Kratos::Vector>& rThisVariable)
* @ref Jiao&Heath: "Common-refinement-based data transfer...", Int.
* Journal for numer. meth. in eng. 61 (2004) 2402--2427
* WARNING: this may cause segmentation faults as the respective variables
* will be created on nodal level while they are originally intended to be
* stored on integration points!
*/
void TransferVariablesToNodes(ModelPart& model_part, Variable<double>& rThisVariable)
{
    ElementsArrayType& ElementsArray= model_part.Elements();

    // Reset the nodal variable before the transfer.
    for(ModelPart::NodeIterator it = model_part.NodesBegin();
            it != model_part.NodesEnd() ; it++)
    {
        it->GetSolutionStepValue(rThisVariable)
            = 0.0;
    }

    // Set up the global L_2-projection system M*g = b.
    // NOTE(review): indices are (node Id - 1), so node Ids are assumed
    // contiguous and 1-based -- verify for renumbered meshes.
    SpaceType::MatrixType M(model_part.NumberOfNodes(),model_part.NumberOfNodes());
    noalias(M)= ZeroMatrix(model_part.NumberOfNodes(),model_part.NumberOfNodes());
    SpaceType::VectorType g(model_part.NumberOfNodes());
    noalias(g)= ZeroVector(model_part.NumberOfNodes());
    SpaceType::VectorType b(model_part.NumberOfNodes());
    noalias(b)= ZeroVector(model_part.NumberOfNodes());

    // Transfer of Gauss-point variables to nodal variables via
    // L_2-minimization; see Jiao + Heath, "Common-refinement-based data
    // transfer ...", IJNME 61 (2004) 2402--2427.
    // Create a partition of the element array for the threaded assembly.
    int number_of_threads = omp_get_max_threads();
    vector<unsigned int> element_partition;
    CreatePartition(number_of_threads, ElementsArray.size(), element_partition);
    KRATOS_WATCH( number_of_threads );
    KRATOS_WATCH( element_partition );
    double start_prod = omp_get_wtime();
    #pragma omp parallel for
    for(int k=0; k<number_of_threads; k++)
    {
        typename ElementsArrayType::ptr_iterator it_begin=ElementsArray.ptr_begin()+element_partition[k];
        typename ElementsArrayType::ptr_iterator it_end=ElementsArray.ptr_begin()+element_partition[k+1];
        for( ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it )
        {
            const IntegrationPointsArrayType& integration_points
                = (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());
            GeometryType::JacobiansType J(integration_points.size());
            J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod());
            std::vector<double> ValuesOnIntPoint(integration_points.size());
            (*it)->GetValueOnIntegrationPoints(rThisVariable, ValuesOnIntPoint, model_part.GetProcessInfo());
            const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod());
            Matrix InvJ(3,3);
            double DetJ;
            for(unsigned int point=0; point< integration_points.size(); point++)
            {
                MathUtils<double>::InvertMatrix(J[point],InvJ,DetJ);
                double dV= DetJ*integration_points[point].Weight();
                // BUGFIX: elements in different partitions share nodes, so the
                // unsynchronized "+=" on shared entries of b and M was a data
                // race. The scatter into the global system must be serialized.
                #pragma omp critical
                {
                    for(unsigned int prim=0 ; prim<(*it)->GetGeometry().size(); prim++)
                    {
                        b(((*it)->GetGeometry()[prim].Id()-1))
                            +=(ValuesOnIntPoint[point])
                              *Ncontainer(point, prim)*dV;
                        for(unsigned int sec=0 ; sec<(*it)->GetGeometry().size(); sec++)
                        {
                            M(((*it)->GetGeometry()[prim].Id()-1),
                              ((*it)->GetGeometry()[sec].Id()-1))+=
                                Ncontainer(point, prim)*Ncontainer(point, sec)*dV;
                        }
                    }
                }
            }
        }
    }
    double stop_prod = omp_get_wtime();
    std::cout << "assembling time: " << stop_prod - start_prod << std::endl;
    double start_solve = omp_get_wtime();
    SkylineLUFactorizationSolver<SpaceType, SpaceType>().Solve(M, g, b);
    double stop_solve = omp_get_wtime();
    std::cout << "solving time: " << stop_solve - start_solve << std::endl;
    // Scatter the solved nodal values back to the nodes.
    for(ModelPart::NodeIterator it = model_part.NodesBegin() ;
            it != model_part.NodesEnd() ; it++)
    {
        it->GetSolutionStepValue(rThisVariable)
            = g((it->Id()-1));
    }
}
/**
* Transfer of rThisVariable stored on nodes form source mesh to target mesh.
* The transformation is done in a way that ensures a minimization
* of L_2-norm error (/sum{f_old(x)- f_new(x)) whereas
* f(x)_old/new= /sum{shape_func_i*rThisVariable_i}
* @param rSource source model_part
* @param rTarget target model_part
* @param rThisVariable Matrix-Variable which should be transferred
* @see TransferVariablesBetweenMeshes(ModelPart& rSource, ModelPart& rTarget,
Variable<Kratos::Vector>& rThisVariable)
* @see TransferVariablesBetweenMeshes(ModelPart& rSource, ModelPart& rTarget,
Variable<double>& rThisVariable)
* @ref Jiao&Heath: "Common-refinement-based data transfer...", Int.
* Journal for numer. meth. in eng. 61 (2004) 2402--2427
*/
void TransferVariablesBetweenMeshes(ModelPart& rSource, ModelPart& rTarget,
Variable<Kratos::Matrix>& rThisVariable)
{
ElementsArrayType& SourceMeshElementsArray= rSource.Elements();
ElementsArrayType& TargetMeshElementsArray= rTarget.Elements();
// Reset the nodal variable on the target mesh before the transfer.
for(ModelPart::NodeIterator it = rTarget.NodesBegin();
it != rTarget.NodesEnd() ; it++)
{
it->GetSolutionStepValue(rThisVariable)
= ZeroMatrix(3,3);
}
// Set up the global L_2-projection system M*g = b on the target mesh.
// NOTE(review): indices are (node Id - 1), so target node Ids are assumed
// contiguous and 1-based -- verify for renumbered meshes.
SpaceType::MatrixType M(rTarget.NumberOfNodes(),rTarget.NumberOfNodes());
noalias(M)= ZeroMatrix(rTarget.NumberOfNodes(),rTarget.NumberOfNodes());
SpaceType::VectorType g(rTarget.NumberOfNodes());
SpaceType::VectorType b(rTarget.NumberOfNodes());
// Assemble the (component-independent) mass matrix M once, serially.
for( ElementsArrayType::ptr_iterator it = TargetMeshElementsArray.ptr_begin();
it != TargetMeshElementsArray.ptr_end();
++it )
{
const IntegrationPointsArrayType& integration_points
= (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());
GeometryType::JacobiansType J(integration_points.size());
J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod());
const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod());
Matrix InvJ(3,3);
double DetJ;
for(unsigned int point=0; point< integration_points.size(); point++)
{
MathUtils<double>::InvertMatrix(J[point],InvJ,DetJ);
// dV = |J| * Gauss weight: the integration measure at this point.
double dV= DetJ*integration_points[point].Weight();
for(unsigned int prim=0; prim<(*it)->GetGeometry().size() ; prim++)
{
for(unsigned int sec=0; sec<(*it)->GetGeometry().size() ; sec++)
{
M(((*it)->GetGeometry()[prim].Id()-1), ((*it)->GetGeometry()[sec].Id()-1))+=
Ncontainer(point, prim)*Ncontainer(point, sec)*dV;
}
}
}
}
// Solve one projection per matrix component (3x3 components), reusing M.
for(unsigned int firstvalue= 0; firstvalue< 3; firstvalue++)
{
for(unsigned int secondvalue= 0; secondvalue< 3; secondvalue++)
{
noalias(b)= ZeroVector(rTarget.NumberOfNodes());
noalias(g)= ZeroVector(rTarget.NumberOfNodes());
// Transfer of nodal variables between meshes via L_2-minimization;
// see Jiao + Heath, "Common-refinement-based data transfer ...",
// International Journal for numerical methods in engineering 61 (2004) 2402--2427.
for( ElementsArrayType::ptr_iterator it = TargetMeshElementsArray.ptr_begin();
it != TargetMeshElementsArray.ptr_end();
++it )
{
const IntegrationPointsArrayType& integration_points
= (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());
GeometryType::JacobiansType J(integration_points.size());
J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod());
const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod());
Matrix InvJ(3,3);
double DetJ;
for(unsigned int point=0; point< integration_points.size(); point++)
{
MathUtils<double>::InvertMatrix(J[point],InvJ,DetJ);
Point<3> sourceLocalPoint;
Point<3> targetLocalPoint;
noalias(targetLocalPoint)= integration_points[point];
Point<3> targetGlobalPoint;
(*it)->GetGeometry().GlobalCoordinates(targetGlobalPoint,
targetLocalPoint);
Element sourceElement;
double functionValue;
// Evaluate rThisVariable(firstvalue, secondvalue) in the old (source) mesh.
// Points without a partner contribute nothing to b (see "continue" below).
if(FindPartnerElement(targetGlobalPoint, SourceMeshElementsArray,
sourceElement,sourceLocalPoint))
{
functionValue=
ValueMatrixInOldMesh( sourceElement,sourceLocalPoint,rThisVariable, firstvalue, secondvalue );
}
else
{
std::cout<<"###### NO PARTNER FOUND IN OLD MESH : TransferVariablesBetweenMeshes(...Matrix...)#####"<<std::endl;
continue;
}
double dV= DetJ*integration_points[point].Weight();
for(unsigned int prim=0; prim<(*it)->GetGeometry().size(); prim++)
{
b(((*it)->GetGeometry()[prim].Id()-1))
+=functionValue
*Ncontainer(point, prim)*dV;
}
}
}
SkylineLUFactorizationSolver<SpaceType, SpaceType>().Solve(M, g, b);
// Scatter the solved nodal coefficients back to the target nodes.
for(ModelPart::NodeIterator it = rTarget.NodesBegin() ;
it != rTarget.NodesEnd() ; it++)
{
it->GetSolutionStepValue(rThisVariable)(firstvalue,secondvalue)
= g((it->Id()-1));
}
}//END secondvalue
}//END firstvalue
}
/**
* Transfer of rThisVariable stored on nodes form source mesh to target mesh.
* The transformation is done in a way that ensures a minimization
* of L_2-norm error (/sum{f_old(x)- f_new(x)) whereas
* f(x)_old/new= /sum{shape_func_i*rThisVariable_i}
* @param rSource source model_part
* @param rTarget target model_part
* @param rThisVariable Vector-Variable which should be transferred
* @see TransferVariablesBetweenMeshes(ModelPart& rSource, ModelPart& rTarget,
Variable<Kratos::Matrix>& rThisVariable)
* @see TransferVariablesBetweenMeshes(ModelPart& rSource, ModelPart& rTarget,
Variable<double>& rThisVariable)
* @ref Jiao&Heath: "Common-refinement-based data transfer...", Int.
* Journal for numer. meth. in eng. 61 (2004) 2402--2427
*/
void TransferVariablesBetweenMeshes(ModelPart& rSource, ModelPart& rTarget,
Variable<Kratos::Vector>& rThisVariable)
{
ElementsArrayType& SourceMeshElementsArray= rSource.Elements();
ElementsArrayType& TargetMeshElementsArray= rTarget.Elements();
// Reset the nodal variable on the target mesh before the transfer.
for(ModelPart::NodeIterator it = rTarget.NodesBegin();
it != rTarget.NodesEnd() ; it++)
{
it->GetSolutionStepValue(rThisVariable)
= ZeroVector(6);
}
// Set up the global L_2-projection system M*g = b on the target mesh.
// NOTE(review): indices are (node Id - 1), so target node Ids are assumed
// contiguous and 1-based -- verify for renumbered meshes.
SpaceType::MatrixType M(rTarget.NumberOfNodes(),rTarget.NumberOfNodes());
noalias(M)= ZeroMatrix(rTarget.NumberOfNodes(),rTarget.NumberOfNodes());
SpaceType::VectorType g(rTarget.NumberOfNodes());
SpaceType::VectorType b(rTarget.NumberOfNodes());
// Assemble the (component-independent) mass matrix M once, serially.
for( ElementsArrayType::ptr_iterator it = TargetMeshElementsArray.ptr_begin();
it != TargetMeshElementsArray.ptr_end();
++it )
{
const IntegrationPointsArrayType& integration_points
= (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());
GeometryType::JacobiansType J(integration_points.size());
J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod());
const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod());
Matrix InvJ(3,3);
double DetJ;
for(unsigned int point=0; point< integration_points.size(); point++)
{
MathUtils<double>::InvertMatrix(J[point],InvJ,DetJ);
// dV = |J| * Gauss weight: the integration measure at this point.
double dV= DetJ*integration_points[point].Weight();
for(unsigned int prim=0 ;prim<(*it)->GetGeometry().size() ;prim++)
{
for(unsigned int sec=0 ;sec<(*it)->GetGeometry().size() ;sec++)
{
M(((*it)->GetGeometry()[prim].Id()-1), ((*it)->GetGeometry()[sec].Id()-1))+=
Ncontainer(point, prim)*Ncontainer(point, sec)*dV;
}
}
}
}
// Solve one projection per vector component (6 components), reusing M.
for(unsigned int firstvalue= 0; firstvalue< 6; firstvalue++)
{
noalias(b)= ZeroVector(rTarget.NumberOfNodes());
noalias(g)= ZeroVector(rTarget.NumberOfNodes());
// Transfer of nodal variables between meshes via L_2-minimization;
// see Jiao + Heath, "Common-refinement-based data transfer ...",
// International Journal for numerical methods in engineering 61 (2004) 2402--2427.
for( ElementsArrayType::ptr_iterator it = TargetMeshElementsArray.ptr_begin();
it != TargetMeshElementsArray.ptr_end();
++it )
{
const IntegrationPointsArrayType& integration_points
= (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());
GeometryType::JacobiansType J(integration_points.size());
J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod());
const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod());
Matrix InvJ(3,3);
double DetJ;
for(unsigned int point=0; point< integration_points.size(); point++)
{
MathUtils<double>::InvertMatrix(J[point],InvJ,DetJ);
Point<3> sourceLocalPoint;
Point<3> targetLocalPoint;
noalias(targetLocalPoint)= integration_points[point];
Point<3> targetGlobalPoint;
(*it)->GetGeometry().GlobalCoordinates(targetGlobalPoint,
targetLocalPoint);
Element sourceElement;
double functionValue;
// Evaluate rThisVariable(firstvalue) in the old (source) mesh.
// Points without a partner contribute nothing to b (see "continue" below).
if(FindPartnerElement(targetGlobalPoint, SourceMeshElementsArray,
sourceElement,sourceLocalPoint))
{
functionValue=
ValueVectorInOldMesh( sourceElement,sourceLocalPoint,rThisVariable, firstvalue);
}
else
{
std::cout<<"###### NO PARTNER FOUND IN OLD MESH : TransferVariablesBetweenMeshes(...Vector...)#####"<<std::endl;
continue;
}
double dV= DetJ*integration_points[point].Weight();
for(unsigned int prim=0 ;prim<(*it)->GetGeometry().size() ;prim++)
{
b(((*it)->GetGeometry()[prim].Id()-1))
+=functionValue
*Ncontainer(point, prim)*dV;
}
}
}
SkylineLUFactorizationSolver<SpaceType, SpaceType>().Solve(M, g, b);
// Scatter the solved nodal coefficients back to the target nodes.
for(ModelPart::NodeIterator it = rTarget.NodesBegin() ;
it != rTarget.NodesEnd() ; it++)
{
it->GetSolutionStepValue(rThisVariable)(firstvalue)
= g((it->Id()-1));
}
}//END firstvalue
}
/**
* Transfer of rThisVariable stored on nodes from the source mesh to the target mesh.
* The transformation is done in a way that ensures a minimization
* of the L_2-norm error \sum(f_old(x) - f_new(x))^2, where
* f(x)_old/new = \sum_i{shape_func_i * rThisVariable_i}
* @param rSource source model_part
* @param rTarget target model_part
* @param rThisVariable double-Variable which should be transferred
* @see TransferVariablesBetweenMeshes(ModelPart& rSource, ModelPart& rTarget,
Variable<Kratos::Matrix>& rThisVariable)
* @see TransferVariablesBetweenMeshes(ModelPart& rSource, ModelPart& rTarget,
Variable<Kratos::Vector>& rThisVariable)
* @ref Jiao&Heath: "Common-refinement-based data transfer...", Int.
* Journal for numer. meth. in eng. 61 (2004) 2402--2427
*/
/**
 * Transfers the double-valued rThisVariable nodally stored on the source mesh
 * to the nodes of the target mesh by solving the global L_2-projection system
 * M * g = b, where M is the consistent mass matrix of the target mesh and b
 * accumulates the source-field values sampled at the target integration points.
 * NOTE(review): node Ids are assumed to be consecutive and 1-based (Id-1 is
 * used as the equation index) — confirm against the model part setup.
 * @param rSource source model_part (values are read here)
 * @param rTarget target model_part (values are written here)
 * @param rThisVariable double variable to be transferred
 */
void TransferVariablesBetweenMeshes(ModelPart& rSource, ModelPart& rTarget,
                                    Variable<double>& rThisVariable)
{
    ElementsArrayType& SourceMeshElementsArray= rSource.Elements();
    ElementsArrayType& TargetMeshElementsArray= rTarget.Elements();
    // Reset the target nodal values before accumulating the projection result
    for(ModelPart::NodeIterator it = rTarget.NodesBegin();
            it != rTarget.NodesEnd() ; it++)
    {
        it->GetSolutionStepValue(rThisVariable)
        = 0.0;
    }
    // Set up the equation system M*g = b (one equation per target node)
    SpaceType::MatrixType M(rTarget.NumberOfNodes(),rTarget.NumberOfNodes());
    noalias(M)= ZeroMatrix(rTarget.NumberOfNodes(),rTarget.NumberOfNodes());
    SpaceType::VectorType g(rTarget.NumberOfNodes());
    noalias(g)= ZeroVector(rTarget.NumberOfNodes());
    SpaceType::VectorType b(rTarget.NumberOfNodes());
    noalias(b)= ZeroVector(rTarget.NumberOfNodes());
    // Transfer of Gaussian variables to nodal variables via L_2-minimization,
    // see Jiao + Heath "Common-refinement-based data transfer ...",
    // International Journal for Numerical Methods in Engineering 61 (2004)
    // 2402--2427, for a general description of L_2-minimization.
    for( ElementsArrayType::ptr_iterator it = TargetMeshElementsArray.ptr_begin();
            it != TargetMeshElementsArray.ptr_end();
            ++it )
    {
        const IntegrationPointsArrayType& integration_points
            = (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());
        GeometryType::JacobiansType J(integration_points.size());
        J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod());
        const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod());
        Matrix InvJ(3,3);
        double DetJ;
        for(unsigned int point=0; point< integration_points.size(); point++)
        {
            // InvJ is a by-product; only DetJ is used (for the volume weight)
            MathUtils<double>::InvertMatrix(J[point],InvJ,DetJ);
            Point<3> sourceLocalPoint;
            Point<3> targetLocalPoint;
            noalias(targetLocalPoint)= integration_points[point];
            // Map the target integration point to global coordinates so it can
            // be located inside the (differently meshed) source model part
            Point<3> targetGlobalPoint;
            (*it)->GetGeometry().GlobalCoordinates(targetGlobalPoint,
                    targetLocalPoint);
            Element sourceElement;
            double functionValue;
            // Evaluate rThisVariable of the old (source) mesh at this point
            if(FindPartnerElement(targetGlobalPoint, SourceMeshElementsArray,
                                  sourceElement,sourceLocalPoint))
            {
                functionValue=
                    MappedValue( sourceElement,sourceLocalPoint,rThisVariable);
            }
            else
            {
                // Point lies outside the source mesh: skip its contribution
                std::cout<<"###### NO PARTNER FOUND IN OLD MESH : TransferVariablesBetweenMeshes(...double...)#####"<<std::endl;
                continue;
            }
            double dV= DetJ*integration_points[point].Weight();
            // Assemble RHS and consistent mass matrix contributions
            for(unsigned int prim=0 ;prim<(*it)->GetGeometry().size() ;prim++)
            {
                b(((*it)->GetGeometry()[prim].Id()-1))
                +=functionValue*Ncontainer(point, prim)*dV;
                for(unsigned int sec=0; sec<(*it)->GetGeometry().size(); sec++)
                {
                    M(((*it)->GetGeometry()[prim].Id()-1), ((*it)->GetGeometry()[sec].Id()-1))+=
                        Ncontainer(point, prim)*Ncontainer(point, sec)*dV;
                }
            }
        }
    }
    // Solve for the nodal values g and write them back to the target nodes
    SkylineLUFactorizationSolver<SpaceType, SpaceType>().Solve(M, g, b);
    for(ModelPart::NodeIterator it = rTarget.NodesBegin() ;
            it != rTarget.NodesEnd() ; it++)
    {
        it->GetSolutionStepValue(rThisVariable)
        = g((it->Id()-1));
    }
}
/**
* Auxiliary function.
* This one calculates the target value of given Variable by shape-function-based
* interpolation of the nodal values from given source element to the given
* target point that is assumed to lie within the source element
* @return value of given variable in new point
* @param oldElement corresponding element in source mesh
* @param localPoint given target point to map the variable to
* @param rThisVariable given variable to be transferred
* @see ValueVectorInOldMesh(Element& oldElement, Point<3>& localPoint,
const Variable<Kratos::Vector>& rThisVariable )
* @see MappedValue( Element& sourceElement, Point<3>& targetPoint,
const Variable<double>& rThisVariable)
*/
/**
 * Interpolates the nodal values of a 3x3 Matrix variable from the given source
 * element to the given local point via the element's shape functions.
 * The target point is assumed to lie inside the source element.
 * @param oldElement corresponding element in the source mesh
 * @param localPoint natural coordinates of the evaluation point
 * @param rThisVariable matrix variable to be interpolated
 * @return interpolated 3x3 matrix value at localPoint
 */
Matrix ValueMatrixInOldMesh(Element& oldElement, Point<3>& localPoint,
                            const Variable<Kratos::Matrix>& rThisVariable )
{
    Matrix result(3,3);
    noalias(result)= ZeroMatrix(3,3);
    Matrix nodal_value(3,3);
    const unsigned int node_count = oldElement.GetGeometry().size();
    for(unsigned int node=0; node<node_count; node++)
    {
        noalias(nodal_value)= oldElement.GetGeometry()[node].GetSolutionStepValue(rThisVariable);
        const double shape_value = oldElement.GetGeometry().ShapeFunctionValue( node, localPoint);
        for(unsigned int row=0; row<3; row++)
            for(unsigned int col=0; col<3; col++)
                result(row,col) += shape_value*nodal_value(row,col);
    }
    return result;
}
/**
* Auxiliary function.
* This one calculates the target value of given Variable by shape-function-based
* interpolation of the nodal values from given source element to the given
* target point that is assumed to lie within the source element
* @return value of given variable in new point
* @param oldElement corresponding element in source mesh
* @param localPoint given target point to map the variable to
* @param rThisVariable given variable to be transferred
* @see ValueMatrixInOldMesh(Element& oldElement, Point<3>& localPoint,
const Variable<Kratos::Matrix>& rThisVariable )
* @see MappedValue( Element& sourceElement, Point<3>& targetPoint,
const Variable<double>& rThisVariable)
*/
/**
 * Interpolates the nodal values of a 6-component Vector variable from the
 * given source element to the given local point via the element's shape
 * functions. The target point is assumed to lie inside the source element.
 * @param oldElement corresponding element in the source mesh
 * @param localPoint natural coordinates of the evaluation point
 * @param rThisVariable vector variable to be interpolated
 * @return interpolated 6-component vector value at localPoint
 */
Vector ValueVectorInOldMesh(Element& oldElement, Point<3>& localPoint,
                            const Variable<Kratos::Vector>& rThisVariable )
{
    Vector result(6);
    noalias(result) = ZeroVector(6);
    Vector nodal_value(6);
    const unsigned int node_count = oldElement.GetGeometry().size();
    for(unsigned int node=0; node<node_count; node++)
    {
        noalias(nodal_value)= oldElement.GetGeometry()[node].GetSolutionStepValue(rThisVariable);
        const double shape_value = oldElement.GetGeometry().ShapeFunctionValue( node, localPoint);
        for(unsigned int comp=0; comp<6; comp++)
            result(comp) += shape_value*nodal_value(comp);
    }
    return result;
}
/**
* Auxiliary function.
* This one calculates the target value of given Variable by shape-function-based
* interpolation of the nodal values from given source element to the given
* target point that is assumed to lie within the source element
* @return value of given variable in new point
* @param sourceElement corresponding element in source mesh
* @param targetPoint given target point to map the variable to
* @param rThisVariable given variable to be transferred
* @see ValueMatrixInOldMesh(Element& oldElement, Point<3>& localPoint,
const Variable<Kratos::Matrix>& rThisVariable )
* @see ValueVectorInOldMesh(Element& oldElement, Point<3>& localPoint,
const Variable<Kratos::Vector>& rThisVariable )
*/
/**
 * Interpolates a nodal pressure-like double variable from the given source
 * element to the given target point. For quadratic geometries the pressure is
 * interpolated on the corresponding LINEAR corner-node sub-geometry
 * (20/27-node hexahedra -> 8-node hexahedron, 10-node tetrahedron -> 4-node
 * tetrahedron), matching a mixed u/p formulation with linear pressure.
 * @param sourceElement corresponding element in the source mesh
 * @param targetPoint natural coordinates of the evaluation point
 * @param rThisVariable double variable to be interpolated
 * @return interpolated value at targetPoint
 */
double MappedValuePressure( Element& sourceElement, Point<3>& targetPoint,
                            const Variable<double>& rThisVariable)
{
    double newValue = 0.0;
    Geometry<Node<3> >::Pointer pPressureGeometry;
    if(sourceElement.GetGeometry().size()==20 || sourceElement.GetGeometry().size()==27)
        pPressureGeometry= Geometry<Node<3> >::Pointer(new Hexahedra3D8 <Node<3> >(
                               sourceElement.GetGeometry()(0),sourceElement.GetGeometry()(1),
                               sourceElement.GetGeometry()(2),sourceElement.GetGeometry()(3),
                               sourceElement.GetGeometry()(4),sourceElement.GetGeometry()(5),
                               sourceElement.GetGeometry()(6),sourceElement.GetGeometry()(7)));
    else if(sourceElement.GetGeometry().size()==10 )
        pPressureGeometry= Geometry<Node<3> >::Pointer(new Tetrahedra3D4 <Node<3> >(
                               sourceElement.GetGeometry()(0),sourceElement.GetGeometry()(1),
                               sourceElement.GetGeometry()(2),sourceElement.GetGeometry()(3)));
    else
    {
        // BUG FIX: previously any other geometry size left pPressureGeometry
        // unset and the loop below dereferenced an empty pointer. Fall back to
        // interpolating on the element's own geometry instead.
        for(unsigned int i= 0; i< sourceElement.GetGeometry().size(); i++)
        {
            newValue+=
                sourceElement.GetGeometry().ShapeFunctionValue( i, targetPoint)
                *sourceElement.GetGeometry()[i].GetSolutionStepValue(rThisVariable);
        }
        return newValue;
    }
    for(unsigned int i= 0; i< pPressureGeometry->size(); i++)
    {
        newValue+=
            pPressureGeometry->ShapeFunctionValue( i, targetPoint)
            *sourceElement.GetGeometry()[i].GetSolutionStepValue(rThisVariable);
    }
    return newValue;
}
/**
* Auxiliary function.
* This one calculates the target value of given Variable by shape-function-based
* interpolation of the nodal values from given source element to the given
* target point that is assumed to lie within the source element
* @return value of given variable in new point
* @param sourceElement corresponding element in source mesh
* @param targetPoint given target point to map the variable to
* @param rThisVariable given variable to be transferred
* @see ValueMatrixInOldMesh(Element& oldElement, Point<3>& localPoint,
const Variable<Kratos::Matrix>& rThisVariable )
* @see ValueVectorInOldMesh(Element& oldElement, Point<3>& localPoint,
const Variable<Kratos::Vector>& rThisVariable )
*/
/**
 * Interpolates a nodal double variable from the given source element to the
 * given target point via the element's shape functions. The target point is
 * assumed to lie inside the source element.
 * @param sourceElement corresponding element in the source mesh
 * @param targetPoint natural coordinates of the evaluation point
 * @param rThisVariable double variable to be interpolated
 * @return interpolated value at targetPoint
 */
double MappedValue( Element& sourceElement, Point<3>& targetPoint,
                    const Variable<double>& rThisVariable)
{
    double interpolated = 0.0;
    const unsigned int node_count = sourceElement.GetGeometry().size();
    for(unsigned int node= 0; node< node_count; node++)
    {
        interpolated+=
            sourceElement.GetGeometry().ShapeFunctionValue( node, targetPoint)
            *sourceElement.GetGeometry()[node].GetSolutionStepValue(rThisVariable);
    }
    return interpolated;
}
/**
* Auxiliary function.
* This one calculates the target value of given Variable by shape-function-based
* interpolation of the nodal values from given source element to the given
* target point that is assumed to lie within the source element
* @return value of given variable in new point
* @param sourceElement corresponding element in source mesh
* @param targetPoint given target point to map the variable to
* @param rThisVariable given variable to be transferred
*/
/**
 * Interpolates a nodal 3-component (array_1d) variable from the given source
 * element to the given target point via the element's shape functions. The
 * target point is assumed to lie inside the source element.
 * @param sourceElement corresponding element in the source mesh
 * @param targetPoint natural coordinates of the evaluation point
 * @param rThisVariable 3-component variable to be interpolated
 * @return interpolated 3-component vector at targetPoint
 */
Vector MappedValue( Element& sourceElement, Point<3>& targetPoint,
                    const Variable<array_1d<double, 3 > >& rThisVariable)
{
    Vector interpolated = ZeroVector(3);
    const unsigned int node_count = sourceElement.GetGeometry().size();
    for(unsigned int node=0; node<node_count; node++)
    {
        interpolated+=
            sourceElement.GetGeometry().ShapeFunctionValue( node, targetPoint)
            *sourceElement.GetGeometry()[node].GetSolutionStepValue(rThisVariable);
    }
    return interpolated;
}
/**
* calculates for a point given with the physical coords newNode
* the element oldElement where it lays in and the natural coords
* localPoint within this element
* @return whether a corresponding element and natural coords could be found
* @param newNode physical coordinates of given point
* @param OldMeshElementsArray Array of elements wherein the search should be performed
* @param oldElement corresponding element for newNode
* @param rResult corresponding natural coords for newNode
* TODO: find a faster method for outside search (hextree? etc.), maybe outside this
* function by restriction of OldMeshElementsArray
*/
/**
 * Finds, for a point given in physical coordinates (newNode), the element of
 * OldMeshElementsArray that contains it together with the natural coordinates
 * of the point inside that element.
 * Strategy: collect the elements owning the node(s) nearest to newNode (in the
 * reference configuration), test them with IsInside, and if none contains the
 * point, repeat with the next-nearest ring of candidates.
 * @return whether a corresponding element and natural coords could be found
 * @param newNode physical coordinates of the given point
 * @param OldMeshElementsArray array of elements wherein the search is performed
 * @param oldElement [out] corresponding element for newNode
 * @param rResult [out] corresponding natural coords for newNode
 * TODO: find a faster method for outside search (hextree? etc.), maybe outside
 * this function by restriction of OldMeshElementsArray
 */
bool FindPartnerElement( CoordinatesArrayType& newNode,
                         const ElementsArrayType& OldMeshElementsArray,
                         Element& oldElement, Point<3>& rResult)
{
    bool partner_found= false;
    ElementsArrayType::Pointer OldElementsSet( new ElementsArrayType() );
    // Distances already inspected in previous rounds (to skip them next time)
    std::vector<double > OldMinDist;
    bool newMinDistFound= false;
    // Safety cap on the number of candidate rings inspected before giving up
    const int max_candidate_rounds = 27;
    int counter = 0;
    do
    {
        double minDist = 1.0e120;
        newMinDistFound= false;
        OldElementsSet->clear();
        // Global search over all elements of the old mesh
        for( ElementsArrayType::ptr_const_iterator it = OldMeshElementsArray.ptr_begin();
                it != OldMeshElementsArray.ptr_end(); ++it )
        {
            // Loop over all nodes of the tested element
            for( unsigned int n=0; n<(*it)->GetGeometry().size(); n++ )
            {
                // Squared distance to newNode in the initial configuration
                double dist = ((*it)->GetGeometry().GetPoint(n).X0()-newNode[0])
                              *((*it)->GetGeometry().GetPoint(n).X0()-newNode[0])
                              +((*it)->GetGeometry().GetPoint(n).Y0()-newNode[1])
                              *((*it)->GetGeometry().GetPoint(n).Y0()-newNode[1])
                              +((*it)->GetGeometry().GetPoint(n).Z0()-newNode[2])
                              *((*it)->GetGeometry().GetPoint(n).Z0()-newNode[2]);
                if( fabs(dist-minDist) < 1e-7 )
                {
                    // Same distance as the current minimum: add as candidate
                    OldElementsSet->push_back(*it);
                }
                else if( dist < minDist )
                {
                    // Skip distances already handled in an earlier round
                    bool alreadyUsed= false;
                    for(unsigned int old_dist= 0; old_dist<OldMinDist.size(); old_dist++)
                    {
                        if(fabs(dist- OldMinDist[old_dist])< 1e-7 )
                            alreadyUsed= true;
                    }
                    if(!alreadyUsed)
                    {
                        OldElementsSet->clear();
                        minDist = dist;
                        OldElementsSet->push_back(*it);
                        newMinDistFound= true;
                    }
                }
            }
        }
        OldMinDist.push_back(minDist);
        // Test the collected candidates for actual containment
        for( ElementsArrayType::ptr_const_iterator it = OldElementsSet->ptr_begin();
                it != OldElementsSet->ptr_end(); ++it )
        {
            if( (*it)->GetGeometry().IsInside( newNode, rResult ) )
            {
                oldElement=**(it);
                partner_found=true;
                return partner_found;
            }
        }
        // BUG FIX: removed leftover debug print of the round counter, which
        // flooded stdout on every unsuccessful search round.
        counter++;
        if( counter > max_candidate_rounds )
            break;
    }while(newMinDistFound);
    if(!partner_found)
        std::cout<<" !!!! NO PARTNER FOUND !!!! "<<std::endl;
    return partner_found;
}
//***************************************************************************
//***************************************************************************
/**
* Auxiliary function.
* This one calculates the target value of given Matrix-Variable at row firstvalue
* and column secondvalue by shape-function-based
* interpolation of the nodal values from given source element to the given
* target point that is assumed to lie within the source element
* @return value of given variable in new point
* @param sourceElement corresponding element in source mesh
* @param targetPoint given target point to map the variable to
* @param rThisVariable given variable to be transferred
* @param firstvalue row index
* @param secondvalue column index
* @see ValueVectorInOldMesh(Element& oldElement, Point<3>& localPoint,
const Variable<Kratos::Vector>& rThisVariable, unsigned int firstvalue)
*/
/**
 * Interpolates one component (firstvalue, secondvalue) of a nodal Matrix
 * variable from the given source element to the given local point via the
 * element's shape functions.
 * @param oldElement corresponding element in the source mesh
 * @param localPoint natural coordinates of the evaluation point
 * @param rThisVariable matrix variable to be interpolated
 * @param firstvalue row index
 * @param secondvalue column index
 * @return interpolated scalar component at localPoint
 */
double ValueMatrixInOldMesh(Element& oldElement, Point<3>& localPoint,
                            const Variable<Kratos::Matrix>& rThisVariable, unsigned int firstvalue, unsigned int secondvalue )
{
    double interpolated= 0.0;
    const unsigned int node_count = oldElement.GetGeometry().size();
    for(unsigned int node=0; node<node_count; node++)
    {
        interpolated
        +=oldElement.GetGeometry().ShapeFunctionValue( node, localPoint)
          *oldElement.GetGeometry()[node].GetSolutionStepValue(rThisVariable)(firstvalue,secondvalue);
    }
    return interpolated;
}
/**
* Auxiliary function.
* This one calculates the target value of given Vector-Variable at firstvalue
* by shape-function-based
* interpolation of the nodal values from given source element to the given
* target point that is assumed to lie within the source element
* @return value of given variable in new point
* @param sourceElement corresponding element in source mesh
* @param targetPoint given target point to map the variable to
* @param rThisVariable given variable to be transferred
* @param firstvalue index
* @see ValueVectorInOldMesh(Element& oldElement, Point<3>& localPoint,
const Variable<Kratos::Vector>& rThisVariable, unsigned int firstvalue)
*/
/**
 * Interpolates one component (firstvalue) of a nodal Vector variable from the
 * given source element to the given local point via the element's shape
 * functions.
 * @param oldElement corresponding element in the source mesh
 * @param localPoint natural coordinates of the evaluation point
 * @param rThisVariable vector variable to be interpolated
 * @param firstvalue component index
 * @return interpolated scalar component at localPoint
 */
double ValueVectorInOldMesh(Element& oldElement, Point<3>& localPoint,
                            const Variable<Kratos::Vector>& rThisVariable, unsigned int firstvalue )
{
    double interpolated= 0.0;
    const unsigned int node_count = oldElement.GetGeometry().size();
    for(unsigned int node=0; node<node_count; node++)
    {
        interpolated
        +=oldElement.GetGeometry().ShapeFunctionValue( node, localPoint)
          *oldElement.GetGeometry()[node].GetSolutionStepValue(rThisVariable)(firstvalue);
    }
    return interpolated;
}
/**
 * Splits [0, number_of_rows) into number_of_threads contiguous index ranges
 * for parallel loops: thread t works on [partitions[t], partitions[t+1]).
 * The integer-division remainder is deliberately assigned to the last range.
 * @param number_of_threads number of ranges to create (must be >= 1)
 * @param number_of_rows total number of rows to distribute
 * @param partitions [out] resized to number_of_threads+1 boundary indices
 */
inline void CreatePartition(unsigned int number_of_threads,const int number_of_rows, vector<unsigned int>& partitions)
{
    partitions.resize(number_of_threads+1);
    const int partition_size = number_of_rows / number_of_threads;
    partitions[0] = 0;
    partitions[number_of_threads] = number_of_rows;
    // FIX: loop index is now unsigned to avoid the signed/unsigned comparison
    // against number_of_threads in the original (int i < unsigned int).
    for(unsigned int i = 1; i<number_of_threads; i++)
        partitions[i] = partitions[i-1] + partition_size ;
}
};//Class Scheme
}//namespace Kratos.
#endif /* KRATOS_PARALLEL_VARIABLE_TRANSFER_UTILITY_INCLUDED defined */
|
lda.h | #ifndef __LDA
#define __LDA
#include <vector>
#include <random>
#include <omp.h>
#include <algorithm>
#include <chrono>
#include <thread>
#include <mutex>
#include <deque>
#include <mpi.h>
#include <fstream>
#include "glog/logging.h"
#include "types.h"
#include "guide_table.h"
#include "dcm.h"
#include "xorshift.h"
#include "distributions.h"
#include "thread_local.h"
#include "hash_table.h"
using std::vector;
using std::pair;
inline bool compare(const SpEntry &x, const SpEntry &y) {
return x.v > y.v;
}
/**
 * Distributed (MPI + threads) LDA trainer operating on a document/word-split
 * corpus. Topic counts are kept in two distributed sparse count matrices:
 * cwk (word-topic) and cdk (doc-topic).
 */
class LDA {
public:
    TTopic K;                       // number of topics
    vector<TProb> alpha;            // per-topic document-topic prior (symmetric, filled with `alpha`)
    TProb beta, alphaBar, betaBar;  // word prior, sum of alpha, beta * global vocabulary size
    /// notice : log_likelihood need double precision to work correctly
    TLikehood log_likelihood;
    vector<TLikehood> llthread;     // per-thread partial log-likelihoods
    ThreadLocal<xorshift> generators;   // per-thread RNGs
    ThreadLocal<vector<TProb>> phis;
    // Sampling helper tables (alias/guide tables and scratch probabilities)
    GuideTable prior1Table;
    vector<TProb> priorCwk;
    vector<TProb> prior1Prob;
    TProb prior1Sum;
    ThreadLocal<GuideTable> prior2Table;
    ThreadLocal<vector<TTopic>> prior2NNZ;
    ThreadLocal<vector<TProb>> prior2Prob;
    ThreadLocal<vector<TProb>> probs;
    UniformRealDistribution<TProb> u01; // uniform(0,1) sampler backed by thread 0's RNG
    TIter iter;                     // number of training iterations
    CVA<int> &corpus;               // token storage (word-major)
    // MPI topology: this process's rank, total processes, and reporting rank
    TId process_size, process_id, monitor_id;
    TLen thread_size;
    TCount num_words, num_docs;     // local (per-process) vocabulary/document counts
    vector<TCount> word_per_doc;
    TCount doc_split_size, word_split_size; // process-grid dimensions
    vector<TProb> inv_ck;
    DCMSparse cwk;                  // word-topic counts, column-partitioned
    DCMSparse cdk;                  // doc-topic counts, row-partitioned
    LocalMergeStyle local_merge_style;
    size_t global_token_number;     // token count summed over all processes
    TCount global_word_number;      // vocabulary size summed over the doc-partition group
    // count the word frequency belonging to this node
    vector<TCount> word_frequency;
    vector<TCount> local_word_frequency, global_word_frequency;
    /**
     * Wires up the distributed count matrices and MPI communicators and sizes
     * all per-word/per-doc/per-topic buffers.
     * NOTE(review): betaBar is computed from the vocabulary size reduced over
     * the doc-partition communicator (processes sharing the same word split).
     */
    LDA(TIter iter, TTopic K, TProb alpha, TProb beta, CVA<int> &corpus,
        const TId process_size, const TId process_id, const TLen thread_size,
        const TCount num_docs, const TCount num_words, const TCount doc_split_size,
        const TCount word_split_size, LocalMergeStyle local_merge_style)
        : K(K), alpha(K, alpha), beta(beta), alphaBar(alpha * K), iter(iter),
          corpus(corpus),
          process_size(process_size), process_id(process_id), thread_size(thread_size),
          num_docs(num_docs), num_words(num_words), doc_split_size(doc_split_size),
          word_split_size(word_split_size), local_merge_style(local_merge_style),
          cwk(word_split_size, doc_split_size, num_words, K, column_partition, process_size,
              process_id, thread_size, local_merge_style, 0),
          cdk(doc_split_size, word_split_size, num_docs, K, row_partition, process_size,
              process_id, thread_size, local_merge_style, 0) {
        /*
        printf("pid %d LDA constructor row_size : %d, column_size : %d, process_size : %d, process_id : %d, thread_size : %d\n",
                process_id, cwk.row_size, cwk.column_size, cwk.process_size, cwk.process_id, cwk.thread_size);
        printf("pid %d LDA constructor row_head : %d, row_tail : %d\n", cwk.process_id, cwk.row_head, cwk.row_tail);
        */
        // Group processes that own the same word split to reduce vocab/word stats
        MPI_Comm doc_partition;
        MPI_Comm_split(MPI_COMM_WORLD, process_id / word_split_size, process_id, &doc_partition);
        TCount local_word_number = num_words;
        MPI_Allreduce(&local_word_number, &global_word_number, 1, MPI_INT, MPI_SUM, doc_partition);
        betaBar = beta * global_word_number;
        word_per_doc.resize(num_docs);
        llthread.resize(thread_size);
        inv_ck.resize(K);
        priorCwk.resize(K);
        prior1Prob.resize(K);
        size_t local_token_number = corpus.size() / sizeof(int);
        MPI_Allreduce(&local_token_number, &global_token_number, 1, MPI_UNSIGNED_LONG_LONG, MPI_SUM, MPI_COMM_WORLD);
        // Initialize generators
        std::random_device rd;
        for (auto &gen: generators) gen.seed(rd(), rd());
        u01 = decltype(u01)(0, 1, generators.Get(0));
        word_frequency.resize(num_words);
        local_word_frequency.resize(num_words);
        global_word_frequency.resize(num_words);
        monitor_id = 0;
    }
    virtual void Estimate();
    virtual ~LDA() { }
    void iterWord();
    /**
     * Fills topic_word (a K x frequent_word_number table of SpEntry, assumed
     * pre-initialized) with the locally most frequent words per topic, using
     * insertion into the first slot whose count is smaller.
     * @param topic_word output table, row-major per topic
     * @param wordmap local-to-global word id mapping
     * @param frequent_word_number number of word slots kept per topic
     */
    void outputTopicWord(vector<SpEntry> &topic_word, vector<TIndex>wordmap, int frequent_word_number) {
        for (TIndex local_w = 0; local_w < num_words; ++local_w) {
            auto sparse_row = cwk.row(local_w);
            for (auto entry: sparse_row) {
                TTopic topic = entry.k;
                TCount cnt = entry.v;
                for (TIndex i = 0; i < frequent_word_number; ++i) {
                    TTopic offset = topic * frequent_word_number + i;
                    if (cnt > topic_word[offset].v) {
                        topic_word[offset].k = wordmap[local_w];
                        topic_word[offset].v = cnt;
                        break;
                    }
                }
            }
        }
        /*
         * code backup for debug
        ofstream fout("/home/yama/btm/BigTopicModel/data/nips.wf-tail." + to_string(process_id));
        for (TIndex word = 0; word < num_words; ++word) {
            fout << wordmap[word] << " " << word_frequency[word] << "\n";
        }
        fout << endl;
        for (TIndex topic = 0; topic < K; ++topic) {
            std::sort(ltw[topic].begin(), ltw[topic].end(), compare);
            fout << ltw[topic].size() << " : ";
            for (auto entry: ltw[topic])
                fout << wordmap[entry.k] << " " << entry.v << ",\t";
            fout << endl;
        }
        fout.close();
        */
    }
    /**
     * Computes global word frequencies (reduced over the word-partition
     * communicator) and dumps them to "<prefix>.wf-head.<process_id>".
     * @param wordmap local-to-global word id mapping
     * @param prefix output file path prefix
     */
    void corpusStat(vector<TIndex>wordmap, string prefix) {
        //#pragma omp parallel for
        for (TWord v = 0; v < num_words; v++) {
            auto row = corpus.Get(v);
            local_word_frequency[v] = row.size();
        }
        MPI_Comm word_partition;
        MPI_Comm_split(MPI_COMM_WORLD, process_id % word_split_size, process_id, &word_partition);
        MPI_Allreduce(local_word_frequency.data(), global_word_frequency.data(), global_word_frequency.size(),
                      MPI_INT, MPI_SUM, word_partition);
        // show the orig word frequency
        ofstream fout(prefix + ".wf-head." + to_string(process_id));
        for (TIndex word = 0; word < num_words; ++word) {
            fout << wordmap[word] << " " << global_word_frequency[word] << "\n";
        }
        fout.close();
    }
};
#endif
|
matProduct2.c | /*
OpenMP implementation of matrix multiplication. Each thread takes care
a chunk of rows.
Compile with gcc -O3 -fopenmp omp_matrixmult.c -o omp_matrixmult
*/
// Online source: http://users.abo.fi/mats/PP2012/examples/OpenMP/omp_critical.c
// permission obtained
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
/* Number of threads used */
#define NR_THREADS 4
/*
#define DEBUG 0
#define NRA 1400 // number of rows in matrix A
#define NCA 1400 // number of columns in matrix A
#define NCB 1400 // number of columns in matrix B
*/
/* Use smaller matrices for testing and debugging */
#define DEBUG 1
#define NRA 10 // number of rows in matrix A
#define NCA 10 // number of columns in matrix A
#define NCB 10 // number of columns in matrix B
/*
 * Multiplies the NRA x NCA matrix a by the NCA x NCB matrix b in parallel
 * (OpenMP, rows of the result shared among threads), then repeats the
 * multiplication sequentially and compares the two results.
 */
int main (int argc, char *argv[]) {
    int tid, nthreads, i, j, k;
    double **a, **b, **c;
    double *a_block, *b_block, *c_block;
    double **res;
    double *res_block;
    double starttime, stoptime;

    a = (double **) malloc(NRA*sizeof(double *)); /* matrix a to be multiplied */
    b = (double **) malloc(NCA*sizeof(double *)); /* matrix b to be multiplied */
    c = (double **) malloc(NRA*sizeof(double *)); /* result matrix c */
    a_block = (double *) malloc(NRA*NCA*sizeof(double)); /* Storage for matrices */
    b_block = (double *) malloc(NCA*NCB*sizeof(double));
    c_block = (double *) malloc(NRA*NCB*sizeof(double));
    /* Result matrix for the sequential algorithm */
    res = (double **) malloc(NRA*sizeof(double *));
    res_block = (double *) malloc(NRA*NCB*sizeof(double));
    /* FIX: check every allocation before use */
    if (a == NULL || b == NULL || c == NULL || res == NULL ||
        a_block == NULL || b_block == NULL || c_block == NULL || res_block == NULL) {
        fprintf(stderr, "Out of memory\n");
        exit(1);
    }
    /* BUG FIX: a row pointer must advance by the row LENGTH (the number of
       COLUMNS of that matrix), not by the number of rows. The original used
       i*NRA for every matrix, which only worked because NRA==NCA==NCB. */
    for (i=0; i<NRA; i++)       /* Initialize pointers to a (rows of NCA) */
        a[i] = a_block+i*NCA;
    for (i=0; i<NCA; i++)       /* Initialize pointers to b (rows of NCB) */
        b[i] = b_block+i*NCB;
    for (i=0; i<NRA; i++)       /* Initialize pointers to c (rows of NCB) */
        c[i] = c_block+i*NCB;
    for (i=0; i<NRA; i++)       /* Initialize pointers to res (rows of NCB) */
        res[i] = res_block+i*NCB;
    /* A static allocation of the matrices would be done like this */
    /* double a[NRA][NCA], b[NCA][NCB], c[NRA][NCB]; */

    /*** Spawn a parallel region explicitly scoping all variables ***/
    #pragma omp parallel shared(a,b,c,nthreads) private(tid,i,j,k) num_threads(NR_THREADS)
    {
        tid = omp_get_thread_num();
        if (tid == 0) {   /* Only thread 0 prints */
            nthreads = omp_get_num_threads();
            printf("Starting matrix multiplication with %d threads\n",nthreads);
            printf("Initializing matrices...\n");
        }
        /*** Initialize matrices ***/
        #pragma omp for nowait   /* No need to synchronize the threads before the */
        for (i=0; i<NRA; i++)    /* last matrix has been initialized */
            for (j=0; j<NCA; j++)
                a[i][j]= (double) (i+j);
        #pragma omp for nowait
        for (i=0; i<NCA; i++)
            for (j=0; j<NCB; j++)
                b[i][j]= (double) (i*j);
        #pragma omp for          /* We synchronize the threads after this */
        for (i=0; i<NRA; i++)
            for (j=0; j<NCB; j++)
                c[i][j]= 0.0;
        if (tid == 0)            /* Thread zero measures time */
            starttime = omp_get_wtime();
        /* Do matrix multiply sharing iterations on outer loop */
        #pragma omp for
        for (i=0; i<NRA; i++) {
            if (DEBUG) printf("Thread=%d did row=%d\n",tid,i);
            for(j=0; j<NCB; j++) {
                for (k=0; k<NCA; k++) {
                    c[i][j] += a[i][k] * b[k][j];
                }
            }
        }
        if (tid == 0) {
            stoptime = omp_get_wtime();
            printf("Time for parallel matrix multiplication: %3.2f s\n",
                   stoptime-starttime);
        }
    } /*** End of parallel region ***/

    starttime = omp_get_wtime();
    /* Do a sequential matrix multiplication and compare the results */
    for (i=0; i<NRA; i++) {
        for (j=0; j<NCB; j++) {
            res[i][j] = 0.0;
            for (k=0; k<NCA; k++)
                res[i][j] += a[i][k]*b[k][j];
        }
    }
    stoptime = omp_get_wtime();
    printf("Time for sequential matrix multiplication: %3.2f s\n", stoptime-starttime);
    /* Check that the results are the same as in the parallel solution.
       Exact equality is acceptable here because all matrix entries are
       integer-valued, so both algorithms produce bit-identical sums. */
    for (i=0; i<NRA; i++) {
        for (j=0; j<NCB; j++) {
            if (res[i][j] != c[i][j]) {
                printf("Different result %5.1f != %5.1f in %d %d\n ", res[i][j], c[i][j], i, j);
            }
        }
    }
    /* If DEBUG is true, print the results. Use smaller matrices for this */
    if (DEBUG) {
        printf("Result Matrix:\n");
        for (i=0; i<NRA; i++) {
            for (j=0; j<NCB; j++)
                printf("%6.1f   ", c[i][j]);
            printf("\n");
        }
    }
    /* FIX: release all allocated storage (the original leaked everything) */
    free(a); free(b); free(c); free(res);
    free(a_block); free(b_block); free(c_block); free(res_block);
    printf("Done.\n");
    return 0;
}
|
convolution_3x3.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Direct (non-Winograd) 3x3 stride-1 convolution, scalar reference path.
// For each output channel, the output is seeded with the bias and every input
// channel's contribution is accumulated. Rows are processed two at a time
// (sharing the loaded input rows r1/r2) with a scalar tail loop for an odd
// final row.
static void conv3x3s1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;
    const float* kernel = _kernel;
    const float* bias = _bias;
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);
        // Seed the whole output channel with its bias (0 if no bias given)
        const float bias0 = bias ? bias[p] : 0.f;
        out.fill(bias0);
        for (int q = 0; q < inch; q++)
        {
            // Two output rows are produced per iteration of the main loop
            float* outptr = out;
            float* outptr2 = outptr + outw;
            const float* img0 = bottom_blob.channel(q);
            // 3x3 kernel for (output p, input q), stored as 9 contiguous floats
            const float* kernel0 = kernel + p * inch * 9 + q * 9;
            // Four consecutive input rows feed two output rows
            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w * 2;
            const float* r3 = img0 + w * 3;
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;
            int i = 0;
            for (; i + 1 < outh; i += 2)
            {
                int remain = outw;
                for (; remain > 0; remain--)
                {
                    // sum  -> output row i   (rows r0,r1,r2)
                    // sum2 -> output row i+1 (rows r1,r2,r3; kernel rows reused)
                    float sum = 0;
                    float sum2 = 0;
                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];
                    sum2 += r1[0] * k0[0];
                    sum2 += r1[1] * k0[1];
                    sum2 += r1[2] * k0[2];
                    sum2 += r2[0] * k1[0];
                    sum2 += r2[1] * k1[1];
                    sum2 += r2[2] * k1[2];
                    sum2 += r3[0] * k2[0];
                    sum2 += r3[1] * k2[1];
                    sum2 += r3[2] * k2[2];
                    // Accumulate (bias / earlier input channels already stored)
                    *outptr += sum;
                    *outptr2 += sum2;
                    r0++;
                    r1++;
                    r2++;
                    r3++;
                    outptr++;
                    outptr2++;
                }
                // Skip the 2-column right border and advance one extra row
                // (two rows consumed per iteration)
                r0 += 2 + w;
                r1 += 2 + w;
                r2 += 2 + w;
                r3 += 2 + w;
                outptr += outw;
                outptr2 += outw;
            }
            // Tail: single remaining output row when outh is odd
            for (; i < outh; i++)
            {
                int remain = outw;
                for (; remain > 0; remain--)
                {
                    float sum = 0;
                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];
                    *outptr += sum;
                    r0++;
                    r1++;
                    r2++;
                    outptr++;
                }
                r0 += 2;
                r1 += 2;
                r2 += 2;
            }
        }
    }
}
// Winograd F(2,3) kernel transform: converts each 3x3 kernel into its 4x4
// transformed tile U = G * g * G^T, stored per (outch, inch) pair in kernel_tm.
static void conv3x3s1_winograd23_transform_kernel_sse(const Mat& kernel, Mat& kernel_tm, int inch, int outch)
{
    kernel_tm.create(4 * 4, inch, outch);
    // G matrix for F(2,3)
    const float ktm[4][3] = {
        {1.0f, 0.0f, 0.0f},
        {1.0f / 2, 1.0f / 2, 1.0f / 2},
        {1.0f / 2, -1.0f / 2, 1.0f / 2},
        {0.0f, 0.0f, 1.0f}
    };
    #pragma omp parallel for
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);
            // The three rows of the 3x3 kernel g
            const float* krow[3] = { kernel0, kernel0 + 3, kernel0 + 6 };
            // h = G * g  (4x3 intermediate)
            float gk[4][3];
            for (int i = 0; i < 4; i++)
            {
                for (int r = 0; r < 3; r++)
                {
                    gk[i][r] = krow[r][0] * ktm[i][0] + krow[r][1] * ktm[i][1] + krow[r][2] * ktm[i][2];
                }
            }
            // U = h * G^T (4x4), written row-major into the transformed kernel
            for (int j = 0; j < 4; j++)
            {
                for (int i = 0; i < 4; i++)
                {
                    kernel_tm0[j * 4 + i] = gk[j][0] * ktm[i][0] + gk[j][1] * ktm[i][1] + gk[j][2] * ktm[i][2];
                }
            }
        }
    }
}
static void conv3x3s1_winograd23_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 2n+2, winograd F(2,3)
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 1) / 2 * 2;
outh = (outh + 1) / 2 * 2;
w = outw + 2;
h = outh + 2;
Option opt_b = opt;
opt_b.blob_allocator = opt.workspace_allocator;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);
const float* bias = _bias;
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
int nColBlocks = h_tm / 4; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 4;
const int tiles = nColBlocks * nRowBlocks;
bottom_blob_tm.create(4 * 4, tiles, inch, 4u, opt.workspace_allocator);
// BT
// const float itm[4][4] = {
// {1.0f, 0.0f, -1.0f, 0.0f},
// {0.0f, 1.0f, 1.00f, 0.0f},
// {0.0f, -1.0f, 1.00f, 0.0f},
// {0.0f, -1.0f, 0.00f, 1.0f}
// };
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const float* img = bottom_blob_bordered.channel(q);
float* out_tm0 = bottom_blob_tm.channel(q);
for (int j = 0; j < nColBlocks; j++)
{
const float* r0 = img + w * j * 2;
const float* r1 = r0 + w;
const float* r2 = r1 + w;
const float* r3 = r2 + w;
for (int i = 0; i < nRowBlocks; i++)
{
#if __AVX__
__m128 _d0, _d1, _d2, _d3;
__m128 _w0, _w1, _w2, _w3;
// load
_d0 = _mm_loadu_ps(r0);
_d1 = _mm_loadu_ps(r1);
_d2 = _mm_loadu_ps(r2);
_d3 = _mm_loadu_ps(r3);
// w = B_t * d
_w0 = _mm_sub_ps(_d0, _d2);
_w1 = _mm_add_ps(_d1, _d2);
_w2 = _mm_sub_ps(_d2, _d1);
_w3 = _mm_sub_ps(_d3, _d1);
// transpose d to d_t
_MM_TRANSPOSE4_PS(_w0, _w1, _w2, _w3);
// d = B_t * d_t
_d0 = _mm_sub_ps(_w0, _w2);
_d1 = _mm_add_ps(_w1, _w2);
_d2 = _mm_sub_ps(_w2, _w1);
_d3 = _mm_sub_ps(_w3, _w1);
// save to out_tm
_mm_storeu_ps(out_tm0, _d0);
_mm_storeu_ps(out_tm0 + 4, _d1);
_mm_storeu_ps(out_tm0 + 8, _d2);
_mm_storeu_ps(out_tm0 + 12, _d3);
#else
float d0[4], d1[4], d2[4], d3[4];
float w0[4], w1[4], w2[4], w3[4];
float t0[4], t1[4], t2[4], t3[4];
// load
for (int n = 0; n < 4; n++)
{
d0[n] = r0[n];
d1[n] = r1[n];
d2[n] = r2[n];
d3[n] = r3[n];
}
// w = B_t * d
for (int n = 0; n < 4; n++)
{
w0[n] = d0[n] - d2[n];
w1[n] = d1[n] + d2[n];
w2[n] = d2[n] - d1[n];
w3[n] = d3[n] - d1[n];
}
// transpose d to d_t
{
t0[0] = w0[0];
t1[0] = w0[1];
t2[0] = w0[2];
t3[0] = w0[3];
t0[1] = w1[0];
t1[1] = w1[1];
t2[1] = w1[2];
t3[1] = w1[3];
t0[2] = w2[0];
t1[2] = w2[1];
t2[2] = w2[2];
t3[2] = w2[3];
t0[3] = w3[0];
t1[3] = w3[1];
t2[3] = w3[2];
t3[3] = w3[3];
}
// d = B_t * d_t
for (int n = 0; n < 4; n++)
{
d0[n] = t0[n] - t2[n];
d1[n] = t1[n] + t2[n];
d2[n] = t2[n] - t1[n];
d3[n] = t3[n] - t1[n];
}
// save to out_tm
for (int n = 0; n < 4; n++)
{
out_tm0[n] = d0[n];
out_tm0[n + 4] = d1[n];
out_tm0[n + 8] = d2[n];
out_tm0[n + 12] = d3[n];
}
#endif
r0 += 2;
r1 += 2;
r2 += 2;
r3 += 2;
out_tm0 += 16;
}
}
}
}
bottom_blob_bordered = Mat();
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
int nColBlocks = h_tm / 4; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 4;
const int tiles = nColBlocks * nRowBlocks;
top_blob_tm.create(16, tiles, outch, 4u, opt.workspace_allocator);
int nn_outch = outch >> 2;
int remain_outch_start = nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 4;
Mat out0_tm = top_blob_tm.channel(p);
Mat out1_tm = top_blob_tm.channel(p + 1);
Mat out2_tm = top_blob_tm.channel(p + 2);
Mat out3_tm = top_blob_tm.channel(p + 3);
const Mat kernel0_tm = kernel_tm.channel(p);
const Mat kernel1_tm = kernel_tm.channel(p + 1);
const Mat kernel2_tm = kernel_tm.channel(p + 2);
const Mat kernel3_tm = kernel_tm.channel(p + 3);
for (int i = 0; i < tiles; i++)
{
float* output0_tm = out0_tm.row(i);
float* output1_tm = out1_tm.row(i);
float* output2_tm = out2_tm.row(i);
float* output3_tm = out3_tm.row(i);
#if __AVX__
float zero_val = 0.f;
__m256 _sum0 = _mm256_broadcast_ss(&zero_val);
__m256 _sum0n = _mm256_broadcast_ss(&zero_val);
__m256 _sum1 = _mm256_broadcast_ss(&zero_val);
__m256 _sum1n = _mm256_broadcast_ss(&zero_val);
__m256 _sum2 = _mm256_broadcast_ss(&zero_val);
__m256 _sum2n = _mm256_broadcast_ss(&zero_val);
__m256 _sum3 = _mm256_broadcast_ss(&zero_val);
__m256 _sum3n = _mm256_broadcast_ss(&zero_val);
int q = 0;
for (; q + 3 < inch; q += 4)
{
const float* r0 = bottom_blob_tm.channel(q).row(i);
const float* r1 = bottom_blob_tm.channel(q + 1).row(i);
const float* r2 = bottom_blob_tm.channel(q + 2).row(i);
const float* r3 = bottom_blob_tm.channel(q + 3).row(i);
const float* k0 = kernel0_tm.row(q);
const float* k1 = kernel1_tm.row(q);
const float* k2 = kernel2_tm.row(q);
const float* k3 = kernel3_tm.row(q);
__m256 _r0 = _mm256_loadu_ps(r0);
__m256 _r0n = _mm256_loadu_ps(r0 + 8);
// k0
__m256 _k0 = _mm256_loadu_ps(k0);
__m256 _k0n = _mm256_loadu_ps(k0 + 8);
__m256 _k1 = _mm256_loadu_ps(k1);
__m256 _k1n = _mm256_loadu_ps(k1 + 8);
__m256 _k2 = _mm256_loadu_ps(k2);
__m256 _k2n = _mm256_loadu_ps(k2 + 8);
__m256 _k3 = _mm256_loadu_ps(k3);
__m256 _k3n = _mm256_loadu_ps(k3 + 8);
_sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0);
_sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n);
_sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1);
_sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n);
_sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2);
_sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n);
_sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3);
_sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n);
// k1
_r0 = _mm256_loadu_ps(r1);
_r0n = _mm256_loadu_ps(r1 + 8);
_k0 = _mm256_loadu_ps(k0 + 16);
_k0n = _mm256_loadu_ps(k0 + 24);
_k1 = _mm256_loadu_ps(k1 + 16);
_k1n = _mm256_loadu_ps(k1 + 24);
_k2 = _mm256_loadu_ps(k2 + 16);
_k2n = _mm256_loadu_ps(k2 + 24);
_k3 = _mm256_loadu_ps(k3 + 16);
_k3n = _mm256_loadu_ps(k3 + 24);
_sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0);
_sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n);
_sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1);
_sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n);
_sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2);
_sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n);
_sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3);
_sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n);
// k2
_r0 = _mm256_loadu_ps(r2);
_r0n = _mm256_loadu_ps(r2 + 8);
_k0 = _mm256_loadu_ps(k0 + 32);
_k0n = _mm256_loadu_ps(k0 + 40);
_k1 = _mm256_loadu_ps(k1 + 32);
_k1n = _mm256_loadu_ps(k1 + 40);
_k2 = _mm256_loadu_ps(k2 + 32);
_k2n = _mm256_loadu_ps(k2 + 40);
_k3 = _mm256_loadu_ps(k3 + 32);
_k3n = _mm256_loadu_ps(k3 + 40);
_sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0);
_sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n);
_sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1);
_sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n);
_sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2);
_sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n);
_sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3);
_sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n);
// k3
_r0 = _mm256_loadu_ps(r3);
_r0n = _mm256_loadu_ps(r3 + 8);
_k0 = _mm256_loadu_ps(k0 + 48);
_k0n = _mm256_loadu_ps(k0 + 56);
_k1 = _mm256_loadu_ps(k1 + 48);
_k1n = _mm256_loadu_ps(k1 + 56);
_k2 = _mm256_loadu_ps(k2 + 48);
_k2n = _mm256_loadu_ps(k2 + 56);
_k3 = _mm256_loadu_ps(k3 + 48);
_k3n = _mm256_loadu_ps(k3 + 56);
_sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0);
_sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n);
_sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1);
_sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n);
_sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2);
_sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n);
_sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3);
_sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n);
}
for (; q < inch; q++)
{
const float* r0 = bottom_blob_tm.channel(q).row(i);
const float* k0 = kernel0_tm.row(q);
const float* k1 = kernel1_tm.row(q);
const float* k2 = kernel2_tm.row(q);
const float* k3 = kernel3_tm.row(q);
__m256 _r0 = _mm256_loadu_ps(r0);
__m256 _r0n = _mm256_loadu_ps(r0 + 8);
__m256 _k0 = _mm256_loadu_ps(k0);
__m256 _k0n = _mm256_loadu_ps(k0 + 8);
__m256 _k1 = _mm256_loadu_ps(k1);
__m256 _k1n = _mm256_loadu_ps(k1 + 8);
__m256 _k2 = _mm256_loadu_ps(k2);
__m256 _k2n = _mm256_loadu_ps(k2 + 8);
__m256 _k3 = _mm256_loadu_ps(k3);
__m256 _k3n = _mm256_loadu_ps(k3 + 8);
_sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0);
_sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n);
_sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1);
_sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n);
_sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2);
_sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n);
_sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3);
_sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n);
}
_mm256_storeu_ps(output0_tm, _sum0);
_mm256_storeu_ps(output0_tm + 8, _sum0n);
_mm256_storeu_ps(output1_tm, _sum1);
_mm256_storeu_ps(output1_tm + 8, _sum1n);
_mm256_storeu_ps(output2_tm, _sum2);
_mm256_storeu_ps(output2_tm + 8, _sum2n);
_mm256_storeu_ps(output3_tm, _sum3);
_mm256_storeu_ps(output3_tm + 8, _sum3n);
#else
float sum0[16] = {0.0f};
float sum1[16] = {0.0f};
float sum2[16] = {0.0f};
float sum3[16] = {0.0f};
int q = 0;
for (; q + 3 < inch; q += 4)
{
const float* r0 = bottom_blob_tm.channel(q).row(i);
const float* r1 = bottom_blob_tm.channel(q + 1).row(i);
const float* r2 = bottom_blob_tm.channel(q + 2).row(i);
const float* r3 = bottom_blob_tm.channel(q + 3).row(i);
const float* k0 = kernel0_tm.row(q);
const float* k1 = kernel1_tm.row(q);
const float* k2 = kernel2_tm.row(q);
const float* k3 = kernel3_tm.row(q);
for (int n = 0; n < 16; n++)
{
sum0[n] += r0[n] * k0[n];
k0 += 16;
sum0[n] += r1[n] * k0[n];
k0 += 16;
sum0[n] += r2[n] * k0[n];
k0 += 16;
sum0[n] += r3[n] * k0[n];
k0 -= 16 * 3;
sum1[n] += r0[n] * k1[n];
k1 += 16;
sum1[n] += r1[n] * k1[n];
k1 += 16;
sum1[n] += r2[n] * k1[n];
k1 += 16;
sum1[n] += r3[n] * k1[n];
k1 -= 16 * 3;
sum2[n] += r0[n] * k2[n];
k2 += 16;
sum2[n] += r1[n] * k2[n];
k2 += 16;
sum2[n] += r2[n] * k2[n];
k2 += 16;
sum2[n] += r3[n] * k2[n];
k2 -= 16 * 3;
sum3[n] += r0[n] * k3[n];
k3 += 16;
sum3[n] += r1[n] * k3[n];
k3 += 16;
sum3[n] += r2[n] * k3[n];
k3 += 16;
sum3[n] += r3[n] * k3[n];
k3 -= 16 * 3;
}
}
for (; q < inch; q++)
{
const float* r0 = bottom_blob_tm.channel(q).row(i);
const float* k0 = kernel0_tm.row(q);
const float* k1 = kernel1_tm.row(q);
const float* k2 = kernel2_tm.row(q);
const float* k3 = kernel3_tm.row(q);
for (int n = 0; n < 16; n++)
{
sum0[n] += r0[n] * k0[n];
sum1[n] += r0[n] * k1[n];
sum2[n] += r0[n] * k2[n];
sum3[n] += r0[n] * k3[n];
}
}
for (int n = 0; n < 16; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
}
#endif
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
Mat out0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
for (int i = 0; i < tiles; i++)
{
float* output0_tm = out0_tm.row(i);
float sum0[16] = {0.0f};
int q = 0;
for (; q + 3 < inch; q += 4)
{
const float* r0 = bottom_blob_tm.channel(q).row(i);
const float* r1 = bottom_blob_tm.channel(q + 1).row(i);
const float* r2 = bottom_blob_tm.channel(q + 2).row(i);
const float* r3 = bottom_blob_tm.channel(q + 3).row(i);
const float* k0 = kernel0_tm.row(q);
const float* k1 = kernel0_tm.row(q + 1);
const float* k2 = kernel0_tm.row(q + 2);
const float* k3 = kernel0_tm.row(q + 3);
for (int n = 0; n < 16; n++)
{
sum0[n] += r0[n] * k0[n];
sum0[n] += r1[n] * k1[n];
sum0[n] += r2[n] * k2[n];
sum0[n] += r3[n] * k3[n];
}
}
for (; q < inch; q++)
{
const float* r0 = bottom_blob_tm.channel(q).row(i);
const float* k0 = kernel0_tm.row(q);
for (int n = 0; n < 16; n++)
{
sum0[n] += r0[n] * k0[n];
}
}
for (int n = 0; n < 16; n++)
{
output0_tm[n] = sum0[n];
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
}
{
// AT
// const float itm[2][4] = {
// {1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 1.0f}
// };
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
int nColBlocks = h_tm / 4; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 4;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out_tm = top_blob_tm.channel(p);
Mat out = top_blob_bordered.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
for (int j = 0; j < nColBlocks; j++)
{
float* outRow0 = out.row(j * 2);
float* outRow1 = out.row(j * 2 + 1);
for (int i = 0; i < nRowBlocks; i++)
{
float* out_tile = out_tm.row(j * nRowBlocks + i);
float s0[4], s1[4], s2[4], s3[4];
float w0[4], w1[4];
float d0[2], d1[2], d2[2], d3[2];
float o0[2], o1[2];
// load
for (int n = 0; n < 4; n++)
{
s0[n] = out_tile[n];
s1[n] = out_tile[n + 4];
s2[n] = out_tile[n + 8];
s3[n] = out_tile[n + 12];
}
// w = A_T * W
for (int n = 0; n < 4; n++)
{
w0[n] = s0[n] + s1[n] + s2[n];
w1[n] = s1[n] - s2[n] + s3[n];
}
// transpose w to w_t
{
d0[0] = w0[0];
d0[1] = w1[0];
d1[0] = w0[1];
d1[1] = w1[1];
d2[0] = w0[2];
d2[1] = w1[2];
d3[0] = w0[3];
d3[1] = w1[3];
}
// Y = A_T * w_t
for (int n = 0; n < 2; n++)
{
o0[n] = d0[n] + d1[n] + d2[n] + bias0;
o1[n] = d1[n] - d2[n] + d3[n] + bias0;
}
// save to top blob tm
outRow0[0] = o0[0];
outRow0[1] = o0[1];
outRow1[0] = o1[0];
outRow1[1] = o1[1];
outRow0 += 2;
outRow1 += 2;
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
// Transform 3x3 convolution kernels into winograd F(4,3) form and repack them
// for the SSE/AVX dot-product stage.
//
// Stage 1: each 3x3 spatial kernel is expanded to a 6x6 transformed kernel
//          U = G * g * G^T, stored in kernel_tm (one 36-float row per input
//          channel, one channel per output channel).
// Stage 2: the 36 transformed values are viewed as 9 groups of 4 floats
//          (r = 0..8). For each group, output channels are repacked in bundles
//          of 8, then 4, then 1, interleaved across input channels, and the
//          result for each r is appended to kernel_tm2.
//
// kernel     : raw weights, outch * inch * 9 floats
// kernel_tm2 : receives 9 repacked Mats (one per 4-float group)
// inch/outch : input / output channel counts
static void conv3x3s1_winograd43_transform_kernel_sse(const Mat& kernel, std::vector<Mat>& kernel_tm2, int inch, int outch)
{
    Mat kernel_tm(6 * 6, inch, outch);

    // G matrix for F(4,3): maps a length-3 kernel row/col to length 6.
    const float ktm[6][3] = {
        {1.0f / 4, 0.0f, 0.0f},
        {-1.0f / 6, -1.0f / 6, -1.0f / 6},
        {-1.0f / 6, 1.0f / 6, -1.0f / 6},
        {1.0f / 24, 1.0f / 12, 1.0f / 6},
        {1.0f / 24, -1.0f / 12, 1.0f / 6},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* k3x3 = (const float*)kernel + p * inch * 9 + q * 9;
            float* ktm_out = kernel_tm.channel(p).row(q);

            // the three rows of the 3x3 kernel
            const float* row0 = k3x3;
            const float* row1 = k3x3 + 3;
            const float* row2 = k3x3 + 6;

            // h = G * g  (6x3 intermediate)
            float tmp[6][3];
            for (int i = 0; i < 6; i++)
            {
                tmp[i][0] = row0[0] * ktm[i][0] + row0[1] * ktm[i][1] + row0[2] * ktm[i][2];
                tmp[i][1] = row1[0] * ktm[i][0] + row1[1] * ktm[i][1] + row1[2] * ktm[i][2];
                tmp[i][2] = row2[0] * ktm[i][0] + row2[1] * ktm[i][1] + row2[2] * ktm[i][2];
            }

            // U = h * G^T  (6x6 result, row-major)
            for (int j = 0; j < 6; j++)
            {
                const float* hj = &tmp[j][0];
                for (int i = 0; i < 6; i++)
                {
                    ktm_out[j * 6 + i] = hj[0] * ktm[i][0] + hj[1] * ktm[i][1] + hj[2] * ktm[i][2];
                }
            }
        }
    }

    // repack: for each 4-float group r, interleave output channels 8/4/1-wide
    for (int r = 0; r < 9; r++)
    {
        Mat kernel_tm_test(4 * 8, inch, outch / 8 + (outch % 8) / 4 + outch % 4);

        int p = 0;

        // bundles of 8 output channels
        for (; p + 7 < outch; p += 8)
        {
            const float* ksrc[8];
            for (int m = 0; m < 8; m++)
                ksrc[m] = (const float*)kernel_tm.channel(p + m);

            float* ktmp = kernel_tm_test.channel(p / 8);

            for (int q = 0; q < inch; q++)
            {
                // 4 consecutive floats of group r from each of the 8 channels
                for (int m = 0; m < 8; m++)
                {
                    ktmp[0] = ksrc[m][r * 4 + 0];
                    ktmp[1] = ksrc[m][r * 4 + 1];
                    ktmp[2] = ksrc[m][r * 4 + 2];
                    ktmp[3] = ksrc[m][r * 4 + 3];
                    ktmp += 4;
                }
                // advance each source to the next input channel (36 floats)
                for (int m = 0; m < 8; m++)
                    ksrc[m] += 36;
            }
        }

        // bundles of 4 output channels
        for (; p + 3 < outch; p += 4)
        {
            const float* ksrc[4];
            for (int m = 0; m < 4; m++)
                ksrc[m] = (const float*)kernel_tm.channel(p + m);

            float* ktmp = kernel_tm_test.channel(p / 8 + (p % 8) / 4);

            for (int q = 0; q < inch; q++)
            {
                for (int m = 0; m < 4; m++)
                {
                    ktmp[0] = ksrc[m][r * 4 + 0];
                    ktmp[1] = ksrc[m][r * 4 + 1];
                    ktmp[2] = ksrc[m][r * 4 + 2];
                    ktmp[3] = ksrc[m][r * 4 + 3];
                    ktmp += 4;
                }
                for (int m = 0; m < 4; m++)
                    ksrc[m] += 36;
            }
        }

        // remaining single output channels
        for (; p < outch; p++)
        {
            const float* ksrc = (const float*)kernel_tm.channel(p);
            float* ktmp = kernel_tm_test.channel(p / 8 + (p % 8) / 4 + p % 4);

            for (int q = 0; q < inch; q++)
            {
                ktmp[0] = ksrc[r * 4 + 0];
                ktmp[1] = ksrc[r * 4 + 1];
                ktmp[2] = ksrc[r * 4 + 2];
                ktmp[3] = ksrc[r * 4 + 3];
                ktmp += 4;
                ksrc += 36;
            }
        }

        kernel_tm2.push_back(kernel_tm_test);
    }
}
static void conv3x3s1_winograd43_sse(const Mat& bottom_blob, Mat& top_blob, const std::vector<Mat>& kernel_tm_test, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
size_t elemsize = bottom_blob.elemsize;
const float* bias = _bias;
// pad to 4n+2, winograd F(4,3)
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 3) / 4 * 4;
outh = (outh + 3) / 4 * 4;
w = outw + 2;
h = outh + 2;
Option opt_b = opt;
opt_b.blob_allocator = opt.workspace_allocator;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 6;
const int tiles = nColBlocks * nRowBlocks;
bottom_blob_tm.create(4, inch, tiles * 9, elemsize, opt.workspace_allocator);
// BT
// const float itm[4][4] = {
// {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
// {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
// {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
// {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
// };
// 0 = 4 * r00 - 5 * r02 + r04
// 1 = -4 * (r01 + r02) + r03 + r04
// 2 = 4 * (r01 - r02) - r03 + r04
// 3 = -2 * r01 - r02 + 2 * r03 + r04
// 4 = 2 * r01 - r02 - 2 * r03 + r04
// 5 = 4 * r01 - 5 * r03 + r05
// 0 = 4 * r00 - 5 * r02 + r04
// 1 = -4 * (r01 + r02) + r03 + r04
// 2 = 4 * (r01 - r02) - r03 + r04
// 3 = -2 * r01 - r02 + 2 * r03 + r04
// 4 = 2 * r01 - r02 - 2 * r03 + r04
// 5 = 4 * r01 - 5 * r03 + r05
#if __AVX__
__m256 _1_n = _mm256_set1_ps(-1);
__m256 _2_p = _mm256_set1_ps(2);
__m256 _2_n = _mm256_set1_ps(-2);
__m256 _4_p = _mm256_set1_ps(4);
__m256 _4_n = _mm256_set1_ps(-4);
__m256 _5_n = _mm256_set1_ps(-5);
#endif
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const float* img = bottom_blob_bordered.channel(q);
for (int j = 0; j < nColBlocks; j++)
{
const float* r0 = img + w * j * 4;
const float* r1 = r0 + w;
const float* r2 = r1 + w;
const float* r3 = r2 + w;
const float* r4 = r3 + w;
const float* r5 = r4 + w;
for (int i = 0; i < nRowBlocks; i++)
{
float* out_tm0 = bottom_blob_tm.channel(tiles * 0 + j * nRowBlocks + i).row(q);
float* out_tm1 = bottom_blob_tm.channel(tiles * 1 + j * nRowBlocks + i).row(q);
float* out_tm2 = bottom_blob_tm.channel(tiles * 2 + j * nRowBlocks + i).row(q);
float* out_tm3 = bottom_blob_tm.channel(tiles * 3 + j * nRowBlocks + i).row(q);
float* out_tm4 = bottom_blob_tm.channel(tiles * 4 + j * nRowBlocks + i).row(q);
float* out_tm5 = bottom_blob_tm.channel(tiles * 5 + j * nRowBlocks + i).row(q);
float* out_tm6 = bottom_blob_tm.channel(tiles * 6 + j * nRowBlocks + i).row(q);
float* out_tm7 = bottom_blob_tm.channel(tiles * 7 + j * nRowBlocks + i).row(q);
float* out_tm8 = bottom_blob_tm.channel(tiles * 8 + j * nRowBlocks + i).row(q);
#if __AVX__
__m256 _d0, _d1, _d2, _d3, _d4, _d5;
__m256 _w0, _w1, _w2, _w3, _w4, _w5;
__m256 _t0, _t1, _t2, _t3, _t4, _t5;
__m256 _n0, _n1, _n2, _n3, _n4, _n5;
// load
_d0 = _mm256_loadu_ps(r0);
_d1 = _mm256_loadu_ps(r1);
_d2 = _mm256_loadu_ps(r2);
_d3 = _mm256_loadu_ps(r3);
_d4 = _mm256_loadu_ps(r4);
_d5 = _mm256_loadu_ps(r5);
// w = B_t * d
_w0 = _mm256_mul_ps(_d0, _4_p);
_w0 = _mm256_fmadd_ps(_d2, _5_n, _w0);
_w0 = _mm256_add_ps(_w0, _d4);
_w1 = _mm256_mul_ps(_d1, _4_n);
_w1 = _mm256_fmadd_ps(_d2, _4_n, _w1);
_w1 = _mm256_add_ps(_w1, _d3);
_w1 = _mm256_add_ps(_w1, _d4);
_w2 = _mm256_mul_ps(_d1, _4_p);
_w2 = _mm256_fmadd_ps(_d2, _4_n, _w2);
_w2 = _mm256_fmadd_ps(_d3, _1_n, _w2);
_w2 = _mm256_add_ps(_w2, _d4);
_w3 = _mm256_mul_ps(_d1, _2_n);
_w3 = _mm256_fmadd_ps(_d2, _1_n, _w3);
_w3 = _mm256_fmadd_ps(_d3, _2_p, _w3);
_w3 = _mm256_add_ps(_w3, _d4);
_w4 = _mm256_mul_ps(_d1, _2_p);
_w4 = _mm256_fmadd_ps(_d2, _1_n, _w4);
_w4 = _mm256_fmadd_ps(_d3, _2_n, _w4);
_w4 = _mm256_add_ps(_w4, _d4);
_w5 = _mm256_mul_ps(_d1, _4_p);
_w5 = _mm256_fmadd_ps(_d3, _5_n, _w5);
_w5 = _mm256_add_ps(_w5, _d5);
// transpose d to d_t
#if (defined _WIN32 && !(defined __MINGW32__))
{
_t0.m256_f32[0] = _w0.m256_f32[0];
_t1.m256_f32[0] = _w0.m256_f32[1];
_t2.m256_f32[0] = _w0.m256_f32[2];
_t3.m256_f32[0] = _w0.m256_f32[3];
_t4.m256_f32[0] = _w0.m256_f32[4];
_t5.m256_f32[0] = _w0.m256_f32[5];
_t0.m256_f32[1] = _w1.m256_f32[0];
_t1.m256_f32[1] = _w1.m256_f32[1];
_t2.m256_f32[1] = _w1.m256_f32[2];
_t3.m256_f32[1] = _w1.m256_f32[3];
_t4.m256_f32[1] = _w1.m256_f32[4];
_t5.m256_f32[1] = _w1.m256_f32[5];
_t0.m256_f32[2] = _w2.m256_f32[0];
_t1.m256_f32[2] = _w2.m256_f32[1];
_t2.m256_f32[2] = _w2.m256_f32[2];
_t3.m256_f32[2] = _w2.m256_f32[3];
_t4.m256_f32[2] = _w2.m256_f32[4];
_t5.m256_f32[2] = _w2.m256_f32[5];
_t0.m256_f32[3] = _w3.m256_f32[0];
_t1.m256_f32[3] = _w3.m256_f32[1];
_t2.m256_f32[3] = _w3.m256_f32[2];
_t3.m256_f32[3] = _w3.m256_f32[3];
_t4.m256_f32[3] = _w3.m256_f32[4];
_t5.m256_f32[3] = _w3.m256_f32[5];
_t0.m256_f32[4] = _w4.m256_f32[0];
_t1.m256_f32[4] = _w4.m256_f32[1];
_t2.m256_f32[4] = _w4.m256_f32[2];
_t3.m256_f32[4] = _w4.m256_f32[3];
_t4.m256_f32[4] = _w4.m256_f32[4];
_t5.m256_f32[4] = _w4.m256_f32[5];
_t0.m256_f32[5] = _w5.m256_f32[0];
_t1.m256_f32[5] = _w5.m256_f32[1];
_t2.m256_f32[5] = _w5.m256_f32[2];
_t3.m256_f32[5] = _w5.m256_f32[3];
_t4.m256_f32[5] = _w5.m256_f32[4];
_t5.m256_f32[5] = _w5.m256_f32[5];
}
#else
{
_t0[0] = _w0[0];
_t1[0] = _w0[1];
_t2[0] = _w0[2];
_t3[0] = _w0[3];
_t4[0] = _w0[4];
_t5[0] = _w0[5];
_t0[1] = _w1[0];
_t1[1] = _w1[1];
_t2[1] = _w1[2];
_t3[1] = _w1[3];
_t4[1] = _w1[4];
_t5[1] = _w1[5];
_t0[2] = _w2[0];
_t1[2] = _w2[1];
_t2[2] = _w2[2];
_t3[2] = _w2[3];
_t4[2] = _w2[4];
_t5[2] = _w2[5];
_t0[3] = _w3[0];
_t1[3] = _w3[1];
_t2[3] = _w3[2];
_t3[3] = _w3[3];
_t4[3] = _w3[4];
_t5[3] = _w3[5];
_t0[4] = _w4[0];
_t1[4] = _w4[1];
_t2[4] = _w4[2];
_t3[4] = _w4[3];
_t4[4] = _w4[4];
_t5[4] = _w4[5];
_t0[5] = _w5[0];
_t1[5] = _w5[1];
_t2[5] = _w5[2];
_t3[5] = _w5[3];
_t4[5] = _w5[4];
_t5[5] = _w5[5];
}
#endif
// d = B_t * d_t
_n0 = _mm256_mul_ps(_t0, _4_p);
_n0 = _mm256_fmadd_ps(_t2, _5_n, _n0);
_n0 = _mm256_add_ps(_n0, _t4);
_n1 = _mm256_mul_ps(_t1, _4_n);
_n1 = _mm256_fmadd_ps(_t2, _4_n, _n1);
_n1 = _mm256_add_ps(_n1, _t3);
_n1 = _mm256_add_ps(_n1, _t4);
_n2 = _mm256_mul_ps(_t1, _4_p);
_n2 = _mm256_fmadd_ps(_t2, _4_n, _n2);
_n2 = _mm256_fmadd_ps(_t3, _1_n, _n2);
_n2 = _mm256_add_ps(_n2, _t4);
_n3 = _mm256_mul_ps(_t1, _2_n);
_n3 = _mm256_fmadd_ps(_t2, _1_n, _n3);
_n3 = _mm256_fmadd_ps(_t3, _2_p, _n3);
_n3 = _mm256_add_ps(_n3, _t4);
_n4 = _mm256_mul_ps(_t1, _2_p);
_n4 = _mm256_fmadd_ps(_t2, _1_n, _n4);
_n4 = _mm256_fmadd_ps(_t3, _2_n, _n4);
_n4 = _mm256_add_ps(_n4, _t4);
_n5 = _mm256_mul_ps(_t1, _4_p);
_n5 = _mm256_fmadd_ps(_t3, _5_n, _n5);
_n5 = _mm256_add_ps(_n5, _t5);
// save to out_tm
float output_n0[8] = {0.f};
_mm256_storeu_ps(output_n0, _n0);
float output_n1[8] = {0.f};
_mm256_storeu_ps(output_n1, _n1);
float output_n2[8] = {0.f};
_mm256_storeu_ps(output_n2, _n2);
float output_n3[8] = {0.f};
_mm256_storeu_ps(output_n3, _n3);
float output_n4[8] = {0.f};
_mm256_storeu_ps(output_n4, _n4);
float output_n5[8] = {0.f};
_mm256_storeu_ps(output_n5, _n5);
out_tm0[0] = output_n0[0];
out_tm0[1] = output_n0[1];
out_tm0[2] = output_n0[2];
out_tm0[3] = output_n0[3];
out_tm1[0] = output_n0[4];
out_tm1[1] = output_n0[5];
out_tm1[2] = output_n1[0];
out_tm1[3] = output_n1[1];
out_tm2[0] = output_n1[2];
out_tm2[1] = output_n1[3];
out_tm2[2] = output_n1[4];
out_tm2[3] = output_n1[5];
out_tm3[0] = output_n2[0];
out_tm3[1] = output_n2[1];
out_tm3[2] = output_n2[2];
out_tm3[3] = output_n2[3];
out_tm4[0] = output_n2[4];
out_tm4[1] = output_n2[5];
out_tm4[2] = output_n3[0];
out_tm4[3] = output_n3[1];
out_tm5[0] = output_n3[2];
out_tm5[1] = output_n3[3];
out_tm5[2] = output_n3[4];
out_tm5[3] = output_n3[5];
out_tm6[0] = output_n4[0];
out_tm6[1] = output_n4[1];
out_tm6[2] = output_n4[2];
out_tm6[3] = output_n4[3];
out_tm7[0] = output_n4[4];
out_tm7[1] = output_n4[5];
out_tm7[2] = output_n5[0];
out_tm7[3] = output_n5[1];
out_tm8[0] = output_n5[2];
out_tm8[1] = output_n5[3];
out_tm8[2] = output_n5[4];
out_tm8[3] = output_n5[5];
#else
float d0[6], d1[6], d2[6], d3[6], d4[6], d5[6];
float w0[6], w1[6], w2[6], w3[6], w4[6], w5[6];
float t0[6], t1[6], t2[6], t3[6], t4[6], t5[6];
// load
for (int n = 0; n < 6; n++)
{
d0[n] = r0[n];
d1[n] = r1[n];
d2[n] = r2[n];
d3[n] = r3[n];
d4[n] = r4[n];
d5[n] = r5[n];
}
// w = B_t * d
for (int n = 0; n < 6; n++)
{
w0[n] = 4 * d0[n] - 5 * d2[n] + d4[n];
w1[n] = -4 * d1[n] - 4 * d2[n] + d3[n] + d4[n];
w2[n] = 4 * d1[n] - 4 * d2[n] - d3[n] + d4[n];
w3[n] = -2 * d1[n] - d2[n] + 2 * d3[n] + d4[n];
w4[n] = 2 * d1[n] - d2[n] - 2 * d3[n] + d4[n];
w5[n] = 4 * d1[n] - 5 * d3[n] + d5[n];
}
// transpose d to d_t
{
t0[0] = w0[0];
t1[0] = w0[1];
t2[0] = w0[2];
t3[0] = w0[3];
t4[0] = w0[4];
t5[0] = w0[5];
t0[1] = w1[0];
t1[1] = w1[1];
t2[1] = w1[2];
t3[1] = w1[3];
t4[1] = w1[4];
t5[1] = w1[5];
t0[2] = w2[0];
t1[2] = w2[1];
t2[2] = w2[2];
t3[2] = w2[3];
t4[2] = w2[4];
t5[2] = w2[5];
t0[3] = w3[0];
t1[3] = w3[1];
t2[3] = w3[2];
t3[3] = w3[3];
t4[3] = w3[4];
t5[3] = w3[5];
t0[4] = w4[0];
t1[4] = w4[1];
t2[4] = w4[2];
t3[4] = w4[3];
t4[4] = w4[4];
t5[4] = w4[5];
t0[5] = w5[0];
t1[5] = w5[1];
t2[5] = w5[2];
t3[5] = w5[3];
t4[5] = w5[4];
t5[5] = w5[5];
}
// d = B_t * d_t
for (int n = 0; n < 6; n++)
{
d0[n] = 4 * t0[n] - 5 * t2[n] + t4[n];
d1[n] = -4 * t1[n] - 4 * t2[n] + t3[n] + t4[n];
d2[n] = 4 * t1[n] - 4 * t2[n] - t3[n] + t4[n];
d3[n] = -2 * t1[n] - t2[n] + 2 * t3[n] + t4[n];
d4[n] = 2 * t1[n] - t2[n] - 2 * t3[n] + t4[n];
d5[n] = 4 * t1[n] - 5 * t3[n] + t5[n];
}
// save to out_tm
{
out_tm0[0] = d0[0];
out_tm0[1] = d0[1];
out_tm0[2] = d0[2];
out_tm0[3] = d0[3];
out_tm1[0] = d0[4];
out_tm1[1] = d0[5];
out_tm1[2] = d1[0];
out_tm1[3] = d1[1];
out_tm2[0] = d1[2];
out_tm2[1] = d1[3];
out_tm2[2] = d1[4];
out_tm2[3] = d1[5];
out_tm3[0] = d2[0];
out_tm3[1] = d2[1];
out_tm3[2] = d2[2];
out_tm3[3] = d2[3];
out_tm4[0] = d2[4];
out_tm4[1] = d2[5];
out_tm4[2] = d3[0];
out_tm4[3] = d3[1];
out_tm5[0] = d3[2];
out_tm5[1] = d3[3];
out_tm5[2] = d3[4];
out_tm5[3] = d3[5];
out_tm6[0] = d4[0];
out_tm6[1] = d4[1];
out_tm6[2] = d4[2];
out_tm6[3] = d4[3];
out_tm7[0] = d4[4];
out_tm7[1] = d4[5];
out_tm7[2] = d5[0];
out_tm7[3] = d5[1];
out_tm8[0] = d5[2];
out_tm8[1] = d5[3];
out_tm8[2] = d5[4];
out_tm8[3] = d5[5];
}
#endif // __AVX__
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
r4 += 4;
r5 += 4;
}
}
}
}
bottom_blob_bordered = Mat();
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 6;
const int tiles = nColBlocks * nRowBlocks;
top_blob_tm.create(36, tiles, outch, elemsize, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 9; r++)
{
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 3;
remain_outch_start = nn_outch << 3;
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 8;
float* output0_tm = top_blob_tm.channel(p);
float* output1_tm = top_blob_tm.channel(p + 1);
float* output2_tm = top_blob_tm.channel(p + 2);
float* output3_tm = top_blob_tm.channel(p + 3);
float* output4_tm = top_blob_tm.channel(p + 4);
float* output5_tm = top_blob_tm.channel(p + 5);
float* output6_tm = top_blob_tm.channel(p + 6);
float* output7_tm = top_blob_tm.channel(p + 7);
output0_tm = output0_tm + r * 4;
output1_tm = output1_tm + r * 4;
output2_tm = output2_tm + r * 4;
output3_tm = output3_tm + r * 4;
output4_tm = output4_tm + r * 4;
output5_tm = output5_tm + r * 4;
output6_tm = output6_tm + r * 4;
output7_tm = output7_tm + r * 4;
for (int i = 0; i < tiles; i++)
{
const float* kptr = kernel_tm_test[r].channel(p / 8);
const float* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __AVX__ || __SSE__
#if __AVX__
float zero_val = 0.f;
__m128 _sum0 = _mm_broadcast_ss(&zero_val);
__m128 _sum1 = _mm_broadcast_ss(&zero_val);
__m128 _sum2 = _mm_broadcast_ss(&zero_val);
__m128 _sum3 = _mm_broadcast_ss(&zero_val);
__m128 _sum4 = _mm_broadcast_ss(&zero_val);
__m128 _sum5 = _mm_broadcast_ss(&zero_val);
__m128 _sum6 = _mm_broadcast_ss(&zero_val);
__m128 _sum7 = _mm_broadcast_ss(&zero_val);
#else
__m128 _sum0 = _mm_set1_ps(0.f);
__m128 _sum1 = _mm_set1_ps(0.f);
__m128 _sum2 = _mm_set1_ps(0.f);
__m128 _sum3 = _mm_set1_ps(0.f);
__m128 _sum4 = _mm_set1_ps(0.f);
__m128 _sum5 = _mm_set1_ps(0.f);
__m128 _sum6 = _mm_set1_ps(0.f);
__m128 _sum7 = _mm_set1_ps(0.f);
#endif
int q = 0;
for (; q + 3 < inch; q = q + 4)
{
__m128 _r0 = _mm_loadu_ps(r0);
__m128 _r1 = _mm_loadu_ps(r0 + 4);
__m128 _r2 = _mm_loadu_ps(r0 + 8);
__m128 _r3 = _mm_loadu_ps(r0 + 12);
__m128 _k0 = _mm_loadu_ps(kptr);
__m128 _k1 = _mm_loadu_ps(kptr + 4);
__m128 _k2 = _mm_loadu_ps(kptr + 8);
__m128 _k3 = _mm_loadu_ps(kptr + 12);
__m128 _k4 = _mm_loadu_ps(kptr + 16);
__m128 _k5 = _mm_loadu_ps(kptr + 20);
__m128 _k6 = _mm_loadu_ps(kptr + 24);
__m128 _k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
_sum1 = _mm_fmadd_ps(_r0, _k1, _sum1);
_sum2 = _mm_fmadd_ps(_r0, _k2, _sum2);
_sum3 = _mm_fmadd_ps(_r0, _k3, _sum3);
_sum4 = _mm_fmadd_ps(_r0, _k4, _sum4);
_sum5 = _mm_fmadd_ps(_r0, _k5, _sum5);
_sum6 = _mm_fmadd_ps(_r0, _k6, _sum6);
_sum7 = _mm_fmadd_ps(_r0, _k7, _sum7);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3));
_sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4));
_sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5));
_sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6));
_sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7));
#endif
kptr += 32;
_k0 = _mm_loadu_ps(kptr);
_k1 = _mm_loadu_ps(kptr + 4);
_k2 = _mm_loadu_ps(kptr + 8);
_k3 = _mm_loadu_ps(kptr + 12);
_k4 = _mm_loadu_ps(kptr + 16);
_k5 = _mm_loadu_ps(kptr + 20);
_k6 = _mm_loadu_ps(kptr + 24);
_k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r1, _k0, _sum0);
_sum1 = _mm_fmadd_ps(_r1, _k1, _sum1);
_sum2 = _mm_fmadd_ps(_r1, _k2, _sum2);
_sum3 = _mm_fmadd_ps(_r1, _k3, _sum3);
_sum4 = _mm_fmadd_ps(_r1, _k4, _sum4);
_sum5 = _mm_fmadd_ps(_r1, _k5, _sum5);
_sum6 = _mm_fmadd_ps(_r1, _k6, _sum6);
_sum7 = _mm_fmadd_ps(_r1, _k7, _sum7);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r1, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r1, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r1, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r1, _k3));
_sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r1, _k4));
_sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r1, _k5));
_sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r1, _k6));
_sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r1, _k7));
#endif
kptr += 32;
_k0 = _mm_loadu_ps(kptr);
_k1 = _mm_loadu_ps(kptr + 4);
_k2 = _mm_loadu_ps(kptr + 8);
_k3 = _mm_loadu_ps(kptr + 12);
_k4 = _mm_loadu_ps(kptr + 16);
_k5 = _mm_loadu_ps(kptr + 20);
_k6 = _mm_loadu_ps(kptr + 24);
_k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r2, _k0, _sum0);
_sum1 = _mm_fmadd_ps(_r2, _k1, _sum1);
_sum2 = _mm_fmadd_ps(_r2, _k2, _sum2);
_sum3 = _mm_fmadd_ps(_r2, _k3, _sum3);
_sum4 = _mm_fmadd_ps(_r2, _k4, _sum4);
_sum5 = _mm_fmadd_ps(_r2, _k5, _sum5);
_sum6 = _mm_fmadd_ps(_r2, _k6, _sum6);
_sum7 = _mm_fmadd_ps(_r2, _k7, _sum7);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r2, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r2, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r2, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r2, _k3));
_sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r2, _k4));
_sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r2, _k5));
_sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r2, _k6));
_sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r2, _k7));
#endif
kptr += 32;
_k0 = _mm_loadu_ps(kptr);
_k1 = _mm_loadu_ps(kptr + 4);
_k2 = _mm_loadu_ps(kptr + 8);
_k3 = _mm_loadu_ps(kptr + 12);
_k4 = _mm_loadu_ps(kptr + 16);
_k5 = _mm_loadu_ps(kptr + 20);
_k6 = _mm_loadu_ps(kptr + 24);
_k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r3, _k0, _sum0);
_sum1 = _mm_fmadd_ps(_r3, _k1, _sum1);
_sum2 = _mm_fmadd_ps(_r3, _k2, _sum2);
_sum3 = _mm_fmadd_ps(_r3, _k3, _sum3);
_sum4 = _mm_fmadd_ps(_r3, _k4, _sum4);
_sum5 = _mm_fmadd_ps(_r3, _k5, _sum5);
_sum6 = _mm_fmadd_ps(_r3, _k6, _sum6);
_sum7 = _mm_fmadd_ps(_r3, _k7, _sum7);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r3, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r3, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r3, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r3, _k3));
_sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r3, _k4));
_sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r3, _k5));
_sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r3, _k6));
_sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r3, _k7));
#endif
kptr += 32;
r0 += 16;
}
for (; q < inch; q++)
{
__m128 _r0 = _mm_loadu_ps(r0);
__m128 _k0 = _mm_loadu_ps(kptr);
__m128 _k1 = _mm_loadu_ps(kptr + 4);
__m128 _k2 = _mm_loadu_ps(kptr + 8);
__m128 _k3 = _mm_loadu_ps(kptr + 12);
__m128 _k4 = _mm_loadu_ps(kptr + 16);
__m128 _k5 = _mm_loadu_ps(kptr + 20);
__m128 _k6 = _mm_loadu_ps(kptr + 24);
__m128 _k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
_sum1 = _mm_fmadd_ps(_r0, _k1, _sum1);
_sum2 = _mm_fmadd_ps(_r0, _k2, _sum2);
_sum3 = _mm_fmadd_ps(_r0, _k3, _sum3);
_sum4 = _mm_fmadd_ps(_r0, _k4, _sum4);
_sum5 = _mm_fmadd_ps(_r0, _k5, _sum5);
_sum6 = _mm_fmadd_ps(_r0, _k6, _sum6);
_sum7 = _mm_fmadd_ps(_r0, _k7, _sum7);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3));
_sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4));
_sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5));
_sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6));
_sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7));
#endif
kptr += 32;
r0 += 4;
}
_mm_storeu_ps(output0_tm, _sum0);
_mm_storeu_ps(output1_tm, _sum1);
_mm_storeu_ps(output2_tm, _sum2);
_mm_storeu_ps(output3_tm, _sum3);
_mm_storeu_ps(output4_tm, _sum4);
_mm_storeu_ps(output5_tm, _sum5);
_mm_storeu_ps(output6_tm, _sum6);
_mm_storeu_ps(output7_tm, _sum7);
#else
float sum0[4] = {0};
float sum1[4] = {0};
float sum2[4] = {0};
float sum3[4] = {0};
float sum4[4] = {0};
float sum5[4] = {0};
float sum6[4] = {0};
float sum7[4] = {0};
for (int q = 0; q < inch; q++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += r0[n] * kptr[n];
sum1[n] += r0[n] * kptr[n + 4];
sum2[n] += r0[n] * kptr[n + 8];
sum3[n] += r0[n] * kptr[n + 12];
sum4[n] += r0[n] * kptr[n + 16];
sum5[n] += r0[n] * kptr[n + 20];
sum6[n] += r0[n] * kptr[n + 24];
sum7[n] += r0[n] * kptr[n + 28];
}
kptr += 32;
r0 += 4;
}
for (int n = 0; n < 4; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
output4_tm[n] = sum4[n];
output5_tm[n] = sum5[n];
output6_tm[n] = sum6[n];
output7_tm[n] = sum7[n];
}
#endif // __AVX__
output0_tm += 36;
output1_tm += 36;
output2_tm += 36;
output3_tm += 36;
output4_tm += 36;
output5_tm += 36;
output6_tm += 36;
output7_tm += 36;
}
}
nn_outch = (outch - remain_outch_start) >> 2;
for (int pp = 0; pp < nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
float* output0_tm = top_blob_tm.channel(p);
float* output1_tm = top_blob_tm.channel(p + 1);
float* output2_tm = top_blob_tm.channel(p + 2);
float* output3_tm = top_blob_tm.channel(p + 3);
output0_tm = output0_tm + r * 4;
output1_tm = output1_tm + r * 4;
output2_tm = output2_tm + r * 4;
output3_tm = output3_tm + r * 4;
for (int i = 0; i < tiles; i++)
{
const float* kptr = kernel_tm_test[r].channel(p / 8 + (p % 8) / 4);
const float* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __AVX__ || __SSE__
#if __AVX__
float zero_val = 0.f;
__m128 _sum0 = _mm_broadcast_ss(&zero_val);
__m128 _sum1 = _mm_broadcast_ss(&zero_val);
__m128 _sum2 = _mm_broadcast_ss(&zero_val);
__m128 _sum3 = _mm_broadcast_ss(&zero_val);
#else
__m128 _sum0 = _mm_set1_ps(0.f);
__m128 _sum1 = _mm_set1_ps(0.f);
__m128 _sum2 = _mm_set1_ps(0.f);
__m128 _sum3 = _mm_set1_ps(0.f);
#endif
for (int q = 0; q < inch; q++)
{
__m128 _r0 = _mm_loadu_ps(r0);
__m128 _k0 = _mm_loadu_ps(kptr);
__m128 _k1 = _mm_loadu_ps(kptr + 4);
__m128 _k2 = _mm_loadu_ps(kptr + 8);
__m128 _k3 = _mm_loadu_ps(kptr + 12);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
_sum1 = _mm_fmadd_ps(_r0, _k1, _sum1);
_sum2 = _mm_fmadd_ps(_r0, _k2, _sum2);
_sum3 = _mm_fmadd_ps(_r0, _k3, _sum3);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3));
#endif
kptr += 16;
r0 += 4;
}
_mm_storeu_ps(output0_tm, _sum0);
_mm_storeu_ps(output1_tm, _sum1);
_mm_storeu_ps(output2_tm, _sum2);
_mm_storeu_ps(output3_tm, _sum3);
#else
float sum0[4] = {0};
float sum1[4] = {0};
float sum2[4] = {0};
float sum3[4] = {0};
for (int q = 0; q < inch; q++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += r0[n] * kptr[n];
sum1[n] += r0[n] * kptr[n + 4];
sum2[n] += r0[n] * kptr[n + 8];
sum3[n] += r0[n] * kptr[n + 12];
}
kptr += 16;
r0 += 4;
}
for (int n = 0; n < 4; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
}
#endif // __AVX__
output0_tm += 36;
output1_tm += 36;
output2_tm += 36;
output3_tm += 36;
}
}
remain_outch_start += nn_outch << 2;
for (int p = remain_outch_start; p < outch; p++)
{
float* output0_tm = top_blob_tm.channel(p);
output0_tm = output0_tm + r * 4;
for (int i = 0; i < tiles; i++)
{
const float* kptr = kernel_tm_test[r].channel(p / 8 + (p % 8) / 4 + p % 4);
const float* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __AVX__ || __SSE__
#if __AVX__
float zero_val = 0.f;
__m128 _sum0 = _mm_broadcast_ss(&zero_val);
#else
__m128 _sum0 = _mm_set1_ps(0.f);
#endif
for (int q = 0; q < inch; q++)
{
__m128 _r0 = _mm_loadu_ps(r0);
__m128 _k0 = _mm_loadu_ps(kptr);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
#endif
kptr += 16;
r0 += 4;
}
_mm_storeu_ps(output0_tm, _sum0);
#else
float sum0[4] = {0};
for (int q = 0; q < inch; q++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += (int)r0[n] * kptr[n];
}
kptr += 4;
r0 += 4;
}
for (int n = 0; n < 4; n++)
{
output0_tm[n] = sum0[n];
}
#endif // __AVX__ || __SSE__
output0_tm += 36;
}
}
// for (int p=0; p<outch; p++)
// {
// Mat out0_tm = top_blob_tm.channel(p);
// const Mat kernel0_tm = kernel_tm.channel(p);
// for (int i=0; i<tiles; i++)
// {
// float* output0_tm = out0_tm.row<int>(i);
// int sum0[36] = {0};
// for (int q=0; q<inch; q++)
// {
// const float* r0 = bottom_blob_tm.channel(q).row<float>(i);
// const float* k0 = kernel0_tm.row<float>(q);
// for (int n=0; n<36; n++)
// {
// sum0[n] += (int)r0[n] * k0[n];
// }
// }
// for (int n=0; n<36; n++)
// {
// output0_tm[n] = sum0[n];
// }
// }
// }
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, elemsize, opt.workspace_allocator);
}
{
// AT
// const float itm[4][6] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
// };
// 0 = r00 + r01 + r02 + r03 + r04
// 1 = r01 - r02 + 2 * (r03 - r04)
// 2 = r01 + r02 + 4 * (r03 + r04)
// 3 = r01 - r02 + 8 * (r03 - r04) + r05
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 6;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
float* out_tile = top_blob_tm.channel(p);
float* outRow0 = top_blob_bordered.channel(p);
float* outRow1 = outRow0 + outw;
float* outRow2 = outRow0 + outw * 2;
float* outRow3 = outRow0 + outw * 3;
const float bias0 = bias ? bias[p] : 0.f;
for (int j = 0; j < nColBlocks; j++)
{
for (int i = 0; i < nRowBlocks; i++)
{
// TODO AVX2
float s0[6], s1[6], s2[6], s3[6], s4[6], s5[6];
float w0[6], w1[6], w2[6], w3[6];
float d0[4], d1[4], d2[4], d3[4], d4[4], d5[4];
float o0[4], o1[4], o2[4], o3[4];
// load
for (int n = 0; n < 6; n++)
{
s0[n] = out_tile[n];
s1[n] = out_tile[n + 6];
s2[n] = out_tile[n + 12];
s3[n] = out_tile[n + 18];
s4[n] = out_tile[n + 24];
s5[n] = out_tile[n + 30];
}
// w = A_T * W
for (int n = 0; n < 6; n++)
{
w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n];
w1[n] = s1[n] - s2[n] + 2 * s3[n] - 2 * s4[n];
w2[n] = s1[n] + s2[n] + 4 * s3[n] + 4 * s4[n];
w3[n] = s1[n] - s2[n] + 8 * s3[n] - 8 * s4[n] + s5[n];
}
// transpose w to w_t
{
d0[0] = w0[0];
d0[1] = w1[0];
d0[2] = w2[0];
d0[3] = w3[0];
d1[0] = w0[1];
d1[1] = w1[1];
d1[2] = w2[1];
d1[3] = w3[1];
d2[0] = w0[2];
d2[1] = w1[2];
d2[2] = w2[2];
d2[3] = w3[2];
d3[0] = w0[3];
d3[1] = w1[3];
d3[2] = w2[3];
d3[3] = w3[3];
d4[0] = w0[4];
d4[1] = w1[4];
d4[2] = w2[4];
d4[3] = w3[4];
d5[0] = w0[5];
d5[1] = w1[5];
d5[2] = w2[5];
d5[3] = w3[5];
}
// Y = A_T * w_t
for (int n = 0; n < 4; n++)
{
o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n];
o1[n] = d1[n] - d2[n] + 2 * d3[n] - 2 * d4[n];
o2[n] = d1[n] + d2[n] + 4 * d3[n] + 4 * d4[n];
o3[n] = d1[n] - d2[n] + 8 * d3[n] - 8 * d4[n] + d5[n];
}
// save to top blob tm
for (int n = 0; n < 4; n++)
{
outRow0[n] = o0[n] + bias0;
outRow1[n] = o1[n] + bias0;
outRow2[n] = o2[n] + bias0;
outRow3[n] = o3[n] + bias0;
}
out_tile += 36;
outRow0 += 4;
outRow1 += 4;
outRow2 += 4;
outRow3 += 4;
}
outRow0 += outw * 3;
outRow1 += outw * 3;
outRow2 += outw * 3;
outRow3 += outw * 3;
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
static void conv3x3s2_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    // Direct 3x3 stride-2 convolution (scalar reference path).
    // For each output channel: fill with the bias, then accumulate the
    // contribution of every input channel through its 3x3 kernel slice.
    const int w = bottom_blob.w;
    const int inch = bottom_blob.c;
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    // After scanning one output row we have advanced 2*outw input columns;
    // (w - 2*outw) returns to column 0, and the extra +w skips one input
    // row so the next output row starts two input rows down (stride 2).
    const int tailstep = w - 2 * outw + w;

    const float* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;
        out.fill(bias0);

        for (int q = 0; q < inch; q++)
        {
            float* outptr = out;

            const float* img = bottom_blob.channel(q);
            const float* kernel0 = kernel + p * inch * 9 + q * 9;

            // three consecutive input rows feed one output row
            const float* r0 = img;
            const float* r1 = img + w;
            const float* r2 = img + w * 2;

            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            for (int i = 0; i < outh; i++)
            {
                for (int j = 0; j < outw; j++)
                {
                    float sum = r0[0] * k0[0] + r0[1] * k0[1] + r0[2] * k0[2]
                              + r1[0] * k1[0] + r1[1] * k1[1] + r1[2] * k1[2]
                              + r2[0] * k2[0] + r2[1] * k2[1] + r2[2] * k2[2];

                    *outptr += sum;
                    outptr++;

                    // horizontal stride 2
                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                }

                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
            }
        }
    }
}
|
resize.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% RRRR EEEEE SSSSS IIIII ZZZZZ EEEEE %
% R R E SS I ZZ E %
% RRRR EEE SSS I ZZZ EEE %
% R R E SS I ZZ E %
% R R EEEEE SSSSS IIIII ZZZZZ EEEEE %
% %
% %
% MagickCore Image Resize Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/draw.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/magick.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/nt-base-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resize-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#include "MagickCore/version.h"
#if defined(MAGICKCORE_LQR_DELEGATE)
#include <lqr.h>
#endif
/*
  Typedef declarations.

  ResizeFilter bundles the weighting (filter) and windowing functions
  together with their support sizes and pre-computed coefficients.
*/
struct _ResizeFilter
{
  double
    (*filter)(const double,const ResizeFilter *),  /* weighting function */
    (*window)(const double,const ResizeFilter *),  /* windowing function */
    support,        /* filter region of support - the filter support limit */
    window_support, /* window support, usually equal to support (expert only) */
    scale,          /* dimension scaling to fit window support (usually 1.0) */
    blur,           /* x-scale (blur-sharpen) */
    coefficient[7]; /* cubic coefficients for BC-cubic filters */

  ResizeWeightingFunctionType
    filterWeightingType,
    windowWeightingType;

  size_t
    signature;
};
/*
  Forward declarations.
*/
static double
  I0(double x),
  BesselOrderOne(double),
  Sinc(const double, const ResizeFilter *),
  SincFast(const double, const ResizeFilter *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F i l t e r F u n c t i o n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% These are the various filter and windowing functions that are provided.
%
% They are internal to this module only. See AcquireResizeFilterInfo() for
% details of the access to these functions, via the GetResizeFilterSupport()
% and GetResizeFilterWeight() API interface.
%
% The individual filter functions have this format...
%
% static double FilterName(const double x,const ResizeFilter *resize_filter)
%
% A description of each parameter follows:
%
% o x: the distance from the sampling point generally in the range of 0 to
% support. The GetResizeFilterWeight() ensures this is a positive value.
%
% o resize_filter: current filter information. This allows function to
% access support, and possibly other pre-calculated information defining
% the functions.
%
*/
static double Blackman(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Blackman: 2nd order cosine windowing function:
        0.42 + 0.5 cos(pi x) + 0.08 cos(2pi x).
    Using cos(2pi x) = 2*cos(pi x)^2 - 1 this collapses to a single trig
    call and five flops (refactoring by Chantal Racette and Nicolas
    Robidoux):
        0.34 + 0.5*cos(pi x) + 0.16*cos(pi x)^2.
  */
  const double cospix = cos((double) (MagickPI*x));
  magick_unreferenced(resize_filter);
  return(0.34+cospix*(0.5+cospix*0.16));
}
static double Bohman(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Bohman: 2nd order cosine windowing function:
        (1-x) cos(pi x) + sin(pi x)/pi.
    The support of Bohman is 1.0, so sin(pi x) >= 0 there and the sine can
    be recovered from the cosine with a square root (refactoring by
    Nicolas Robidoux): one trig call, one sqrt call, and 7 flops.
  */
  const double cospix = cos((double) (MagickPI*x));
  const double sinpix = sqrt(1.0-cospix*cospix);
  magick_unreferenced(resize_filter);
  return((1.0-x)*cospix+(1.0/MagickPI)*sinpix);
}
static double Box(const double magick_unused(x),
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(x);
  magick_unreferenced(resize_filter);
  /*
    A Box filter is an equal weighting function (all weights equal).
    DO NOT LIMIT results by support, or resize point sampling will not
    work, as it requests points beyond its normal 0.0 support size.
  */
  return(1.0);
}
static double Cosine(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Cosine window function:
        cos((pi/2)*x).
  */
  return(cos((double) (MagickPI2*x)));
}
static double CubicBC(const double x,const ResizeFilter *resize_filter)
{
  /*
    Cubic filters defined by Mitchell-Netravali (B,C) parameters:
      Mitchell-Netravali  B = 1/3  C = 1/3  "balanced" cubic spline filter
      Catmull-Rom         B = 0    C = 1/2  interpolatory, exact on linears
      Spline              B = 1    C = 0    B-spline Gaussian approximation
      Hermite             B = 0    C = 0    B-spline interpolator

    See Mitchell and Netravali, "Reconstruction Filters in Computer
    Graphics", Computer Graphics, Volume 22, Number 4, August 1988.
    http://www.cs.utexas.edu/users/fussell/courses/cs384g/lectures/mitchell/
    Mitchell.pdf

    The coefficient[] array holds the polynomial coefficients derived from
    (B,C):
      P0 = ( 6 - 2*B       )/6 = coeff[0]     (P1 is always 0)
      P2 = (-18 +12*B + 6*C)/6 = coeff[1]
      P3 = ( 12 - 9*B - 6*C)/6 = coeff[2]
      Q0 = (      8*B +24*C)/6 = coeff[3]
      Q1 = (    -12*B -48*C)/6 = coeff[4]
      Q2 = (      6*B +30*C)/6 = coeff[5]
      Q3 = (    - 1*B - 6*C)/6 = coeff[6]
    defining the filter
      P0 + P2*x^2 + P3*x^3          0 <= x < 1
      Q0 + Q1*x + Q2*x^2 + Q3*x^3   1 <= x < 2
    which is continuous in both value and derivative (slope).
  */
  const double *coeff = resize_filter->coefficient;

  if (x < 1.0)
    return(coeff[0]+x*(x*(coeff[1]+x*coeff[2])));
  if (x < 2.0)
    return(coeff[3]+x*(coeff[4]+x*(coeff[5]+x*coeff[6])));
  return(0.0);
}
static double CubicSpline(const double x,const ResizeFilter *resize_filter)
{
  /*
    Piecewise cubic-spline approximations of sinc, selected by the
    configured support size (2, 3 or 4 lobes).  Each lobe is a cubic in
    the distance from the lobe start, with coefficients chosen so the
    pieces join smoothly.
  */
  if (resize_filter->support <= 2.0)
    {
      /*
        2-lobe Spline filter.
      */
      if (x < 1.0)
        return(((x-9.0/5.0)*x-1.0/5.0)*x+1.0);
      if (x < 2.0)
        return(((-1.0/3.0*(x-1.0)+4.0/5.0)*(x-1.0)-7.0/15.0)*(x-1.0));
      return(0.0);
    }
  if (resize_filter->support <= 3.0)
    {
      /*
        3-lobe Spline filter.
      */
      if (x < 1.0)
        return(((13.0/11.0*x-453.0/209.0)*x-3.0/209.0)*x+1.0);
      if (x < 2.0)
        return(((-6.0/11.0*(x-1.0)+270.0/209.0)*(x-1.0)-156.0/209.0)*(x-1.0));
      if (x < 3.0)
        return(((1.0/11.0*(x-2.0)-45.0/209.0)*(x-2.0)+26.0/209.0)*(x-2.0));
      return(0.0);
    }
  /*
    4-lobe Spline filter.
  */
  if (x < 1.0)
    return(((49.0/41.0*x-6387.0/2911.0)*x-3.0/2911.0)*x+1.0);
  if (x < 2.0)
    return(((-24.0/41.0*(x-1.0)+4032.0/2911.0)*(x-1.0)-2328.0/2911.0)*(x-1.0));
  if (x < 3.0)
    return(((6.0/41.0*(x-2.0)-1008.0/2911.0)*(x-2.0)+582.0/2911.0)*(x-2.0));
  if (x < 4.0)
    return(((-1.0/41.0*(x-3.0)+168.0/2911.0)*(x-3.0)-97.0/2911.0)*(x-3.0));
  return(0.0);
}
static double Gaussian(const double x,const ResizeFilter *resize_filter)
{
  /*
    Gaussian with a sigma = 1/2 (or as user specified).

    Gaussian Formula (1D) ...
        exp( -(x^2)/((2.0*sigma^2) ) / (sqrt(2*PI)*sigma^2))
    Gaussian Formula (2D) ...
        exp( -(x^2+y^2)/(2.0*sigma^2) ) / (PI*sigma^2) )
    or for radius
        exp( -(r^2)/(2.0*sigma^2) ) / (PI*sigma^2) )

    Note that the only change from the 1-D to the radial form is in the
    normalization multiplier, which is neither needed nor used when the
    Gaussian is used as a filter.

    The constants are pre-calculated...
        coeff[0]=sigma;
        coeff[1]=1.0/(2.0*sigma^2);
        coeff[2]=1.0/(sqrt(2*PI)*sigma^2);
        exp( -coeff[1]*(x^2)) ) * coeff[2];
    However only the multiplier coeff[1] is needed; the others are
    informative only.

    This separates the gaussian 'sigma' value from the 'blur/support'
    settings, allowing for its use in special 'small sigma' gaussians,
    without the filter 'missing' pixels because the support becomes too
    small.
  */
  return(exp((double)(-resize_filter->coefficient[1]*x*x)));
}
static double Hann(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Hann: raised-cosine window function:
        0.5 + 0.5*cos(pi*x).
  */
  const double cospix = cos((double) (MagickPI*x));
  magick_unreferenced(resize_filter);
  return(0.5+0.5*cospix);
}
static double Hamming(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Hamming: offset cosine window function:
        0.54 + 0.46*cos(pi x).
  */
  const double cospix = cos((double) (MagickPI*x));
  magick_unreferenced(resize_filter);
  return(0.54+0.46*cospix);
}
static double Jinc(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    See Pratt "Digital Image Processing" p.97 for Jinc/Bessel functions.
    http://mathworld.wolfram.com/JincFunction.html and page 11 of
    http://www.ph.ed.ac.uk/%7ewjh/teaching/mo/slides/lens/lens.pdf

    The original "zoom" program by Paul Heckbert called this "Bessel",
    but it is more accurately named "Jinc".
  */
  if (x != 0.0)
    return(BesselOrderOne(MagickPI*x)/x);
  return(0.5*MagickPI);  /* value at x == 0 */
}
static double Kaiser(const double x,const ResizeFilter *resize_filter)
{
  /*
    Kaiser windowing function (Bessel windowing):
        I0( beta * sqrt(1-x^2) ), scaled by a normalization factor.
    Beta (coeff[0]) is a free value from 5 to 8 (defaults to 6.5),
    typically defined in terms of Alpha*PI.

    The normalization factor (coeff[1]) is not strictly needed, but
    without it the filter has a large value at x=0, making it difficult
    to compare the function with other windowing functions.
  */
  return(resize_filter->coefficient[1]*I0(resize_filter->coefficient[0]*
    sqrt((double) (1.0-x*x))));
}
static double Lagrange(const double x,const ResizeFilter *resize_filter)
{
  double
    result;

  ssize_t
    k;

  ssize_t
    n,
    order;

  /*
    Lagrange piecewise polynomial fit of sinc: the 'order' of the Lagrange
    function depends on the overall support window size of the filter
    (e.g. a support of 2 gives a Lagrange-4, a piecewise cubic fit); "n"
    identifies which piece of the piecewise polynomial x falls into.

    See "Survey: Interpolation Methods", IEEE Transactions on Medical
    Imaging, Vol 18, No 11, November 1999, p1049-1075 -- Equation 27 on
    p1064.
  */
  if (x > resize_filter->support)
    return(0.0);
  order=(ssize_t) (2.0*resize_filter->window_support);  /* number of pieces */
  n=(ssize_t) (resize_filter->window_support+x);        /* which piece */
  result=1.0;
  for (k=0; k < order; k++)
    if (k != n)
      result*=(n-k-x)/(n-k);
  return(result);
}
static double Quadratic(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    2nd order (quadratic) B-Spline approximation of a Gaussian.
  */
  if (x < 0.5)
    return(0.75-x*x);
  if (x < 1.5)
    {
      const double t=x-1.5;
      return(0.5*t*t);
    }
  return(0.0);
}
static double Sinc(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Scaled sinc(x) computed with a single trig call:
        sinc(x) == sin(pi x)/(pi x),  sinc(0) == 1.
  */
  if (x == 0.0)
    return((double) 1.0);
  {
    const double pix=(double) (MagickPI*x);
    return(sin((double) pix)/pix);
  }
}
static double SincFast(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Approximations of the sinc function sin(pi x)/(pi x) over the interval
    [-4,4] constructed by Nicolas Robidoux and Chantal Racette with funding
    from the Natural Sciences and Engineering Research Council of Canada.

    Although the approximations are polynomials (for low order of
    approximation) and quotients of polynomials (for higher order of
    approximation) and consequently are similar in form to Taylor polynomials /
    Pade approximants, the approximations are computed with a completely
    different technique.

    Summary: These approximations are "the best" in terms of bang (accuracy)
    for the buck (flops). More specifically: Among the polynomial quotients
    that can be computed using a fixed number of flops (with a given "+ - * /
    budget"), the chosen polynomial quotient is the one closest to the
    approximated function with respect to maximum absolute relative error over
    the given interval.

    The Remez algorithm, as implemented in the boost library's minimax package,
    is the key to the construction: http://www.boost.org/doc/libs/1_36_0/libs/
    math/doc/sf_and_dist/html/math_toolkit/backgrounders/remez.html

    If outside of the interval of approximation, use the standard trig formula.
  */
  if (x > 4.0)
    {
      const double alpha=(double) (MagickPI*x);
      return(sin((double) alpha)/alpha);
    }
  {
    /*
      The approximations only depend on x^2 (sinc is an even function).
      The polynomial degree used depends on the quantum depth, trading
      accuracy for flops.
    */
    const double xx = x*x;
#if MAGICKCORE_QUANTUM_DEPTH <= 8
    /*
      Maximum absolute relative error 6.3e-6 < 1/2^17.
    */
    const double c0 = 0.173610016489197553621906385078711564924e-2L;
    const double c1 = -0.384186115075660162081071290162149315834e-3L;
    const double c2 = 0.393684603287860108352720146121813443561e-4L;
    const double c3 = -0.248947210682259168029030370205389323899e-5L;
    const double c4 = 0.107791837839662283066379987646635416692e-6L;
    const double c5 = -0.324874073895735800961260474028013982211e-8L;
    const double c6 = 0.628155216606695311524920882748052490116e-10L;
    const double c7 = -0.586110644039348333520104379959307242711e-12L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
    /* zeros at x = 1,2,3,4 are factored out explicitly */
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#elif MAGICKCORE_QUANTUM_DEPTH <= 16
    /*
      Max. abs. rel. error 2.2e-8 < 1/2^25.
    */
    const double c0 = 0.173611107357320220183368594093166520811e-2L;
    const double c1 = -0.384240921114946632192116762889211361285e-3L;
    const double c2 = 0.394201182359318128221229891724947048771e-4L;
    const double c3 = -0.250963301609117217660068889165550534856e-5L;
    const double c4 = 0.111902032818095784414237782071368805120e-6L;
    const double c5 = -0.372895101408779549368465614321137048875e-8L;
    const double c6 = 0.957694196677572570319816780188718518330e-10L;
    const double c7 = -0.187208577776590710853865174371617338991e-11L;
    const double c8 = 0.253524321426864752676094495396308636823e-13L;
    const double c9 = -0.177084805010701112639035485248501049364e-15L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*(c7+xx*(c8+xx*c9))))))));
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#else
    /*
      Max. abs. rel. error 1.2e-12 < 1/2^39.  Here a rational (polynomial
      quotient) approximation p/q is used instead of a plain polynomial.
    */
    const double c0 = 0.173611111110910715186413700076827593074e-2L;
    const double c1 = -0.289105544717893415815859968653611245425e-3L;
    const double c2 = 0.206952161241815727624413291940849294025e-4L;
    const double c3 = -0.834446180169727178193268528095341741698e-6L;
    const double c4 = 0.207010104171026718629622453275917944941e-7L;
    const double c5 = -0.319724784938507108101517564300855542655e-9L;
    const double c6 = 0.288101675249103266147006509214934493930e-11L;
    const double c7 = -0.118218971804934245819960233886876537953e-13L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
    const double d0 = 1.0L;
    const double d1 = 0.547981619622284827495856984100563583948e-1L;
    const double d2 = 0.134226268835357312626304688047086921806e-2L;
    const double d3 = 0.178994697503371051002463656833597608689e-4L;
    const double d4 = 0.114633394140438168641246022557689759090e-6L;
    const double q = d0+xx*(d1+xx*(d2+xx*(d3+xx*d4)));
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)/q*p);
#endif
  }
}
static double Triangle(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    1st order (linear) B-Spline: bilinear interpolation, the 1-D Tent
    filter, or a Bartlett 2-D cone filter.  Also used as a Bartlett
    windowing function for Sinc().
  */
  if (x >= 1.0)
    return(0.0);
  return(1.0-x);
}
static double Welch(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Welch parabolic windowing filter: 1 - x^2 inside the unit support.
  */
  if (x >= 1.0)
    return(0.0);
  return(1.0-x*x);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e R e s i z e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireResizeFilter() allocates the ResizeFilter structure. Choose from
% these filters:
%
% FIR (Finite impulse Response) Filters
% Box Triangle Quadratic
% Spline Hermite Catrom
% Mitchell
%
% IIR (Infinite impulse Response) Filters
% Gaussian Sinc Jinc (Bessel)
%
% Windowed Sinc/Jinc Filters
% Blackman Bohman Lanczos
% Hann Hamming Cosine
% Kaiser Welch Parzen
% Bartlett
%
% Special Purpose Filters
% Cubic SincFast LanczosSharp Lanczos2 Lanczos2Sharp
% Robidoux RobidouxSharp
%
% The users "-filter" selection is used to lookup the default 'expert'
% settings for that filter from a internal table. However any provided
% 'expert' settings (see below) may override this selection.
%
% FIR filters are used as is, and are limited to that filters support window
% (unless over-ridden). 'Gaussian' while classed as an IIR filter, is also
% simply clipped by its support size (currently 1.5 or approximately 3*sigma
% as recommended by many references)
%
% The special a 'cylindrical' filter flag will promote the default 4-lobed
% Windowed Sinc filter to a 3-lobed Windowed Jinc equivalent, which is better
% suited to this style of image resampling. This typically happens when using
% such a filter for images distortions.
%
% SPECIFIC FILTERS:
%
% Directly requesting 'Sinc', 'Jinc' function as a filter will force the use
% of function without any windowing, or promotion for cylindrical usage. This
% is not recommended, except by image processing experts, especially as part
% of expert option filter function selection.
%
% Two forms of the 'Sinc' function are available: Sinc and SincFast. Sinc is
% computed using the traditional sin(pi*x)/(pi*x); it is selected if the user
% specifically specifies the use of a Sinc filter. SincFast uses highly
% accurate (and fast) polynomial (low Q) and rational (high Q) approximations,
% and will be used by default in most cases.
%
% The Lanczos filter is a special 3-lobed Sinc-windowed Sinc filter (promoted
% to Jinc-windowed Jinc for cylindrical (Elliptical Weighted Average) use).
% The Sinc version is the most popular windowed filter.
%
% LanczosSharp is a slightly sharpened (blur=0.9812505644269356 < 1) form of
% the Lanczos filter, specifically designed for EWA distortion (as a
% Jinc-Jinc); it can also be used as a slightly sharper orthogonal Lanczos
% (Sinc-Sinc) filter. The chosen blur value comes as close as possible to
% satisfying the following condition without changing the character of the
% corresponding EWA filter:
%
% 'No-Op' Vertical and Horizontal Line Preservation Condition: Images with
% only vertical or horizontal features are preserved when performing 'no-op"
% with EWA distortion.
%
% The Lanczos2 and Lanczos2Sharp filters are 2-lobe versions of the Lanczos
% filters. The 'sharp' version uses a blur factor of 0.9549963639785485,
% again chosen because the resulting EWA filter comes as close as possible to
% satisfying the above condition.
%
% Robidoux is another filter tuned for EWA. It is the Keys cubic filter
% defined by B=(228 - 108 sqrt(2))/199. Robidoux satisfies the "'No-Op'
% Vertical and Horizontal Line Preservation Condition" exactly, and it
% moderately blurs high frequency 'pixel-hash' patterns under no-op. It turns
% out to be close to both Mitchell and Lanczos2Sharp. For example, its first
% crossing is at (36 sqrt(2) + 123)/(72 sqrt(2) + 47), almost the same as the
% first crossing of Mitchell and Lanczos2Sharp.
%
% RobidouxSharp is a slightly sharper version of Robidoux; some believe it
% is too sharp. It is designed to minimize the maximum possible change in
% a pixel value which is at one of the extremes (e.g., 0 or 255) under no-op
% conditions. Amazingly Mitchell falls roughly between Robidoux and
% RobidouxSharp, though this seems to have been pure coincidence.
%
% 'EXPERT' OPTIONS:
%
% These artifact "defines" are not recommended for production use without
% expert knowledge of resampling, filtering, and the effects they have on the
% resulting resampled (resized or distorted) image.
%
% They can be used to override any and all filter default, and it is
% recommended you make good use of "filter:verbose" to make sure that the
% overall effect of your selection (before and after) is as expected.
%
% "filter:verbose" controls whether to output the exact results of the
% filter selections made, as well as plotting data for graphing the
% resulting filter over the filters support range.
%
% "filter:filter" select the main function associated with this filter
% name, as the weighting function of the filter. This can be used to
% set a windowing function as a weighting function, for special
% purposes, such as graphing.
%
% If a "filter:window" operation has not been provided, a 'Box'
% windowing function will be set to denote that no windowing function is
% being used.
%
% "filter:window" Select this windowing function for the filter. While any
% filter could be used as a windowing function, using the 'first lobe' of
% that filter over the whole support window, using a non-windowing
% function is not advisable. If no weighting filter function is specified
% a 'SincFast' filter is used.
%
% "filter:lobes" Number of lobes to use for the Sinc/Jinc filter. This a
% simpler method of setting filter support size that will correctly
% handle the Sinc/Jinc switch for an operators filtering requirements.
% Only integers should be given.
%
% "filter:support" Set the support size for filtering to the size given.
% This not recommended for Sinc/Jinc windowed filters (lobes should be
% used instead). This will override any 'filter:lobes' option.
%
% "filter:win-support" Scale windowing function to this size instead. This
% causes the windowing (or self-windowing Lagrange filter) to act as if
% the support window is much larger than what is actually supplied
% to the calling operator. The filter however is still clipped to the
% real support size given, by the support range supplied to the caller.
% If unset this will equal the normal filter support size.
%
% "filter:blur" Scale the filter and support window by this amount. A value
% of > 1 will generally result in a more blurred image with more ringing
% effects, while a value <1 will sharpen the resulting image with more
% aliasing effects.
%
% "filter:sigma" The sigma value to use for the Gaussian filter only.
% Defaults to '1/2'. Using a different sigma effectively provides a
% method of using the filter as a 'blur' convolution. Particularly when
% using it for Distort.
%
% "filter:b"
% "filter:c" Override the preset B,C values for a Cubic filter.
% If only one of these is given, it is assumed to be a 'Keys' type of
% filter such that B+2C=1, where Keys 'alpha' value = C.
%
% Examples:
%
% Set a true un-windowed Sinc filter with 10 lobes (very slow):
% -define filter:filter=Sinc
% -define filter:lobes=8
%
% Set an 8 lobe Lanczos (Sinc or Jinc) filter:
% -filter Lanczos
% -define filter:lobes=8
%
% The format of the AcquireResizeFilter method is:
%
% ResizeFilter *AcquireResizeFilter(const Image *image,
% const FilterType filter_type,const MagickBooleanType cylindrical,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o filter: the filter type, defining a preset filter, window and support.
% The artifact settings listed above will override those selections.
%
% o blur: blur the filter by this amount, use 1.0 if unknown. Image
% artifact "filter:blur" will override this API call usage, including any
% internal change (such as for cylindrical usage).
%
% o radial: use a 1D orthogonal filter (Sinc) or 2D cylindrical (radial)
% filter (Jinc).
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate ResizeFilter *AcquireResizeFilter(const Image *image,
  const FilterType filter,const MagickBooleanType cylindrical,
  ExceptionInfo *exception)
{
  const char
    *artifact;
  FilterType
    filter_type,
    window_type;
  double
    B,
    C,
    value;
  ResizeFilter
    *resize_filter;
  /*
    Table mapping a given Filter into Weighting and Windowing functions.
    A 'Box' windowing function means it is a simple non-windowed filter.
    An 'SincFast' filter function could be upgraded to a 'Jinc' filter if a
    "cylindrical" is requested, unless a 'Sinc' or 'SincFast' filter was
    specifically requested by the user.
    WARNING: The order of this table must match the order of the FilterType
    enumeration specified in "resample.h", or the filter names will not match
    the filter being setup.
    You can check filter setups with the "filter:verbose" expert setting.
  */
  static struct
  {
    FilterType
      filter,
      window;
  } const mapping[SentinelFilter] =
  {
    { UndefinedFilter, BoxFilter }, /* Undefined (default to Box) */
    { PointFilter, BoxFilter }, /* SPECIAL: Nearest neighbour */
    { BoxFilter, BoxFilter }, /* Box averaging filter */
    { TriangleFilter, BoxFilter }, /* Linear interpolation filter */
    { HermiteFilter, BoxFilter }, /* Hermite interpolation filter */
    { SincFastFilter, HannFilter }, /* Hann -- cosine-sinc */
    { SincFastFilter, HammingFilter }, /* Hamming -- '' variation */
    { SincFastFilter, BlackmanFilter }, /* Blackman -- 2*cosine-sinc */
    { GaussianFilter, BoxFilter }, /* Gaussian blur filter */
    { QuadraticFilter, BoxFilter }, /* Quadratic Gaussian approx */
    { CubicFilter, BoxFilter }, /* General Cubic Filter, Spline */
    { CatromFilter, BoxFilter }, /* Cubic-Keys interpolator */
    { MitchellFilter, BoxFilter }, /* 'Ideal' Cubic-Keys filter */
    { JincFilter, BoxFilter }, /* Raw 3-lobed Jinc function */
    { SincFilter, BoxFilter }, /* Raw 4-lobed Sinc function */
    { SincFastFilter, BoxFilter }, /* Raw fast sinc ("Pade"-type) */
    { SincFastFilter, KaiserFilter }, /* Kaiser -- square root-sinc */
    { LanczosFilter, WelchFilter }, /* Welch -- parabolic (3 lobe) */
    { SincFastFilter, CubicFilter }, /* Parzen -- cubic-sinc */
    { SincFastFilter, BohmanFilter }, /* Bohman -- 2*cosine-sinc */
    { SincFastFilter, TriangleFilter }, /* Bartlett -- triangle-sinc */
    { LagrangeFilter, BoxFilter }, /* Lagrange self-windowing */
    { LanczosFilter, LanczosFilter }, /* Lanczos Sinc-Sinc filters */
    { LanczosSharpFilter, LanczosSharpFilter }, /* | these require */
    { Lanczos2Filter, Lanczos2Filter }, /* | special handling */
    { Lanczos2SharpFilter, Lanczos2SharpFilter },
    { RobidouxFilter, BoxFilter }, /* Cubic Keys tuned for EWA */
    { RobidouxSharpFilter, BoxFilter }, /* Sharper Cubic Keys for EWA */
    { LanczosFilter, CosineFilter }, /* Cosine window (3 lobes) */
    { SplineFilter, BoxFilter }, /* Spline Cubic Filter */
    { LanczosRadiusFilter, LanczosFilter }, /* Lanczos with integer radius */
    { CubicSplineFilter, BoxFilter }, /* CubicSpline (2/3/4 lobes) */
  };
  /*
    Table mapping the filter/window from the above table to an actual function.
    The default support size for that filter as a weighting function, the range
    to scale with to use that function as a sinc windowing function, (typ 1.0).
    Note that the filter_type -> function is 1 to 1 except for Sinc(),
    SincFast(), and CubicBC() functions, which may have multiple filter to
    function associations.
    See "filter:verbose" handling below for the function -> filter mapping.
  */
  static struct
  {
    double
      (*function)(const double,const ResizeFilter*),
      support, /* Default lobes/support size of the weighting filter. */
      scale, /* Support when function used as a windowing function
               Typically equal to the location of the first zero crossing. */
      B,C; /* BC-spline coefficients, ignored if not a CubicBC filter. */
    ResizeWeightingFunctionType weightingFunctionType;
  } const filters[SentinelFilter] =
  {
    /* .--- support window (if used as a Weighting Function)
       | .--- first crossing (if used as a Windowing Function)
       | | .--- B value for Cubic Function
       | | | .---- C value for Cubic Function
       | | | | */
    { Box, 0.5, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Undefined (default to Box) */
    { Box, 0.0, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Point (special handling) */
    { Box, 0.5, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Box */
    { Triangle, 1.0, 1.0, 0.0, 0.0, TriangleWeightingFunction }, /* Triangle */
    { CubicBC, 1.0, 1.0, 0.0, 0.0, CubicBCWeightingFunction }, /* Hermite (cubic B=C=0) */
    { Hann, 1.0, 1.0, 0.0, 0.0, HannWeightingFunction }, /* Hann, cosine window */
    { Hamming, 1.0, 1.0, 0.0, 0.0, HammingWeightingFunction }, /* Hamming, '' variation */
    { Blackman, 1.0, 1.0, 0.0, 0.0, BlackmanWeightingFunction }, /* Blackman, 2*cosine window */
    { Gaussian, 2.0, 1.5, 0.0, 0.0, GaussianWeightingFunction }, /* Gaussian */
    { Quadratic, 1.5, 1.5, 0.0, 0.0, QuadraticWeightingFunction },/* Quadratic gaussian */
    { CubicBC, 2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction }, /* General Cubic Filter */
    { CubicBC, 2.0, 1.0, 0.0, 0.5, CubicBCWeightingFunction }, /* Catmull-Rom (B=0,C=1/2) */
    { CubicBC, 2.0, 8.0/7.0, 1./3., 1./3., CubicBCWeightingFunction }, /* Mitchell (B=C=1/3) */
    { Jinc, 3.0, 1.2196698912665045, 0.0, 0.0, JincWeightingFunction }, /* Raw 3-lobed Jinc */
    { Sinc, 4.0, 1.0, 0.0, 0.0, SincWeightingFunction }, /* Raw 4-lobed Sinc */
    { SincFast, 4.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Raw fast sinc ("Pade"-type) */
    { Kaiser, 1.0, 1.0, 0.0, 0.0, KaiserWeightingFunction }, /* Kaiser (square root window) */
    { Welch, 1.0, 1.0, 0.0, 0.0, WelchWeightingFunction }, /* Welch (parabolic window) */
    { CubicBC, 2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction }, /* Parzen (B-Spline window) */
    { Bohman, 1.0, 1.0, 0.0, 0.0, BohmanWeightingFunction }, /* Bohman, 2*Cosine window */
    { Triangle, 1.0, 1.0, 0.0, 0.0, TriangleWeightingFunction }, /* Bartlett (triangle window) */
    { Lagrange, 2.0, 1.0, 0.0, 0.0, LagrangeWeightingFunction }, /* Lagrange sinc approximation */
    { SincFast, 3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, 3-lobed Sinc-Sinc */
    { SincFast, 3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, Sharpened */
    { SincFast, 2.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, 2-lobed */
    { SincFast, 2.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos2, sharpened */
    /* Robidoux: Keys cubic close to Lanczos2D sharpened */
    { CubicBC, 2.0, 1.1685777620836932,
      0.37821575509399867, 0.31089212245300067, CubicBCWeightingFunction },
    /* RobidouxSharp: Sharper version of Robidoux */
    { CubicBC, 2.0, 1.105822933719019,
      0.2620145123990142, 0.3689927438004929, CubicBCWeightingFunction },
    { Cosine, 1.0, 1.0, 0.0, 0.0, CosineWeightingFunction }, /* Low level cosine window */
    { CubicBC, 2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction }, /* Cubic B-Spline (B=1,C=0) */
    { SincFast, 3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, Integer Radius */
    { CubicSpline,2.0, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Spline Lobes 2-lobed */
  };
  /*
    The known zero crossings of the Jinc() or more accurately the Jinc(x*PI)
    function being used as a filter. It is used by the "filter:lobes" expert
    setting and for 'lobes' for Jinc functions in the previous table. This way
    users do not have to deal with the highly irrational lobe sizes of the Jinc
    filter.
    Values taken from
    http://cose.math.bas.bg/webMathematica/webComputing/BesselZeros.jsp
    using Jv-function with v=1, then dividing by PI.
  */
  static double
    jinc_zeros[16] =
    {
      1.2196698912665045,
      2.2331305943815286,
      3.2383154841662362,
      4.2410628637960699,
      5.2427643768701817,
      6.2439216898644877,
      7.2447598687199570,
      8.2453949139520427,
      9.2458926849494673,
      10.246293348754916,
      11.246622794877883,
      12.246898461138105,
      13.247132522181061,
      14.247333735806849,
      15.247508563037300,
      16.247661874700962
    };
  /*
    Allocate resize filter.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(UndefinedFilter < filter && filter < SentinelFilter);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  (void) exception;
  resize_filter=(ResizeFilter *) AcquireCriticalMemory(sizeof(*resize_filter));
  (void) memset(resize_filter,0,sizeof(*resize_filter));
  /*
    Defaults for the requested filter.
  */
  filter_type=mapping[filter].filter;
  window_type=mapping[filter].window;
  resize_filter->blur=1.0;
  /* Promote 1D Windowed Sinc Filters to a 2D Windowed Jinc filters */
  if ((cylindrical != MagickFalse) && (filter_type == SincFastFilter) &&
      (filter != SincFastFilter))
    filter_type=JincFilter; /* 1D Windowed Sinc => 2D Windowed Jinc filters */
  /* Expert filter setting override */
  artifact=GetImageArtifact(image,"filter:filter");
  /*
    NOTE(review): IsStringTrue() is only MagickTrue for values such as
    "true", "yes", "on" or "1", so a filter name like
    "-define filter:filter=Sinc" (the usage documented above) never enters
    this branch -- confirm intent; a plain artifact != NULL test may have
    been meant here.
  */
  if (IsStringTrue(artifact) != MagickFalse)
    {
      ssize_t
        option;
      option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
      if ((UndefinedFilter < option) && (option < SentinelFilter))
        { /* Raw filter request - no window function. */
          filter_type=(FilterType) option;
          window_type=BoxFilter;
        }
      /* Filter override with a specific window function. */
      artifact=GetImageArtifact(image,"filter:window");
      if (artifact != (const char *) NULL)
        {
          option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
          if ((UndefinedFilter < option) && (option < SentinelFilter))
            window_type=(FilterType) option;
        }
    }
  else
    {
      /* Window specified, but no filter function? Assume Sinc/Jinc. */
      artifact=GetImageArtifact(image,"filter:window");
      if (artifact != (const char *) NULL)
        {
          ssize_t
            option;
          option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
          if ((UndefinedFilter < option) && (option < SentinelFilter))
            {
              filter_type= cylindrical != MagickFalse ? JincFilter
                : SincFastFilter;
              window_type=(FilterType) option;
            }
        }
    }
  /* Assign the real functions to use for the filters selected. */
  resize_filter->filter=filters[filter_type].function;
  resize_filter->support=filters[filter_type].support;
  resize_filter->filterWeightingType=filters[filter_type].weightingFunctionType;
  resize_filter->window=filters[window_type].function;
  resize_filter->windowWeightingType=filters[window_type].weightingFunctionType;
  resize_filter->scale=filters[window_type].scale;
  resize_filter->signature=MagickCoreSignature;
  /* Filter Modifications for orthogonal/cylindrical usage */
  if (cylindrical != MagickFalse)
    switch (filter_type)
    {
      case BoxFilter:
        /* Support for Cylindrical Box should be sqrt(2)/2 */
        resize_filter->support=(double) MagickSQ1_2;
        break;
      case LanczosFilter:
      case LanczosSharpFilter:
      case Lanczos2Filter:
      case Lanczos2SharpFilter:
      case LanczosRadiusFilter:
        /* Lanczos family: swap the Sinc weighting/window for Jinc. */
        resize_filter->filter=filters[JincFilter].function;
        resize_filter->window=filters[JincFilter].function;
        resize_filter->scale=filters[JincFilter].scale;
        /* number of lobes (support window size) remain unchanged */
        break;
      default:
        break;
    }
  /* Global Sharpening (regardless of orthogonal/cylindrical) */
  switch (filter_type)
  {
    case LanczosSharpFilter:
      resize_filter->blur *= 0.9812505644269356;
      break;
    case Lanczos2SharpFilter:
      resize_filter->blur *= 0.9549963639785485;
      break;
    /* case LanczosRadius: blur adjust is done after lobes */
    default:
      break;
  }
  /*
    Expert Option Modifications.
  */
  /* User Gaussian Sigma Override - no support change */
  if ((resize_filter->filter == Gaussian) ||
      (resize_filter->window == Gaussian) ) {
    value=0.5; /* Gaussian sigma default, half pixel */
    artifact=GetImageArtifact(image,"filter:sigma");
    if (artifact != (const char *) NULL)
      value=StringToDouble(artifact,(char **) NULL);
    /* Define coefficients for Gaussian */
    resize_filter->coefficient[0]=value; /* note sigma too */
    resize_filter->coefficient[1]=PerceptibleReciprocal(2.0*value*value); /* sigma scaling */
    resize_filter->coefficient[2]=PerceptibleReciprocal(Magick2PI*value*value);
    /* normalization - not actually needed or used! */
    if ( value > 0.5 )
      resize_filter->support *= 2*value; /* increase support linearly */
  }
  /* User Kaiser Alpha Override - no support change */
  if ((resize_filter->filter == Kaiser) ||
      (resize_filter->window == Kaiser) ) {
    value=6.5; /* default beta value for Kaiser bessel windowing function */
    artifact=GetImageArtifact(image,"filter:alpha"); /* FUTURE: deprecate */
    if (artifact != (const char *) NULL)
      value=StringToDouble(artifact,(char **) NULL);
    artifact=GetImageArtifact(image,"filter:kaiser-beta");
    if (artifact != (const char *) NULL)
      value=StringToDouble(artifact,(char **) NULL);
    artifact=GetImageArtifact(image,"filter:kaiser-alpha");
    if (artifact != (const char *) NULL)
      value=StringToDouble(artifact,(char **) NULL)*MagickPI;
    /* Define coefficients for Kaiser Windowing Function */
    resize_filter->coefficient[0]=value; /* alpha */
    resize_filter->coefficient[1]=PerceptibleReciprocal(I0(value));
    /* normalization */
  }
  /* Support Overrides */
  artifact=GetImageArtifact(image,"filter:lobes");
  if (artifact != (const char *) NULL)
    {
      ssize_t
        lobes;
      lobes=(ssize_t) StringToLong(artifact);
      if (lobes < 1)
        lobes=1;
      resize_filter->support=(double) lobes;
    }
  if (resize_filter->filter == Jinc)
    {
      /*
        Convert a Jinc function lobes value to a real support value.
      */
      if (resize_filter->support > 16)
        resize_filter->support=jinc_zeros[15]; /* largest entry in table */
      else
        resize_filter->support=jinc_zeros[((long) resize_filter->support)-1];
      /*
        Blur this filter so support is an integer value (lobes dependent).
      */
      if (filter_type == LanczosRadiusFilter)
        resize_filter->blur*=floor(resize_filter->support)/
          resize_filter->support;
    }
  /*
    Expert blur override.
  */
  artifact=GetImageArtifact(image,"filter:blur");
  if (artifact != (const char *) NULL)
    resize_filter->blur*=StringToDouble(artifact,(char **) NULL);
  if (resize_filter->blur < MagickEpsilon)
    resize_filter->blur=(double) MagickEpsilon;
  /*
    Expert override of the support setting.
  */
  artifact=GetImageArtifact(image,"filter:support");
  if (artifact != (const char *) NULL)
    resize_filter->support=fabs(StringToDouble(artifact,(char **) NULL));
  /*
    Scale windowing function separately to the support 'clipping' window
    that calling operator is planning to actually use. (Expert override)
  */
  resize_filter->window_support=resize_filter->support; /* default */
  artifact=GetImageArtifact(image,"filter:win-support");
  if (artifact != (const char *) NULL)
    resize_filter->window_support=fabs(StringToDouble(artifact,(char **) NULL));
  /*
    Adjust window function scaling to match windowing support for weighting
    function. This avoids a division on every filter call.
  */
  resize_filter->scale*=PerceptibleReciprocal(resize_filter->window_support);
  /*
    Set Cubic Spline B,C values, calculate Cubic coefficients.
  */
  B=0.0;
  C=0.0;
  if ((resize_filter->filter == CubicBC) ||
      (resize_filter->window == CubicBC) )
    {
      B=filters[filter_type].B;
      C=filters[filter_type].C;
      if (filters[window_type].function == CubicBC)
        {
          B=filters[window_type].B;
          C=filters[window_type].C;
        }
      artifact=GetImageArtifact(image,"filter:b");
      if (artifact != (const char *) NULL)
        {
          B=StringToDouble(artifact,(char **) NULL);
          C=(1.0-B)/2.0; /* Calculate C to get a Keys cubic filter. */
          artifact=GetImageArtifact(image,"filter:c"); /* user C override */
          if (artifact != (const char *) NULL)
            C=StringToDouble(artifact,(char **) NULL);
        }
      else
        {
          artifact=GetImageArtifact(image,"filter:c");
          if (artifact != (const char *) NULL)
            {
              C=StringToDouble(artifact,(char **) NULL);
              B=1.0-2.0*C; /* Calculate B to get a Keys cubic filter. */
            }
        }
      {
        const double
          twoB = B+B;
        /*
          Convert B,C values into Cubic Coefficients. See CubicBC().
        */
        resize_filter->coefficient[0]=1.0-(1.0/3.0)*B;
        resize_filter->coefficient[1]=-3.0+twoB+C;
        resize_filter->coefficient[2]=2.0-1.5*B-C;
        resize_filter->coefficient[3]=(4.0/3.0)*B+4.0*C;
        resize_filter->coefficient[4]=-8.0*C-twoB;
        resize_filter->coefficient[5]=B+5.0*C;
        resize_filter->coefficient[6]=(-1.0/6.0)*B-C;
      }
    }
  /*
    Expert Option Request for verbose details of the resulting filter.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp master
  {
#endif
    if (IsStringTrue(GetImageArtifact(image,"filter:verbose")) != MagickFalse)
      {
        double
          support,
          x;
        /*
          Set the weighting function properly when the weighting function
          may not exactly match the filter of the same name. EG: a Point
          filter really uses a Box weighting function with a different
          support than is typically used.
        */
        if (resize_filter->filter == Box) filter_type=BoxFilter;
        if (resize_filter->filter == Sinc) filter_type=SincFilter;
        if (resize_filter->filter == SincFast) filter_type=SincFastFilter;
        if (resize_filter->filter == Jinc) filter_type=JincFilter;
        if (resize_filter->filter == CubicBC) filter_type=CubicFilter;
        if (resize_filter->window == Box) window_type=BoxFilter;
        if (resize_filter->window == Sinc) window_type=SincFilter;
        if (resize_filter->window == SincFast) window_type=SincFastFilter;
        if (resize_filter->window == Jinc) window_type=JincFilter;
        if (resize_filter->window == CubicBC) window_type=CubicFilter;
        /*
          Report Filter Details.
        */
        support=GetResizeFilterSupport(resize_filter); /* practical_support */
        (void) FormatLocaleFile(stdout,
          "# Resampling Filter (for graphing)\n#\n");
        (void) FormatLocaleFile(stdout,"# filter = %s\n",
          CommandOptionToMnemonic(MagickFilterOptions,filter_type));
        (void) FormatLocaleFile(stdout,"# window = %s\n",
          CommandOptionToMnemonic(MagickFilterOptions,window_type));
        (void) FormatLocaleFile(stdout,"# support = %.*g\n",
          GetMagickPrecision(),(double) resize_filter->support);
        (void) FormatLocaleFile(stdout,"# window-support = %.*g\n",
          GetMagickPrecision(),(double) resize_filter->window_support);
        (void) FormatLocaleFile(stdout,"# scale-blur = %.*g\n",
          GetMagickPrecision(),(double) resize_filter->blur);
        if ((filter_type == GaussianFilter) || (window_type == GaussianFilter))
          (void) FormatLocaleFile(stdout,"# gaussian-sigma = %.*g\n",
            GetMagickPrecision(),(double) resize_filter->coefficient[0]);
        if ( filter_type == KaiserFilter || window_type == KaiserFilter )
          (void) FormatLocaleFile(stdout,"# kaiser-beta = %.*g\n",
            GetMagickPrecision(),(double) resize_filter->coefficient[0]);
        (void) FormatLocaleFile(stdout,"# practical-support = %.*g\n",
          GetMagickPrecision(), (double) support);
        if ((filter_type == CubicFilter) || (window_type == CubicFilter))
          (void) FormatLocaleFile(stdout,"# B,C = %.*g,%.*g\n",
            GetMagickPrecision(),(double) B,GetMagickPrecision(),(double) C);
        (void) FormatLocaleFile(stdout,"\n");
        /*
          Output values of resulting filter graph -- for graphing filter result.
        */
        for (x=0.0; x <= support; x+=0.01f)
          (void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",x,
            GetMagickPrecision(),(double)
            GetResizeFilterWeight(resize_filter,x));
        /*
          A final value so gnuplot can graph the 'stop' properly.
        */
        (void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",support,
          GetMagickPrecision(),0.0);
      }
    /* Output the above once only for each image - remove setting */
    (void) DeleteImageArtifact((Image *) image,"filter:verbose");
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  }
#endif
  return(resize_filter);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveResizeImage() adaptively resize image with pixel resampling.
%
% This is a shortcut function for a fast interpolative resize using mesh
% interpolation. It works well for small resizes of less than +/- 50%
% of the original image size. For larger resizing on images a full
% filtered and slower resize function should be used instead.
%
% The format of the AdaptiveResizeImage method is:
%
% Image *AdaptiveResizeImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveResizeImage(const Image *image,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  /*
    Fast interpolative resize using mesh interpolation; best suited to
    modest size changes (see the method description above).
  */
  return(InterpolativeResizeImage(image,columns,rows,MeshInterpolatePixel,
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ B e s s e l O r d e r O n e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BesselOrderOne() computes the Bessel function of x of the first kind of
% order 1. This is used to create the Jinc() filter function below.
%
% Reduce x to |x| since j1(x)= -j1(-x), and for x in (0,8]
%
%    j1(x) = x*J1(x), where J1(x) is a rational approximation of j1(x)/x;
%
% For x in (8,inf)
%
% j1(x) = sqrt(2/(pi*x))*(p1(x)*cos(x1)-q1(x)*sin(x1))
%
% where x1 = x-3*pi/4. Compute sin(x1) and cos(x1) as follow:
%
% cos(x1) = cos(x)cos(3pi/4)+sin(x)sin(3pi/4)
% = 1/sqrt(2) * (sin(x) - cos(x))
% sin(x1) = sin(x)cos(3pi/4)-cos(x)sin(3pi/4)
% = -1/sqrt(2) * (sin(x) + cos(x))
%
% The format of the BesselOrderOne method is:
%
% double BesselOrderOne(double x)
%
% A description of each parameter follows:
%
% o x: double value.
%
*/
#undef I0
static double I0(double x)
{
  /*
    Zeroth-order modified Bessel function of the first kind, evaluated by
    summing the power series I0(x) = sum_k (x^2/4)^k/(k!)^2 until the next
    term drops below MagickEpsilon.
  */
  double
    quarter_square,
    series,
    term;
  ssize_t
    k;
  quarter_square=x*x/4.0;
  series=1.0;
  term=quarter_square;
  k=2;
  while (term > MagickEpsilon)
  {
    series+=term;
    term*=quarter_square/((double) k*k);
    k++;
  }
  return(series);
}
#undef J1
static double J1(double x)
{
  /*
    Rational approximation of j1(x)/x for x in (0,8], evaluated as a ratio
    of two polynomials in x*x via Horner's rule.
  */
  static const double
    Pone[] =
    {
      0.581199354001606143928050809e+21,
      -0.6672106568924916298020941484e+20,
      0.2316433580634002297931815435e+19,
      -0.3588817569910106050743641413e+17,
      0.2908795263834775409737601689e+15,
      -0.1322983480332126453125473247e+13,
      0.3413234182301700539091292655e+10,
      -0.4695753530642995859767162166e+7,
      0.270112271089232341485679099e+4
    },
    Qone[] =
    {
      0.11623987080032122878585294e+22,
      0.1185770712190320999837113348e+20,
      0.6092061398917521746105196863e+17,
      0.2081661221307607351240184229e+15,
      0.5243710262167649715406728642e+12,
      0.1013863514358673989967045588e+10,
      0.1501793594998585505921097578e+7,
      0.1606931573481487801970916749e+4,
      0.1e+1
    };
  double
    denominator,
    numerator;
  ssize_t
    k;
  k=8;
  numerator=Pone[k];
  denominator=Qone[k];
  while (k-- > 0)
  {
    numerator=numerator*x*x+Pone[k];
    denominator=denominator*x*x+Qone[k];
  }
  return(numerator/denominator);
}
#undef P1
static double P1(double x)
{
  /*
    Asymptotic modulus polynomial P1 for j1(x), x in (8,inf): a rational
    function of (8/x)^2 evaluated by Horner's rule; tends to 1 as x grows.
  */
  static const double
    Pone[] =
    {
      0.352246649133679798341724373e+5,
      0.62758845247161281269005675e+5,
      0.313539631109159574238669888e+5,
      0.49854832060594338434500455e+4,
      0.2111529182853962382105718e+3,
      0.12571716929145341558495e+1
    },
    Qone[] =
    {
      0.352246649133679798068390431e+5,
      0.626943469593560511888833731e+5,
      0.312404063819041039923015703e+5,
      0.4930396490181088979386097e+4,
      0.2030775189134759322293574e+3,
      0.1e+1
    };
  double
    denominator,
    numerator;
  ssize_t
    k;
  k=5;
  numerator=Pone[k];
  denominator=Qone[k];
  while (k-- > 0)
  {
    numerator=numerator*(8.0/x)*(8.0/x)+Pone[k];
    denominator=denominator*(8.0/x)*(8.0/x)+Qone[k];
  }
  return(numerator/denominator);
}
#undef Q1
static double Q1(double x)
{
  /*
    Asymptotic phase polynomial Q1 for j1(x), x in (8,inf): a rational
    function of (8/x)^2 evaluated by Horner's rule; tends to 3/64 as x
    grows.
  */
  static const double
    Pone[] =
    {
      0.3511751914303552822533318e+3,
      0.7210391804904475039280863e+3,
      0.4259873011654442389886993e+3,
      0.831898957673850827325226e+2,
      0.45681716295512267064405e+1,
      0.3532840052740123642735e-1
    },
    Qone[] =
    {
      0.74917374171809127714519505e+4,
      0.154141773392650970499848051e+5,
      0.91522317015169922705904727e+4,
      0.18111867005523513506724158e+4,
      0.1038187585462133728776636e+3,
      0.1e+1
    };
  double
    denominator,
    numerator;
  ssize_t
    k;
  k=5;
  numerator=Pone[k];
  denominator=Qone[k];
  while (k-- > 0)
  {
    numerator=numerator*(8.0/x)*(8.0/x)+Pone[k];
    denominator=denominator*(8.0/x)*(8.0/x)+Qone[k];
  }
  return(numerator/denominator);
}
static double BesselOrderOne(double x)
{
  /*
    Bessel function of the first kind of order one, j1(x). The function is
    odd (j1(-x) == -j1(x)), so evaluate at |x| and restore the sign at the
    end. For |x| < 8 use the rational approximation x*J1(x); otherwise use
    the asymptotic form with the phase identities in the header comment.
  */
  double
    original,
    result;
  if (x == 0.0)
    return(0.0);
  original=x;
  if (x < 0.0)
    x=(-x);
  if (x < 8.0)
    return(original*J1(x));
  result=sqrt((double) (2.0/(MagickPI*x)))*(P1(x)*(1.0/sqrt(2.0)*(sin(x)-
    cos(x)))-8.0/x*Q1(x)*(-1.0/sqrt(2.0)*(sin(x)+cos(x))));
  if (original < 0.0)
    result=(-result);
  return(result);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y R e s i z e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyResizeFilter() destroy the resize filter.
%
% The format of the DestroyResizeFilter method is:
%
% ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
%
% A description of each parameter follows:
%
% o resize_filter: the resize filter.
%
*/
MagickPrivate ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
{
  /*
    Release the resize filter; the signature is invalidated first so any
    stale pointer trips the assertions above on later use.
  */
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  resize_filter->signature=(~MagickCoreSignature);
  return((ResizeFilter *) RelinquishMagickMemory(resize_filter));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t R e s i z e F i l t e r S u p p o r t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetResizeFilterSupport() return the current support window size for this
% filter. Note that this may have been enlarged by filter:blur factor.
%
% The format of the GetResizeFilterSupport method is:
%
% double GetResizeFilterSupport(const ResizeFilter *resize_filter)
%
% A description of each parameter follows:
%
% o filter: Image filter to use.
%
*/
/*
  Return the filter's coefficient array (Gaussian sigma terms, Kaiser
  alpha/normalization, or CubicBC spline coefficients, depending on the
  filter acquired).
*/
MagickPrivate double *GetResizeFilterCoefficient(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return((double *) resize_filter->coefficient);
}
/* Return the blur factor: > 1 blurs the filter, < 1 sharpens it. */
MagickPrivate double GetResizeFilterBlur(const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->blur);
}
/* Return the scaling applied when evaluating the windowing function. */
MagickPrivate double GetResizeFilterScale(const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->scale);
}
/* Return the support (clipping range) used for the windowing function. */
MagickPrivate double GetResizeFilterWindowSupport(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->window_support);
}
/* Identify which weighting function acts as the filter proper. */
MagickPrivate ResizeWeightingFunctionType GetResizeFilterWeightingType(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->filterWeightingType);
}
/* Identify which weighting function acts as the windowing function. */
MagickPrivate ResizeWeightingFunctionType GetResizeFilterWindowWeightingType(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->windowWeightingType);
}
/*
  Return the practical support: the filter's support enlarged by the blur
  factor (see "filter:blur").
*/
MagickPrivate double GetResizeFilterSupport(const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->support*resize_filter->blur);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t R e s i z e F i l t e r W e i g h t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetResizeFilterWeight evaluates the specified resize filter at the point x
% which usally lies between zero and the filters current 'support' and
% returns the weight of the filter function at that point.
%
% The format of the GetResizeFilterWeight method is:
%
% double GetResizeFilterWeight(const ResizeFilter *resize_filter,
% const double x)
%
% A description of each parameter follows:
%
% o filter: the filter type.
%
% o x: the point.
%
*/
MagickPrivate double GetResizeFilterWeight(const ResizeFilter *resize_filter,
  const double x)
{
  double
    blurred_x,
    window_weight;
  /*
    Evaluate the filter at offset x: the weighting function is modulated by
    the windowing function, both sampled at the blur-scaled offset.
  */
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  blurred_x=fabs((double) x)/resize_filter->blur; /* X offset with blur scaling */
  window_weight=1.0; /* Point or Box Filter -- avoid division by zero */
  if ((resize_filter->window_support >= MagickEpsilon) &&
      (resize_filter->window != Box))
    window_weight=resize_filter->window(blurred_x*resize_filter->scale,
      resize_filter);
  return(window_weight*resize_filter->filter(blurred_x,resize_filter));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p o l a t i v e R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpolativeResizeImage() resizes an image using the specified
% interpolation method.
%
% The format of the InterpolativeResizeImage method is:
%
% Image *InterpolativeResizeImage(const Image *image,const size_t columns,
% const size_t rows,const PixelInterpolateMethod method,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *InterpolativeResizeImage(const Image *image,
  const size_t columns,const size_t rows,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
#define InterpolativeResizeImageTag "Resize/Image"
  CacheView
    *image_view,
    *resize_view;
  Image
    *resize_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  PointInfo
    scale;
  ssize_t
    y;
  /*
    Interpolatively resize image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  /* Identity resize: just clone the image. */
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  resize_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (resize_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(resize_image,DirectClass,exception) == MagickFalse)
    {
      resize_image=DestroyImage(resize_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  resize_view=AcquireAuthenticCacheView(resize_image,exception);
  /* Source pixels covered by one destination pixel, per axis. */
  scale.x=(double) image->columns/resize_image->columns;
  scale.y=(double) image->rows/resize_image->rows;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,resize_image,resize_image->rows,1)
#endif
  for (y=0; y < (ssize_t) resize_image->rows; y++)
  {
    PointInfo
      offset;
    Quantum
      *magick_restrict q;
    ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      continue;
    /* Map the destination row center back into source coordinates. */
    offset.y=((double) y+0.5)*scale.y-0.5;
    for (x=0; x < (ssize_t) resize_image->columns; x++)
    {
      ssize_t
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel
          channel;
        PixelTrait
          resize_traits,
          traits;
        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        resize_traits=GetPixelChannelTraits(resize_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (resize_traits == UndefinedPixelTrait))
          continue;
        /*
          NOTE(review): InterpolatePixelChannels() appears to interpolate
          the full pixel yet is invoked once per channel index i --
          seemingly redundant work; confirm before restructuring.
        */
        offset.x=((double) x+0.5)*scale.x-0.5;
        status=InterpolatePixelChannels(image,image_view,resize_image,method,
          offset.x,offset.y,q,exception);
        if (status == MagickFalse)
          break;
      }
      q+=GetPixelChannels(resize_image);
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,InterpolativeResizeImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  /* On any failure release the partial result and signal via NULL. */
  if (status == MagickFalse)
    resize_image=DestroyImage(resize_image);
  return(resize_image);
}
#if defined(MAGICKCORE_LQR_DELEGATE)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i q u i d R e s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LiquidRescaleImage() rescales image with seam carving.
%
% The format of the LiquidRescaleImage method is:
%
% Image *LiquidRescaleImage(const Image *image,const size_t columns,
% const size_t rows,const double delta_x,const double rigidity,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the rescaled image.
%
% o rows: the number of rows in the rescaled image.
%
% o delta_x: maximum seam transversal step (0 means straight seams).
%
% o rigidity: introduce a bias for non-straight seams (typically 0).
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *LiquidRescaleImage(const Image *image,const size_t columns,
  const size_t rows,const double delta_x,const double rigidity,
  ExceptionInfo *exception)
{
#define LiquidRescaleImageTag "Rescale/Image"

  CacheView
    *image_view,
    *rescale_view;

  gfloat
    *packet,  /* per-pixel channel samples handed back by the carver scan */
    *pixels;  /* whole source image as 32-bit float samples for liblqr */

  Image
    *rescale_image;

  int
    x_offset,  /* destination coordinates filled by lqr_carver_scan_ext() */
    y_offset;

  LqrCarver
    *carver;

  LqrRetVal
    lqr_status;

  MagickBooleanType
    status;

  MemoryInfo
    *pixel_info;

  gfloat
    *q;  /* write cursor into the float pixel buffer */

  ssize_t
    y;

  /*
    Liquid rescale image: content-aware resize via the liblqr seam-carving
    delegate.  Degenerate target sizes fall back to CloneImage/ResizeImage.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  if ((columns <= 2) || (rows <= 2))
    return(ResizeImage(image,columns,rows,image->filter,exception));
  /*
    Flatten the source pixels into a float buffer scaled to [0,1] for the
    carver.  The buffer is sized for MaxPixelChannels channels per pixel
    even though only GetPixelChannels(image) of them are written.
  */
  pixel_info=AcquireVirtualMemory(image->columns,image->rows*MaxPixelChannels*
    sizeof(*pixels));
  if (pixel_info == (MemoryInfo *) NULL)
    return((Image *) NULL);
  pixels=(gfloat *) GetVirtualMemoryBlob(pixel_info);
  status=MagickTrue;
  q=pixels;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
        *q++=QuantumScale*p[i];
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Carve the float buffer; the carver keeps a private copy of the input
    (lqr_carver_set_preserve_input_image) so pixel_info stays ours to free.
  */
  carver=lqr_carver_new_ext(pixels,(int) image->columns,(int) image->rows,
    (int) GetPixelChannels(image),LQR_COLDEPTH_32F);
  if (carver == (LqrCarver *) NULL)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  lqr_carver_set_preserve_input_image(carver);
  lqr_status=lqr_carver_init(carver,(int) delta_x,rigidity);
  lqr_status=lqr_carver_resize(carver,(int) columns,(int) rows);
  /* NOTE(review): both liblqr return codes above are discarded; presumably
     failures surface as a degenerate carver geometry — confirm against the
     liblqr API documentation. */
  (void) lqr_status;
  /*
    Build the result image at the dimensions the carver actually produced.
  */
  rescale_image=CloneImage(image,lqr_carver_get_width(carver),
    lqr_carver_get_height(carver),MagickTrue,exception);
  if (rescale_image == (Image *) NULL)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(rescale_image,DirectClass,exception) == MagickFalse)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      rescale_image=DestroyImage(rescale_image);
      return((Image *) NULL);
    }
  /*
    Scan the carved result pixel-by-pixel back into the rescale image,
    rescaling each float sample from [0,1] to the quantum range.
  */
  rescale_view=AcquireAuthenticCacheView(rescale_image,exception);
  (void) lqr_carver_scan_reset(carver);
  while (lqr_carver_scan_ext(carver,&x_offset,&y_offset,(void **) &packet) != 0)
  {
    Quantum
      *magick_restrict p;

    ssize_t
      i;

    p=QueueCacheViewAuthenticPixels(rescale_view,x_offset,y_offset,1,1,
      exception);
    if (p == (Quantum *) NULL)
      break;
    for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
    {
      PixelChannel
        channel;

      PixelTrait
        rescale_traits,
        traits;

      /* only channels meaningful in both images are transferred */
      channel=GetPixelChannelChannel(image,i);
      traits=GetPixelChannelTraits(image,channel);
      rescale_traits=GetPixelChannelTraits(rescale_image,channel);
      if ((traits == UndefinedPixelTrait) ||
          (rescale_traits == UndefinedPixelTrait))
        continue;
      SetPixelChannel(rescale_image,channel,ClampToQuantum(QuantumRange*
        packet[i]),p);
    }
    if (SyncCacheViewAuthenticPixels(rescale_view,exception) == MagickFalse)
      break;
  }
  rescale_view=DestroyCacheView(rescale_view);
  pixel_info=RelinquishVirtualMemory(pixel_info);
  lqr_carver_destroy(carver);
  return(rescale_image);
}
#else
MagickExport Image *LiquidRescaleImage(const Image *image,
  const size_t magick_unused(columns),const size_t magick_unused(rows),
  const double magick_unused(delta_x),const double magick_unused(rigidity),
  ExceptionInfo *exception)
{
  /*
    Stub compiled when the Liquid Rescale (liblqr) delegate is not built in:
    validates arguments, records a missing-delegate error in `exception`,
    and returns no image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  (void) ThrowMagickException(exception,GetMagickModule(),MissingDelegateError,
    "DelegateLibrarySupportNotBuiltIn","'%s' (LQR)",image->filename);
  return((Image *) NULL);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g n i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagnifyImage() doubles the size of the image with a pixel art scaling
% algorithm.
%
% The format of the MagnifyImage method is:
%
% Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void CopyPixels(const Quantum *source,const ssize_t source_offset,
  Quantum *destination,const ssize_t destination_offset,const size_t channels)
{
  const Quantum
    *s;

  Quantum
    *d;

  size_t
    n;

  /*
    Copy one complete pixel (all channels) from source pixel slot
    `source_offset` to destination pixel slot `destination_offset`.
  */
  s=source+source_offset*(ssize_t) channels;
  d=destination+destination_offset*(ssize_t) channels;
  for (n=0; n < channels; n++)
    d[n]=s[n];
}
static inline void MixPixels(const Quantum *source,const ssize_t *source_offset,
  const size_t source_size,Quantum *destination,
  const ssize_t destination_offset,const size_t channels)
{
  ssize_t
    channel;

  /*
    Store, for each channel, the truncating integer mean of that channel
    taken over the `source_size` pixels named by `source_offset` into the
    destination pixel slot `destination_offset`.
  */
  for (channel=0; channel < (ssize_t) channels; channel++)
  {
    size_t
      k;

    ssize_t
      total;

    total=0;
    for (k=0; k < source_size; k++)
      total+=source[source_offset[k]*(ssize_t) channels+channel];
    destination[(ssize_t) channels*destination_offset+channel]=(Quantum)
      (total/source_size);
  }
}
static inline void Mix2Pixels(const Quantum *source,
  const ssize_t source_offset1,const ssize_t source_offset2,
  Quantum *destination,const ssize_t destination_offset,const size_t channels)
{
  /*
    Convenience wrapper: average exactly two source pixels into the
    destination pixel slot.
  */
  const ssize_t
    pair[2] = { source_offset1, source_offset2 };

  MixPixels(source,pair,2,destination,destination_offset,channels);
}
static inline int PixelsEqual(const Quantum *source1,ssize_t offset1,
  const Quantum *source2,ssize_t offset2,const size_t channels)
{
  const Quantum
    *p,
    *q;

  size_t
    n;

  /*
    Return 1 when the two pixels agree in every channel, 0 otherwise.
  */
  p=source1+offset1*(ssize_t) channels;
  q=source2+offset2*(ssize_t) channels;
  for (n=channels; n != 0; n--)
    if (*p++ != *q++)
      return(0);
  return(1);
}
static inline void Eagle2X(const Image *source,const Quantum *pixels,
  Quantum *result,const size_t channels)
{
  ssize_t
    cell;

  (void) source;
  /*
    Eagle 2x: seed every cell of the 2x2 output block with the center pixel
    (index 4 of the 3x3 input window), then replace each corner cell with
    the matching corner of the window when the two pixels flanking that
    corner agree with it.
  */
  for (cell=0; cell < 4; cell++)
    CopyPixels(pixels,4,result,cell,channels);
  if (PixelsEqual(pixels,0,pixels,1,channels) &&
      PixelsEqual(pixels,1,pixels,3,channels))
    CopyPixels(pixels,0,result,0,channels);  /* top-left */
  if (PixelsEqual(pixels,1,pixels,2,channels) &&
      PixelsEqual(pixels,2,pixels,5,channels))
    CopyPixels(pixels,2,result,1,channels);  /* top-right */
  if (PixelsEqual(pixels,3,pixels,6,channels) &&
      PixelsEqual(pixels,6,pixels,7,channels))
    CopyPixels(pixels,6,result,2,channels);  /* bottom-left */
  if (PixelsEqual(pixels,5,pixels,8,channels) &&
      PixelsEqual(pixels,8,pixels,7,channels))
    CopyPixels(pixels,8,result,3,channels);  /* bottom-right */
}
static void Hq2XHelper(const unsigned int rule,const Quantum *source,
  Quantum *destination,const ssize_t destination_offset,const size_t channels,
  const ssize_t e,const ssize_t a,const ssize_t b,const ssize_t d,
  const ssize_t f,const ssize_t h)
{
  /*
    Apply one hq2x blending rule to produce a single output pixel.

    rule               -- blending rule index looked up in Hq2XTable.
    destination_offset -- which cell of the 2x2 output block to write.
    e                  -- offset of the center pixel in the 3x3 window.
    a,b,d,f,h          -- offsets of the relevant neighbors; the caller
                          rotates these per output corner so one rule set
                          serves all four cells.

    caseA mixes 4 offsets (quarter weights); caseB mixes 8 (eighth
    weights).  Repeating an offset in the list raises its weight.
  */
#define caseA(N,A,B,C,D) \
  case N: \
  { \
    const ssize_t \
      offsets[4] = { A, B, C, D }; \
\
    MixPixels(source,offsets,4,destination,destination_offset,channels);\
    break; \
  }
#define caseB(N,A,B,C,D,E,F,G,H) \
  case N: \
  { \
    const ssize_t \
      offsets[8] = { A, B, C, D, E, F, G, H }; \
\
    MixPixels(source,offsets,8,destination,destination_offset,channels);\
    break; \
  }
  switch (rule)
  {
    case 0:
    {
      /* rule 0: keep the center pixel unblended */
      CopyPixels(source,e,destination,destination_offset,channels);
      break;
    }
    caseA(1,e,e,e,a)
    caseA(2,e,e,e,d)
    caseA(3,e,e,e,b)
    caseA(4,e,e,d,b)
    caseA(5,e,e,a,b)
    caseA(6,e,e,a,d)
    caseB(7,e,e,e,e,e,b,b,d)
    caseB(8,e,e,e,e,e,d,d,b)
    caseB(9,e,e,e,e,e,e,d,b)
    caseB(10,e,e,d,d,d,b,b,b)
    case 11:
    {
      /* rule 11: 14/16 center, 1/16 each of d and b */
      const ssize_t
        offsets[16] = { e, e, e, e, e, e, e, e, e, e, e, e, e, e, d, b };

      MixPixels(source,offsets,16,destination,destination_offset,channels);
      break;
    }
    /* rules 12-19 blend only when the b/d (or b/f, d/h) neighbors match;
       otherwise they fall back to a lighter mix or the plain center */
    case 12:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[4] = { e, e, d, b };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      else
        CopyPixels(source,e,destination,destination_offset,channels);
      break;
    }
    case 13:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[8] = { e, e, d, d, d, b, b, b };

          MixPixels(source,offsets,8,destination,destination_offset,channels);
        }
      else
        CopyPixels(source,e,destination,destination_offset,channels);
      break;
    }
    case 14:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[16] = { e, e, e, e, e, e, e, e, e, e, e, e, e, e, d, b };

          MixPixels(source,offsets,16,destination,destination_offset,channels);
        }
      else
        CopyPixels(source,e,destination,destination_offset,channels);
      break;
    }
    case 15:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[4] = { e, e, d, b };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      else
        {
          const ssize_t
            offsets[4] = { e, e, e, a };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      break;
    }
    case 16:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[8] = { e, e, e, e, e, e, d, b };

          MixPixels(source,offsets,8,destination,destination_offset,channels);
        }
      else
        {
          const ssize_t
            offsets[4] = { e, e, e, a };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      break;
    }
    case 17:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[8] = { e, e, d, d, d, b, b, b };

          MixPixels(source,offsets,8,destination,destination_offset,channels);
        }
      else
        {
          const ssize_t
            offsets[4] = { e, e, e, a };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      break;
    }
    case 18:
    {
      if (PixelsEqual(source,b,source,f,channels))
        {
          const ssize_t
            offsets[8] = { e, e, e, e, e, b, b, d };

          MixPixels(source,offsets,8,destination,destination_offset,channels);
        }
      else
        {
          const ssize_t
            offsets[4] = { e, e, e, d };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      break;
    }
    default:
    {
      /* rule 19 (the only remaining table value) */
      if (PixelsEqual(source,d,source,h,channels))
        {
          const ssize_t
            offsets[8] = { e, e, e, e, e, d, d, b };

          MixPixels(source,offsets,8,destination,destination_offset,channels);
        }
      else
        {
          const ssize_t
            offsets[4] = { e, e, e, b };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      break;
    }
  }
#undef caseA
#undef caseB
}
static inline unsigned int Hq2XPatternToNumber(const int *pattern)
{
  ssize_t
    bit;

  unsigned int
    number;

  /*
    Pack the 8 neighbor-difference flags into one byte: pattern[0] becomes
    the most-significant bit and pattern[7] the least-significant bit.
  */
  number=0;
  for (bit=0; bit < 8; bit++)
    number=(number << 1)+(unsigned int) pattern[bit];
  return(number);
}
static inline void Hq2X(const Image *source,const Quantum *pixels,
  Quantum *result,const size_t channels)
{
  /*
    hq2x: classify the 3x3 window by which of the 8 neighbors differ from
    the center pixel (a 256-way pattern), look up the blending rule in
    Hq2XTable, and let Hq2XHelper synthesize each cell of the 2x2 output.
    The window is logically rotated (Rotated macro) so the same table
    serves all four output corners.
  */
  static const unsigned int
    Hq2XTable[] =
    {
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 15, 12, 5,  3, 17, 13,
      4, 4, 6, 18, 4, 4, 6, 18, 5,  3, 12, 12, 5,  3,  1, 12,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 17, 13, 5,  3, 16, 14,
      4, 4, 6, 18, 4, 4, 6, 18, 5,  3, 16, 12, 5,  3,  1, 14,
      4, 4, 6,  2, 4, 4, 6,  2, 5, 19, 12, 12, 5, 19, 16, 12,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 16, 12, 5,  3, 16, 12,
      4, 4, 6,  2, 4, 4, 6,  2, 5, 19,  1, 12, 5, 19,  1, 14,
      4, 4, 6,  2, 4, 4, 6, 18, 5,  3, 16, 12, 5, 19,  1, 14,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 15, 12, 5,  3, 17, 13,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 16, 12, 5,  3, 16, 12,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 17, 13, 5,  3, 16, 14,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 16, 13, 5,  3,  1, 14,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 16, 12, 5,  3, 16, 13,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 16, 12, 5,  3,  1, 12,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 16, 12, 5,  3,  1, 14,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3,  1, 12, 5,  3,  1, 14
    };

  /* pattern1[k] is 1 when neighbor (7-k) differs from the center pixel 4;
     Hq2XPatternToNumber treats pattern[0] as the most-significant bit */
  const int
    pattern1[] =
    {
      !PixelsEqual(pixels,4,pixels,8,channels),
      !PixelsEqual(pixels,4,pixels,7,channels),
      !PixelsEqual(pixels,4,pixels,6,channels),
      !PixelsEqual(pixels,4,pixels,5,channels),
      !PixelsEqual(pixels,4,pixels,3,channels),
      !PixelsEqual(pixels,4,pixels,2,channels),
      !PixelsEqual(pixels,4,pixels,1,channels),
      !PixelsEqual(pixels,4,pixels,0,channels)
    };

  /* each application of Rotated turns the difference pattern 90 degrees */
#define Rotated(p) p[2], p[4], p[7], p[1], p[6], p[0], p[3], p[5]
  const int pattern2[] = { Rotated(pattern1) };
  const int pattern3[] = { Rotated(pattern2) };
  const int pattern4[] = { Rotated(pattern3) };
#undef Rotated
  (void) source;
  /* one helper call per output cell; the neighbor offsets (a,b,d,f,h) are
     rotated to match the pattern rotation */
  Hq2XHelper(Hq2XTable[Hq2XPatternToNumber(pattern1)],pixels,result,0,
    channels,4,0,1,3,5,7);
  Hq2XHelper(Hq2XTable[Hq2XPatternToNumber(pattern2)],pixels,result,1,
    channels,4,2,5,1,7,3);
  Hq2XHelper(Hq2XTable[Hq2XPatternToNumber(pattern3)],pixels,result,3,
    channels,4,8,7,5,3,1);
  Hq2XHelper(Hq2XTable[Hq2XPatternToNumber(pattern4)],pixels,result,2,
    channels,4,6,3,7,1,5);
}
static void Fish2X(const Image *source,const Quantum *pixels,Quantum *result,
  const size_t channels)
{
  /*
    Fish 2x: works on the 2x2 sub-block at window offsets 0 (a), 1 (b),
    3 (d), 4 (e).  Output cells 0-2 are decided by pixel intensity; cell 3
    (the interpolated corner) is decided by which pairs of the sub-block
    match, via the Corner and Line macros below.
  */
#define Corner(A,B,C,D) \
{ \
  if (intensities[B] > intensities[A]) \
    { \
      ssize_t \
        offsets[3] = { B, C, D }; \
\
      MixPixels(pixels,offsets,3,result,3,channels); \
    } \
  else \
    { \
      ssize_t \
        offsets[3] = { A, B, C }; \
\
      MixPixels(pixels,offsets,3,result,3,channels); \
    } \
}
#define Line(A,B,C,D) \
{ \
  if (intensities[C] > intensities[A]) \
    Mix2Pixels(pixels,C,D,result,3,channels); \
  else \
    Mix2Pixels(pixels,A,B,result,3,channels); \
}
  MagickFloatType
    intensities[9];

  int
    ae,  /* a==e: main-diagonal pair matches */
    bd,  /* b==d: anti-diagonal pair matches */
    ab,  /* top edge matches */
    ad,  /* left edge matches */
    be,  /* right edge matches */
    de;  /* bottom edge matches */

  ssize_t
    i;

  ssize_t
    offsets[4] = { 0, 1, 3, 4 };

  for (i=0; i < 9; i++)
    intensities[i]=GetPixelIntensity(source,pixels + i*channels);
  /* cell 0 is always a; cells 1 and 2 take the brighter of the two edge
     pixels (ties go to the second pixel) */
  CopyPixels(pixels,0,result,0,channels);
  CopyPixels(pixels,(ssize_t) (intensities[0] > intensities[1] ? 0 : 1),result,
    1,channels);
  CopyPixels(pixels,(ssize_t) (intensities[0] > intensities[3] ? 0 : 3),result,
    2,channels);
  ae=PixelsEqual(pixels,0,pixels,4,channels);
  bd=PixelsEqual(pixels,1,pixels,3,channels);
  ab=PixelsEqual(pixels,0,pixels,1,channels);
  de=PixelsEqual(pixels,3,pixels,4,channels);
  ad=PixelsEqual(pixels,0,pixels,3,channels);
  be=PixelsEqual(pixels,1,pixels,4,channels);
  /* the remaining cases are checked in priority order; each writes cell 3
     and returns */
  if (ae && bd && ab)
    {
      /* all four sub-block pixels equal */
      CopyPixels(pixels,0,result,3,channels);
      return;
    }
  if (ad && de && !ab)
    {
      Corner(1,0,4,3)
      return;
    }
  if (be && de && !ab)
    {
      Corner(0,1,3,4)
      return;
    }
  if (ad && ab && !be)
    {
      Corner(4,3,1,0)
      return;
    }
  if (ab && be && !ad)
    {
      Corner(3,0,4,1)
      return;
    }
  if (ae && (!bd || intensities[1] > intensities[0]))
    {
      Mix2Pixels(pixels,0,4,result,3,channels);
      return;
    }
  if (bd && (!ae || intensities[0] > intensities[1]))
    {
      Mix2Pixels(pixels,1,3,result,3,channels);
      return;
    }
  if (ab)
    {
      Line(0,1,3,4)
      return;
    }
  if (de)
    {
      Line(3,4,0,1)
      return;
    }
  if (ad)
    {
      Line(0,3,1,4)
      return;
    }
  if (be)
    {
      Line(1,4,0,3)
      return;
    }
  /* no structure detected: plain average of the 2x2 sub-block */
  MixPixels(pixels,offsets,4,result,3,channels);
#undef Corner
#undef Line
}
static void Xbr2X(const Image *source,const Quantum *pixels,Quantum *result,
  const size_t channels)
{
  /*
    xBR 2x: operates on a 5x5 window (offsets 0-24, center 12).  For each
    output corner it compares two weighted sums of pixel differences; when
    an edge is detected the center is blended with its closest matching
    neighbor, otherwise the center is copied unchanged.
    w_M_N is 0 when window pixels M and N are equal, 1 otherwise.
  */
#define WeightVar(M,N) const int w_##M##_##N = \
  PixelsEqual(pixels,M,pixels,N,channels) ? 0 : 1;
  WeightVar(12,11)
  WeightVar(12,7)
  WeightVar(12,13)
  WeightVar(12,17)
  WeightVar(12,16)
  WeightVar(12,8)
  WeightVar(6,10)
  WeightVar(6,2)
  WeightVar(11,7)
  WeightVar(11,17)
  WeightVar(11,5)
  WeightVar(7,13)
  WeightVar(7,1)
  WeightVar(12,6)
  WeightVar(12,18)
  WeightVar(8,14)
  WeightVar(8,2)
  WeightVar(13,17)
  WeightVar(13,9)
  WeightVar(7,3)
  WeightVar(16,10)
  WeightVar(16,22)
  WeightVar(17,21)
  WeightVar(11,15)
  WeightVar(18,14)
  WeightVar(18,22)
  WeightVar(17,23)
  WeightVar(17,19)
#undef WeightVar
  /* top-left cell: edge along the 11-7 diagonal? */
  if (
    w_12_16 + w_12_8 + w_6_10 + w_6_2 + (4 * w_11_7) <
    w_11_17 + w_11_5 + w_7_13 + w_7_1 + (4 * w_12_6)
  )
    Mix2Pixels(pixels,(ssize_t) (w_12_11 <= w_12_7 ? 11 : 7),12,result,0,
      channels);
  else
    CopyPixels(pixels,12,result,0,channels);
  /* top-right cell: edge along the 7-13 diagonal? */
  if (
    w_12_18 + w_12_6 + w_8_14 + w_8_2 + (4 * w_7_13) <
    w_13_17 + w_13_9 + w_11_7 + w_7_3 + (4 * w_12_8)
  )
    Mix2Pixels(pixels,(ssize_t) (w_12_7 <= w_12_13 ? 7 : 13),12,result,1,
      channels);
  else
    CopyPixels(pixels,12,result,1,channels);
  /* bottom-left cell: edge along the 11-17 diagonal? */
  if (
    w_12_6 + w_12_18 + w_16_10 + w_16_22 + (4 * w_11_17) <
    w_11_7 + w_11_15 + w_13_17 + w_17_21 + (4 * w_12_16)
  )
    Mix2Pixels(pixels,(ssize_t) (w_12_11 <= w_12_17 ? 11 : 17),12,result,2,
      channels);
  else
    CopyPixels(pixels,12,result,2,channels);
  /* bottom-right cell: edge along the 13-17 diagonal? */
  if (
    w_12_8 + w_12_16 + w_18_14 + w_18_22 + (4 * w_13_17) <
    w_11_17 + w_17_23 + w_17_19 + w_7_13 + (4 * w_12_18)
  )
    Mix2Pixels(pixels,(ssize_t) (w_12_13 <= w_12_17 ? 13 : 17),12,result,3,
      channels);
  else
    CopyPixels(pixels,12,result,3,channels);
}
static void Scale2X(const Image *source,const Quantum *pixels,Quantum *result,
  const size_t channels)
{
  int
    bottom_left,
    bottom_right,
    top_left,
    top_right;

  (void) source;
  /*
    Scale2x (AdvMAME2x): if the vertical neighbors (1,7) or the horizontal
    neighbors (3,5) of the center already match, the 2x2 output is the
    center pixel replicated.  Otherwise each output cell takes the edge
    neighbor shared by its two adjacent edges when those edges match, and
    the center pixel when they do not.
  */
  if (PixelsEqual(pixels,1,pixels,7,channels) ||
      PixelsEqual(pixels,3,pixels,5,channels))
    {
      ssize_t
        cell;

      for (cell=0; cell < 4; cell++)
        CopyPixels(pixels,4,result,cell,channels);
      return;
    }
  top_left=PixelsEqual(pixels,1,pixels,3,channels);
  top_right=PixelsEqual(pixels,1,pixels,5,channels);
  bottom_left=PixelsEqual(pixels,3,pixels,7,channels);
  bottom_right=PixelsEqual(pixels,5,pixels,7,channels);
  CopyPixels(pixels,top_left ? 3 : 4,result,0,channels);
  CopyPixels(pixels,top_right ? 5 : 4,result,1,channels);
  CopyPixels(pixels,bottom_left ? 3 : 4,result,2,channels);
  CopyPixels(pixels,bottom_right ? 5 : 4,result,3,channels);
}
static void Epbx2X(const Image *source,const Quantum *pixels,
  Quantum *result,const size_t channels)
{
  /*
    EPX-B 2x: start from the center pixel replicated into all four output
    cells, then, when the window shows a genuine diagonal edge (vertical
    and horizontal neighbor pairs both differ, plus the corner sanity
    checks below), blend each output corner from its two adjacent edge
    neighbors.  HelperCond(a,b,...) is true when edge pixels a and b match
    and at least one supporting neighbor confirms the edge.
  */
#define HelperCond(a,b,c,d,e,f,g) ( \
  PixelsEqual(pixels,a,pixels,b,channels) && ( \
    PixelsEqual(pixels,c,pixels,d,channels) || \
    PixelsEqual(pixels,c,pixels,e,channels) || \
    PixelsEqual(pixels,a,pixels,f,channels) || \
    PixelsEqual(pixels,b,pixels,g,channels) \
    ) \
  )
  ssize_t
    i;

  for (i=0; i < 4; i++)
    CopyPixels(pixels,4,result,i,channels);
  if (
    !PixelsEqual(pixels,3,pixels,5,channels) &&
    !PixelsEqual(pixels,1,pixels,7,channels) &&
    (
      PixelsEqual(pixels,4,pixels,3,channels) ||
      PixelsEqual(pixels,4,pixels,7,channels) ||
      PixelsEqual(pixels,4,pixels,5,channels) ||
      PixelsEqual(pixels,4,pixels,1,channels) ||
      (
        (
          !PixelsEqual(pixels,0,pixels,8,channels) ||
          PixelsEqual(pixels,4,pixels,6,channels) ||
          PixelsEqual(pixels,3,pixels,2,channels)
        ) &&
        (
          !PixelsEqual(pixels,6,pixels,2,channels) ||
          PixelsEqual(pixels,4,pixels,0,channels) ||
          PixelsEqual(pixels,4,pixels,8,channels)
        )
      )
    )
  )
    {
      if (HelperCond(1,3,4,0,8,2,6))
        Mix2Pixels(pixels,1,3,result,0,channels);  /* top-left */
      if (HelperCond(5,1,4,2,6,8,0))
        Mix2Pixels(pixels,5,1,result,1,channels);  /* top-right */
      if (HelperCond(3,7,4,6,2,0,8))
        Mix2Pixels(pixels,3,7,result,2,channels);  /* bottom-left */
      if (HelperCond(7,5,4,8,0,6,2))
        Mix2Pixels(pixels,7,5,result,3,channels);  /* bottom-right */
    }
#undef HelperCond
}
static inline void Eagle3X(const Image *source,const Quantum *pixels,
  Quantum *result,const size_t channels)
{
  int
    bl,
    br,
    tl,
    tr;

  ssize_t
    cell;

  (void) source;
  /*
    Eagle 3x: detect each corner of the 3x3 window where the corner pixel
    matches both of its flanking neighbors, seed the 3x3 output with the
    center pixel, then overwrite corner cells with the matching corner and
    edge cells with the average of the two adjacent matching corners.
  */
  tl=PixelsEqual(pixels,0,pixels,1,channels) &&
    PixelsEqual(pixels,0,pixels,3,channels);
  tr=PixelsEqual(pixels,1,pixels,2,channels) &&
    PixelsEqual(pixels,2,pixels,5,channels);
  bl=PixelsEqual(pixels,3,pixels,6,channels) &&
    PixelsEqual(pixels,6,pixels,7,channels);
  br=PixelsEqual(pixels,5,pixels,7,channels) &&
    PixelsEqual(pixels,7,pixels,8,channels);
  for (cell=0; cell < 9; cell++)
    CopyPixels(pixels,4,result,cell,channels);
  if (tl)
    CopyPixels(pixels,0,result,0,channels);
  if (tl && tr)
    Mix2Pixels(pixels,0,2,result,1,channels);
  if (tr)
    CopyPixels(pixels,1,result,2,channels);
  if (tl && bl)
    Mix2Pixels(pixels,0,6,result,3,channels);
  if (tr && br)
    Mix2Pixels(pixels,2,8,result,5,channels);
  if (bl)
    CopyPixels(pixels,3,result,6,channels);
  if (bl && br)
    Mix2Pixels(pixels,6,8,result,7,channels);
  if (br)
    CopyPixels(pixels,5,result,8,channels);
}
static inline void Eagle3XB(const Image *source,const Quantum *pixels,
  Quantum *result,const size_t channels)
{
  int
    bl,
    br,
    tl,
    tr;

  ssize_t
    cell;

  (void) source;
  /*
    Eagle 3xB: like Eagle3X but without edge blending — only the four
    corner cells of the 3x3 output may differ from the center pixel.
  */
  tl=PixelsEqual(pixels,0,pixels,1,channels) &&
    PixelsEqual(pixels,0,pixels,3,channels);
  tr=PixelsEqual(pixels,1,pixels,2,channels) &&
    PixelsEqual(pixels,2,pixels,5,channels);
  bl=PixelsEqual(pixels,3,pixels,6,channels) &&
    PixelsEqual(pixels,6,pixels,7,channels);
  br=PixelsEqual(pixels,5,pixels,7,channels) &&
    PixelsEqual(pixels,7,pixels,8,channels);
  for (cell=0; cell < 9; cell++)
    CopyPixels(pixels,4,result,cell,channels);
  if (tl)
    CopyPixels(pixels,0,result,0,channels);
  if (tr)
    CopyPixels(pixels,1,result,2,channels);
  if (bl)
    CopyPixels(pixels,3,result,6,channels);
  if (br)
    CopyPixels(pixels,5,result,8,channels);
}
static inline void Scale3X(const Image *source,const Quantum *pixels,
  Quantum *result,const size_t channels)
{
  /*
    Scale3x (AdvMAME3x): when the vertical neighbors (1,7) or horizontal
    neighbors (3,5) of the center match, the whole 3x3 output is the
    center pixel.  Otherwise each of the nine output cells is either the
    center pixel or one of its edge neighbors, selected by the standard
    Scale3x corner/edge match conditions below.
  */
  if (!PixelsEqual(pixels,1,pixels,7,channels) &&
      !PixelsEqual(pixels,3,pixels,5,channels))
    {
      /* cell 0 (top-left) */
      if (PixelsEqual(pixels,3,pixels,1,channels))
        CopyPixels(pixels,3,result,0,channels);
      else
        CopyPixels(pixels,4,result,0,channels);
      /* cell 1 (top edge) */
      if (
        (
          PixelsEqual(pixels,3,pixels,1,channels) &&
          !PixelsEqual(pixels,4,pixels,2,channels)
        ) ||
        (
          PixelsEqual(pixels,5,pixels,1,channels) &&
          !PixelsEqual(pixels,4,pixels,0,channels)
        )
      )
        CopyPixels(pixels,1,result,1,channels);
      else
        CopyPixels(pixels,4,result,1,channels);
      /* cell 2 (top-right) */
      if (PixelsEqual(pixels,5,pixels,1,channels))
        CopyPixels(pixels,5,result,2,channels);
      else
        CopyPixels(pixels,4,result,2,channels);
      /* cell 3 (left edge) */
      if (
        (
          PixelsEqual(pixels,3,pixels,1,channels) &&
          !PixelsEqual(pixels,4,pixels,6,channels)
        ) ||
        (
          PixelsEqual(pixels,3,pixels,7,channels) &&
          !PixelsEqual(pixels,4,pixels,0,channels)
        )
      )
        CopyPixels(pixels,3,result,3,channels);
      else
        CopyPixels(pixels,4,result,3,channels);
      /* cell 4 (center) is always the source center */
      CopyPixels(pixels,4,result,4,channels);
      /* cell 5 (right edge) */
      if (
        (
          PixelsEqual(pixels,5,pixels,1,channels) &&
          !PixelsEqual(pixels,4,pixels,8,channels)
        ) ||
        (
          PixelsEqual(pixels,5,pixels,7,channels) &&
          !PixelsEqual(pixels,4,pixels,2,channels)
        )
      )
        CopyPixels(pixels,5,result,5,channels);
      else
        CopyPixels(pixels,4,result,5,channels);
      /* cell 6 (bottom-left) */
      if (PixelsEqual(pixels,3,pixels,7,channels))
        CopyPixels(pixels,3,result,6,channels);
      else
        CopyPixels(pixels,4,result,6,channels);
      /* cell 7 (bottom edge) */
      if (
        (
          PixelsEqual(pixels,3,pixels,7,channels) &&
          !PixelsEqual(pixels,4,pixels,8,channels)
        ) ||
        (
          PixelsEqual(pixels,5,pixels,7,channels) &&
          !PixelsEqual(pixels,4,pixels,6,channels)
        )
      )
        CopyPixels(pixels,7,result,7,channels);
      else
        CopyPixels(pixels,4,result,7,channels);
      /* cell 8 (bottom-right) */
      if (PixelsEqual(pixels,5,pixels,7,channels))
        CopyPixels(pixels,5,result,8,channels);
      else
        CopyPixels(pixels,4,result,8,channels);
    }
  else
    {
      ssize_t
        i;

      for (i=0; i < 9; i++)
        CopyPixels(pixels,4,result,i,channels);
    }
}
MagickExport Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
{
#define MagnifyImageTag "Magnify/Image"

  CacheView
    *image_view,
    *magnify_view;

  const char
    *option;

  Image
    *source_image,
    *magnify_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  OffsetInfo
    offset;

  RectangleInfo
    rectangle;

  ssize_t
    y;

  unsigned char
    magnification,  /* output pixels per source pixel per axis (2 or 3) */
    width;          /* side of the square source window each method reads */

  void
    (*scaling_method)(const Image *,const Quantum *,Quantum *,size_t);

  /*
    Initialize magnified image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Select the pixel-art scaling method from the "magnify:method" image
    option; the default is scale2x.
  */
  option=GetImageOption(image->image_info,"magnify:method");
  if (option == (char *) NULL)
    option="scale2x";
  scaling_method=Scale2X;
  magnification=1;
  width=1;
  switch (*option)
  {
    case 'e':
    {
      if (LocaleCompare(option,"eagle2x") == 0)
        {
          scaling_method=Eagle2X;
          magnification=2;
          width=3;
          break;
        }
      if (LocaleCompare(option,"eagle3x") == 0)
        {
          scaling_method=Eagle3X;
          magnification=3;
          width=3;
          break;
        }
      if (LocaleCompare(option,"eagle3xb") == 0)
        {
          scaling_method=Eagle3XB;
          magnification=3;
          width=3;
          break;
        }
      if (LocaleCompare(option,"epbx2x") == 0)
        {
          scaling_method=Epbx2X;
          magnification=2;
          width=3;
          break;
        }
      break;
    }
    case 'f':
    {
      if (LocaleCompare(option,"fish2x") == 0)
        {
          scaling_method=Fish2X;
          magnification=2;
          width=3;
          break;
        }
      break;
    }
    case 'h':
    {
      if (LocaleCompare(option,"hq2x") == 0)
        {
          scaling_method=Hq2X;
          magnification=2;
          width=3;
          break;
        }
      break;
    }
    case 's':
    {
      if (LocaleCompare(option,"scale2x") == 0)
        {
          scaling_method=Scale2X;
          magnification=2;
          width=3;
          break;
        }
      if (LocaleCompare(option,"scale3x") == 0)
        {
          scaling_method=Scale3X;
          magnification=3;
          width=3;
          break;
        }
      break;
    }
    case 'x':
    {
      if (LocaleCompare(option,"xbr2x") == 0)
        {
          scaling_method=Xbr2X;
          magnification=2;
          width=5;
        }
      break;
    }
    default:
      break;
  }
  /*
    Make a working copy of the source image and convert it to RGB colorspace.
  */
  source_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (source_image == (Image *) NULL)
    return((Image *) NULL);
  offset.x=0;
  offset.y=0;
  rectangle.x=0;
  rectangle.y=0;
  rectangle.width=image->columns;
  rectangle.height=image->rows;
  (void) CopyImagePixels(source_image,image,&rectangle,&offset,exception);
  (void) SetImageColorspace(source_image,RGBColorspace,exception);
  magnify_image=CloneImage(source_image,magnification*source_image->columns,
    magnification*source_image->rows,MagickTrue,exception);
  if (magnify_image == (Image *) NULL)
    {
      source_image=DestroyImage(source_image);
      return((Image *) NULL);
    }
  /*
    Magnify the image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(source_image,exception);
  magnify_view=AcquireAuthenticCacheView(magnify_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,magnify_image,source_image->rows,1)
#endif
  for (y=0; y < (ssize_t) source_image->rows; y++)
  {
    Quantum
      r[9*MaxPixelChannels];  /* result block, sized for the worst case:
                                 3x magnification writes 3*3 pixels of up
                                 to MaxPixelChannels channels each */

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(magnify_view,0,magnification*y,
      magnify_image->columns,magnification,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      Magnify this row of pixels.
    */
    for (x=0; x < (ssize_t) source_image->columns; x++)
    {
      const Quantum
        *magick_restrict p;

      size_t
        channels;

      ssize_t
        i;

      ssize_t
        j;

      p=GetCacheViewVirtualPixels(image_view,x-width/2,y-width/2,width,width,
        exception);
      if (p == (const Quantum *) NULL)
        {
          /* virtual pixel fetch failed; abandon this row */
          status=MagickFalse;
          break;
        }
      channels=GetPixelChannels(source_image);
      scaling_method(source_image,p,r,channels);
      /*
        Copy the result pixels into the final image.
      */
      for (j=0; j < (ssize_t) magnification; j++)
        for (i=0; i < (ssize_t) (channels*magnification); i++)
          q[j*channels*magnify_image->columns+i]=r[j*magnification*channels+i];
      q+=magnification*GetPixelChannels(magnify_image);
    }
    if (SyncCacheViewAuthenticPixels(magnify_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,MagnifyImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  magnify_view=DestroyCacheView(magnify_view);
  image_view=DestroyCacheView(image_view);
  source_image=DestroyImage(source_image);
  if (status == MagickFalse)
    magnify_image=DestroyImage(magnify_image);
  return(magnify_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M i n i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MinifyImage() is a convenience method that scales an image proportionally to
% half its size.
%
% The format of the MinifyImage method is:
%
% Image *MinifyImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MinifyImage(const Image *image,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: scale the image to half its size in each
    dimension using a spline filter; errors are reported in `exception`.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  return(ResizeImage(image,image->columns/2,image->rows/2,SplineFilter,
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s a m p l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResampleImage() resizes an image in terms of its pixel size, so that when
% displayed at the given resolution it will be the same size in terms of
% real world units as the original image at the original resolution.
%
% The format of the ResampleImage method is:
%
%      Image *ResampleImage(const Image *image,const double x_resolution,
% const double y_resolution,const FilterType filter,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be resized to fit the given resolution.
%
% o x_resolution: the new image x resolution.
%
% o y_resolution: the new image y resolution.
%
% o filter: Image filter to use.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ResampleImage(const Image *image,const double x_resolution,
  const double y_resolution,const FilterType filter,ExceptionInfo *exception)
{
#define ResampleImageTag "Resample/Image"

  double
    density_x,
    density_y;

  Image
    *resample_image;

  size_t
    height,
    width;

  /*
    Resize so the image covers the same physical area at the requested
    resolution as it did at its stored resolution (72 DPI assumed when the
    stored resolution is unset).
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  density_x=image->resolution.x == 0.0 ? 72.0 : image->resolution.x;
  density_y=image->resolution.y == 0.0 ? 72.0 : image->resolution.y;
  width=(size_t) (x_resolution*image->columns/density_x+0.5);
  height=(size_t) (y_resolution*image->rows/density_y+0.5);
  resample_image=ResizeImage(image,width,height,filter,exception);
  if (resample_image != (Image *) NULL)
    {
      /* record the resolution the new pixel dimensions were computed for */
      resample_image->resolution.x=x_resolution;
      resample_image->resolution.y=y_resolution;
    }
  return(resample_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResizeImage() scales an image to the desired dimensions, using the given
% filter (see AcquireFilterInfo()).
%
% If an undefined filter is given the filter defaults to Mitchell for a
% colormapped image, an image with a matte channel, or if the image is
% enlarged. Otherwise the filter defaults to a Lanczos.
%
% ResizeImage() was inspired by Paul Heckbert's "zoom" program.
%
% The format of the ResizeImage method is:
%
% Image *ResizeImage(Image *image,const size_t columns,const size_t rows,
% const FilterType filter,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o filter: Image filter to use.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  One term of a resize filter's weighted sum: which source pixel
  contributes, and with what filter weight.
*/
typedef struct _ContributionInfo
{
  double
    weight;  /* filter weight applied to the contributing pixel */

  ssize_t
    pixel;   /* index of the contributing source pixel */
} ContributionInfo;
/*
  DestroyContributionThreadSet() releases the per-thread contribution buffers
  and then the thread-set array itself.  Individual slots may be NULL when
  AcquireContributionThreadSet() failed part-way through, so each slot is
  checked before release.  Returns NULL for convenient assignment:
  contributions=DestroyContributionThreadSet(contributions).
*/
static ContributionInfo **DestroyContributionThreadSet(
  ContributionInfo **contribution)
{
  ssize_t
    i,
    number_threads;

  assert(contribution != (ContributionInfo **) NULL);
  /*
    Hoist the loop-invariant resource-limit query out of the loop condition.
  */
  number_threads=(ssize_t) GetMagickResourceLimit(ThreadResource);
  for (i=0; i < number_threads; i++)
    if (contribution[i] != (ContributionInfo *) NULL)
      contribution[i]=(ContributionInfo *) RelinquishAlignedMemory(
        contribution[i]);
  contribution=(ContributionInfo **) RelinquishMagickMemory(contribution);
  return(contribution);
}
static ContributionInfo **AcquireContributionThreadSet(const size_t count)
{
ssize_t
i;
ContributionInfo
**contribution;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
contribution=(ContributionInfo **) AcquireQuantumMemory(number_threads,
sizeof(*contribution));
if (contribution == (ContributionInfo **) NULL)
return((ContributionInfo **) NULL);
(void) memset(contribution,0,number_threads*sizeof(*contribution));
for (i=0; i < (ssize_t) number_threads; i++)
{
contribution[i]=(ContributionInfo *) MagickAssumeAligned(
AcquireAlignedMemory(count,sizeof(**contribution)));
if (contribution[i] == (ContributionInfo *) NULL)
return(DestroyContributionThreadSet(contribution));
}
return(contribution);
}
/*
  HorizontalFilter() resamples `image` horizontally into `resize_image`,
  whose column count defines the output width.  For each destination column
  it builds a table of filter-weight contributions over the overlapping
  source columns, then blends every row of that column.  `span` is the total
  number of work units of the two-pass resize and `progress` the shared
  progress counter.  Returns MagickTrue on success, MagickFalse on any
  pixel-cache or allocation failure.
*/
static MagickBooleanType HorizontalFilter(
  const ResizeFilter *magick_restrict resize_filter,
  const Image *magick_restrict image,Image *magick_restrict resize_image,
  const double x_factor,const MagickSizeType span,
  MagickOffsetType *magick_restrict progress,ExceptionInfo *exception)
{
#define ResizeImageTag "Resize/Image"

  CacheView
    *image_view,
    *resize_view;

  ClassType
    storage_class;

  ContributionInfo
    **magick_restrict contributions;

  MagickBooleanType
    status;

  double
    scale,
    support;

  ssize_t
    x;

  /*
    Apply filter to resize horizontally from image to resize image.
    When minifying (x_factor < 1) the filter support window is widened by
    the inverse scale factor.
  */
  scale=MagickMax(1.0/x_factor+MagickEpsilon,1.0);
  support=scale*GetResizeFilterSupport(resize_filter);
  storage_class=support > 0.5 ? DirectClass : image->storage_class;
  if (SetImageStorageClass(resize_image,storage_class,exception) == MagickFalse)
    return(MagickFalse);
  if (support < 0.5)
    {
      /*
        Support too small even for nearest neighbour: Reduce to point sampling.
      */
      support=(double) 0.5;
      scale=1.0;
    }
  /*
    2*support+3 bounds the number of source pixels that can contribute to
    one destination pixel; one buffer is allocated per thread.
  */
  contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0));
  if (contributions == (ContributionInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  scale=PerceptibleReciprocal(scale);
  image_view=AcquireVirtualCacheView(image,exception);
  resize_view=AcquireAuthenticCacheView(resize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,resize_image,resize_image->columns,1)
#endif
  for (x=0; x < (ssize_t) resize_image->columns; x++)
  {
    const int
      id = GetOpenMPThreadId();

    double
      bisect,
      density;

    const Quantum
      *magick_restrict p;

    ContributionInfo
      *magick_restrict contribution;

    Quantum
      *magick_restrict q;

    ssize_t
      y;

    ssize_t
      n,
      start,
      stop;

    if (status == MagickFalse)
      continue;
    /*
      bisect is the center of destination column x mapped into source
      coordinates; [start,stop) is the clipped window of contributing
      source columns.
    */
    bisect=(double) (x+0.5)/x_factor+MagickEpsilon;
    start=(ssize_t) MagickMax(bisect-support+0.5,0.0);
    stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->columns);
    density=0.0;
    contribution=contributions[id];
    for (n=0; n < (stop-start); n++)
    {
      contribution[n].pixel=start+n;
      contribution[n].weight=GetResizeFilterWeight(resize_filter,scale*
        ((double) (start+n)-bisect+0.5));
      density+=contribution[n].weight;
    }
    if (n == 0)
      continue;
    if ((density != 0.0) && (density != 1.0))
      {
        ssize_t
          i;

        /*
          Normalize.
        */
        density=PerceptibleReciprocal(density);
        for (i=0; i < n; i++)
          contribution[i].weight*=density;
      }
    /*
      Fetch the strip of source columns covering the window, all rows tall,
      and queue one destination column.
    */
    p=GetCacheViewVirtualPixels(image_view,contribution[0].pixel,0,(size_t)
      (contribution[n-1].pixel-contribution[0].pixel+1),image->rows,exception);
    q=QueueCacheViewAuthenticPixels(resize_view,x,0,1,resize_image->rows,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (y=0; y < (ssize_t) resize_image->rows; y++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          resize_traits,
          traits;

        ssize_t
          j;

        ssize_t
          k;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        resize_traits=GetPixelChannelTraits(resize_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (resize_traits == UndefinedPixelTrait))
          continue;
        if (((resize_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(resize_image,q) <= (QuantumRange/2)))
          {
            /*
              Copy-only channel (or masked pixel): take the single source
              column nearest bisect instead of blending.  k indexes into
              the fetched strip: y rows of (window width) pixels.
            */
            j=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double)
              stop-1.0)+0.5);
            k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
              (contribution[j-start].pixel-contribution[0].pixel);
            SetPixelChannel(resize_image,channel,p[k*GetPixelChannels(image)+i],
              q);
            continue;
          }
        pixel=0.0;
        if ((resize_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (j=0; j < n; j++)
            {
              k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
                (contribution[j].pixel-contribution[0].pixel);
              alpha=contribution[j].weight;
              pixel+=alpha*p[k*GetPixelChannels(image)+i];
            }
            SetPixelChannel(resize_image,channel,ClampToQuantum(pixel),q);
            continue;
          }
        /*
          Alpha blending: weights are additionally scaled by the source
          pixel's alpha, and the sum is renormalized by gamma.
        */
        gamma=0.0;
        for (j=0; j < n; j++)
        {
          k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
            (contribution[j].pixel-contribution[0].pixel);
          alpha=contribution[j].weight*QuantumScale*
            GetPixelAlpha(image,p+k*GetPixelChannels(image));
          pixel+=alpha*p[k*GetPixelChannels(image)+i];
          gamma+=alpha;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(resize_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(resize_image);
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        (*progress)++;
        proceed=SetImageProgress(image,ResizeImageTag,*progress,span);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  contributions=DestroyContributionThreadSet(contributions);
  return(status);
}
/*
  VerticalFilter() resamples `image` vertically into `resize_image`, whose
  row count defines the output height.  Mirror of HorizontalFilter(): for
  each destination row it builds a table of filter-weight contributions over
  the overlapping source rows, then blends every column of that row.
  Returns MagickTrue on success, MagickFalse on any pixel-cache or
  allocation failure.
*/
static MagickBooleanType VerticalFilter(
  const ResizeFilter *magick_restrict resize_filter,
  const Image *magick_restrict image,Image *magick_restrict resize_image,
  const double y_factor,const MagickSizeType span,
  MagickOffsetType *magick_restrict progress,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *resize_view;

  ClassType
    storage_class;

  ContributionInfo
    **magick_restrict contributions;

  double
    scale,
    support;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Apply filter to resize vertically from image to resize image.
    When minifying (y_factor < 1) the filter support window is widened by
    the inverse scale factor.
  */
  scale=MagickMax(1.0/y_factor+MagickEpsilon,1.0);
  support=scale*GetResizeFilterSupport(resize_filter);
  storage_class=support > 0.5 ? DirectClass : image->storage_class;
  if (SetImageStorageClass(resize_image,storage_class,exception) == MagickFalse)
    return(MagickFalse);
  if (support < 0.5)
    {
      /*
        Support too small even for nearest neighbour: Reduce to point sampling.
      */
      support=(double) 0.5;
      scale=1.0;
    }
  /*
    2*support+3 bounds the number of source rows contributing to one
    destination row; one buffer per thread.
  */
  contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0));
  if (contributions == (ContributionInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  scale=PerceptibleReciprocal(scale);
  image_view=AcquireVirtualCacheView(image,exception);
  resize_view=AcquireAuthenticCacheView(resize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,resize_image,resize_image->rows,1)
#endif
  for (y=0; y < (ssize_t) resize_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    double
      bisect,
      density;

    const Quantum
      *magick_restrict p;

    ContributionInfo
      *magick_restrict contribution;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    ssize_t
      n,
      start,
      stop;

    if (status == MagickFalse)
      continue;
    /*
      bisect is the center of destination row y mapped into source
      coordinates; [start,stop) is the clipped window of contributing
      source rows.
    */
    bisect=(double) (y+0.5)/y_factor+MagickEpsilon;
    start=(ssize_t) MagickMax(bisect-support+0.5,0.0);
    stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->rows);
    density=0.0;
    contribution=contributions[id];
    for (n=0; n < (stop-start); n++)
    {
      contribution[n].pixel=start+n;
      contribution[n].weight=GetResizeFilterWeight(resize_filter,scale*
        ((double) (start+n)-bisect+0.5));
      density+=contribution[n].weight;
    }
    if (n == 0)
      continue;
    if ((density != 0.0) && (density != 1.0))
      {
        ssize_t
          i;

        /*
          Normalize.
        */
        density=PerceptibleReciprocal(density);
        for (i=0; i < n; i++)
          contribution[i].weight*=density;
      }
    /*
      Fetch the strip of source rows covering the window, all columns wide,
      and queue one destination row.
    */
    p=GetCacheViewVirtualPixels(image_view,0,contribution[0].pixel,
      image->columns,(size_t) (contribution[n-1].pixel-contribution[0].pixel+1),
      exception);
    q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) resize_image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          resize_traits,
          traits;

        ssize_t
          j;

        ssize_t
          k;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        resize_traits=GetPixelChannelTraits(resize_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (resize_traits == UndefinedPixelTrait))
          continue;
        if (((resize_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(resize_image,q) <= (QuantumRange/2)))
          {
            /*
              Copy-only channel (or masked pixel): take the single source
              row nearest bisect instead of blending.  k indexes into the
              fetched strip: (row offset)*columns + x.
            */
            j=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double)
              stop-1.0)+0.5);
            k=(ssize_t) ((contribution[j-start].pixel-contribution[0].pixel)*
              image->columns+x);
            SetPixelChannel(resize_image,channel,p[k*GetPixelChannels(image)+i],
              q);
            continue;
          }
        pixel=0.0;
        if ((resize_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (j=0; j < n; j++)
            {
              k=(ssize_t) ((contribution[j].pixel-contribution[0].pixel)*
                image->columns+x);
              alpha=contribution[j].weight;
              pixel+=alpha*p[k*GetPixelChannels(image)+i];
            }
            SetPixelChannel(resize_image,channel,ClampToQuantum(pixel),q);
            continue;
          }
        /*
          Alpha blending: weights are additionally scaled by the source
          pixel's alpha, and the sum is renormalized by gamma.
        */
        gamma=0.0;
        for (j=0; j < n; j++)
        {
          k=(ssize_t) ((contribution[j].pixel-contribution[0].pixel)*
            image->columns+x);
          alpha=contribution[j].weight*QuantumScale*GetPixelAlpha(image,p+k*
            GetPixelChannels(image));
          pixel+=alpha*p[k*GetPixelChannels(image)+i];
          gamma+=alpha;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(resize_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(resize_image);
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        (*progress)++;
        proceed=SetImageProgress(image,ResizeImageTag,*progress,span);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  contributions=DestroyContributionThreadSet(contributions);
  return(status);
}
/*
  ResizeImage() scales `image` to columns x rows with the given filter by
  running two separable passes (HorizontalFilter and VerticalFilter) through
  an intermediate image.  The pass order is chosen so the cheaper dimension
  is reduced first.  Returns the resized image, or NULL on failure with the
  reason recorded in `exception`.
*/
MagickExport Image *ResizeImage(const Image *image,const size_t columns,
  const size_t rows,const FilterType filter,ExceptionInfo *exception)
{
  double
    x_factor,
    y_factor;

  FilterType
    filter_type;

  Image
    *filter_image,
    *resize_image;

  MagickOffsetType
    offset;

  MagickSizeType
    span;

  MagickStatusType
    status;

  ResizeFilter
    *resize_filter;

  /*
    Acquire resize image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  /* Same geometry and no explicit filter: a plain clone suffices. */
  if ((columns == image->columns) && (rows == image->rows) &&
      (filter == UndefinedFilter))
    return(CloneImage(image,0,0,MagickTrue,exception));
  /*
    Acquire resize filter.  Default filter selection: Point for a no-op
    geometry, Mitchell for colormapped/alpha/enlarging cases, otherwise
    Lanczos.
  */
  x_factor=(double) columns/(double) image->columns;
  y_factor=(double) rows/(double) image->rows;
  filter_type=LanczosFilter;
  if (filter != UndefinedFilter)
    filter_type=filter;
  else
    if ((x_factor == 1.0) && (y_factor == 1.0))
      filter_type=PointFilter;
    else
      if ((image->storage_class == PseudoClass) ||
          (image->alpha_trait != UndefinedPixelTrait) ||
          ((x_factor*y_factor) > 1.0))
        filter_type=MitchellFilter;
  resize_filter=AcquireResizeFilter(image,filter_type,MagickFalse,exception);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Try the OpenCL-accelerated path first; fall through if unavailable. */
  resize_image=AccelerateResizeImage(image,columns,rows,resize_filter,
    exception);
  if (resize_image != (Image *) NULL)
    {
      resize_filter=DestroyResizeFilter(resize_filter);
      return(resize_image);
    }
#endif
  resize_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (resize_image == (Image *) NULL)
    {
      resize_filter=DestroyResizeFilter(resize_filter);
      return(resize_image);
    }
  /*
    The intermediate image has the final size in one dimension only; which
    dimension depends on the pass order below.
  */
  if (x_factor > y_factor)
    filter_image=CloneImage(image,columns,image->rows,MagickTrue,exception);
  else
    filter_image=CloneImage(image,image->columns,rows,MagickTrue,exception);
  if (filter_image == (Image *) NULL)
    {
      resize_filter=DestroyResizeFilter(resize_filter);
      /* DestroyImage() returns NULL, so this propagates the failure. */
      return(DestroyImage(resize_image));
    }
  /*
    Resize image: two separable passes sharing one progress counter whose
    span covers both.
  */
  offset=0;
  if (x_factor > y_factor)
    {
      span=(MagickSizeType) (filter_image->columns+rows);
      status=HorizontalFilter(resize_filter,image,filter_image,x_factor,span,
        &offset,exception);
      status&=VerticalFilter(resize_filter,filter_image,resize_image,y_factor,
        span,&offset,exception);
    }
  else
    {
      span=(MagickSizeType) (filter_image->rows+columns);
      status=VerticalFilter(resize_filter,image,filter_image,y_factor,span,
        &offset,exception);
      status&=HorizontalFilter(resize_filter,filter_image,resize_image,x_factor,
        span,&offset,exception);
    }
  /*
    Free resources.
  */
  filter_image=DestroyImage(filter_image);
  resize_filter=DestroyResizeFilter(resize_filter);
  if (status == MagickFalse)
    {
      resize_image=DestroyImage(resize_image);
      return((Image *) NULL);
    }
  resize_image->type=image->type;
  return(resize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S a m p l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SampleImage() scales an image to the desired dimensions with pixel
% sampling. Unlike other scaling methods, this method does not introduce
% any additional color into the scaled image.
%
% The format of the SampleImage method is:
%
% Image *SampleImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the sampled image.
%
% o rows: the number of rows in the sampled image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SampleImage() scales `image` to columns x rows with point sampling: each
  destination pixel is copied from one source pixel, so no new colors are
  introduced.  The sampling point within each region defaults to the
  mid-point and may be overridden with the "sample:offset" artifact
  (percent; rho for x, sigma for y).  Returns the sampled image or NULL on
  failure, with the reason recorded in `exception`.

  Fixes applied:
    - ParseGeometry() was called twice with identical arguments; the first
      (void) call was redundant and has been removed.
    - The shared `progress` counter is now incremented under
      "#pragma omp atomic" and listed in shared(), matching
      HorizontalFilter()/VerticalFilter() in this file; the unguarded
      `progress++` raced across OpenMP threads.
*/
MagickExport Image *SampleImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define SampleImageTag "Sample/Image"

  CacheView
    *image_view,
    *sample_view;

  Image
    *sample_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    x1;

  ssize_t
    *x_offset,
    y;

  PointInfo
    sample_offset;

  /*
    Initialize sampled image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  sample_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (sample_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Set the sampling offset, default is in the mid-point of sample regions.
  */
  sample_offset.x=sample_offset.y=0.5-MagickEpsilon;
  {
    const char
      *value;

    value=GetImageArtifact(image,"sample:offset");
    if (value != (char *) NULL)
      {
        GeometryInfo
          geometry_info;

        MagickStatusType
          flags;

        flags=ParseGeometry(value,&geometry_info);
        sample_offset.x=sample_offset.y=geometry_info.rho/100.0-MagickEpsilon;
        if ((flags & SigmaValue) != 0)
          sample_offset.y=geometry_info.sigma/100.0-MagickEpsilon;
      }
  }
  /*
    Allocate scan line buffer and column offset buffers.  The x offsets are
    precomputed once since they are identical for every row.
  */
  x_offset=(ssize_t *) AcquireQuantumMemory((size_t) sample_image->columns,
    sizeof(*x_offset));
  if (x_offset == (ssize_t *) NULL)
    {
      sample_image=DestroyImage(sample_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  for (x1=0; x1 < (ssize_t) sample_image->columns; x1++)
    x_offset[x1]=(ssize_t) ((((double) x1+sample_offset.x)*image->columns)/
      sample_image->columns);
  /*
    Sample each row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  sample_view=AcquireAuthenticCacheView(sample_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,sample_image,sample_image->rows,1)
#endif
  for (y=0; y < (ssize_t) sample_image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    ssize_t
      y_offset;

    if (status == MagickFalse)
      continue;
    y_offset=(ssize_t) ((((double) y+sample_offset.y)*image->rows)/
      sample_image->rows);
    p=GetCacheViewVirtualPixels(image_view,0,y_offset,image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(sample_view,0,y,sample_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Sample each column.
    */
    for (x=0; x < (ssize_t) sample_image->columns; x++)
    {
      ssize_t
        i;

      if (GetPixelWriteMask(sample_image,q) <= (QuantumRange/2))
        {
          q+=GetPixelChannels(sample_image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(sample_image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          image_traits,
          traits;

        channel=GetPixelChannelChannel(sample_image,i);
        traits=GetPixelChannelTraits(sample_image,channel);
        image_traits=GetPixelChannelTraits(image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (image_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(sample_image,channel,p[x_offset[x]*GetPixelChannels(
          image)+i],q);
      }
      q+=GetPixelChannels(sample_image);
    }
    if (SyncCacheViewAuthenticPixels(sample_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SampleImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  sample_view=DestroyCacheView(sample_view);
  x_offset=(ssize_t *) RelinquishMagickMemory(x_offset);
  sample_image->type=image->type;
  if (status == MagickFalse)
    sample_image=DestroyImage(sample_image);
  return(sample_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleImage() changes the size of an image to the given dimensions.
%
% The format of the ScaleImage method is:
%
% Image *ScaleImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ScaleImage() changes the size of `image` to columns x rows using a
  box-like area average: source rows are accumulated into `y_vector` with
  fractional weights (scale.y / span.y), producing one scanline per output
  row, which is then averaged horizontally into `scale_scanline`.  Channels
  with BlendPixelTrait are premultiplied by alpha before accumulation and
  un-premultiplied on output.  Returns the scaled image or NULL on failure.
*/
MagickExport Image *ScaleImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define ScaleImageTag "Scale/Image"

  CacheView
    *image_view,
    *scale_view;

  double
    alpha,
    pixel[CompositePixelChannel],
    *scale_scanline,
    *scanline,
    *x_vector,
    *y_vector;

  Image
    *scale_image;

  MagickBooleanType
    next_column,
    next_row,
    proceed,
    status;

  PixelTrait
    scale_traits;

  PointInfo
    scale,
    span;

  ssize_t
    i;

  ssize_t
    n,
    number_rows,
    y;

  /*
    Initialize scaled image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  scale_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (scale_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(scale_image,DirectClass,exception) == MagickFalse)
    {
      scale_image=DestroyImage(scale_image);
      return((Image *) NULL);
    }
  /*
    Allocate memory.  When the row counts match, `scanline` aliases
    `x_vector` (no Y-scaling pass is needed); otherwise it is a separate
    buffer.  This aliasing drives the conditional frees below.
  */
  x_vector=(double *) AcquireQuantumMemory((size_t) image->columns,
    MaxPixelChannels*sizeof(*x_vector));
  scanline=x_vector;
  if (image->rows != scale_image->rows)
    scanline=(double *) AcquireQuantumMemory((size_t) image->columns,
      MaxPixelChannels*sizeof(*scanline));
  scale_scanline=(double *) AcquireQuantumMemory((size_t) scale_image->columns,
    MaxPixelChannels*sizeof(*scale_scanline));
  y_vector=(double *) AcquireQuantumMemory((size_t) image->columns,
    MaxPixelChannels*sizeof(*y_vector));
  if ((scanline == (double *) NULL) || (scale_scanline == (double *) NULL) ||
      (x_vector == (double *) NULL) || (y_vector == (double *) NULL))
    {
      if ((image->rows != scale_image->rows) && (scanline != (double *) NULL))
        scanline=(double *) RelinquishMagickMemory(scanline);
      if (scale_scanline != (double *) NULL)
        scale_scanline=(double *) RelinquishMagickMemory(scale_scanline);
      if (x_vector != (double *) NULL)
        x_vector=(double *) RelinquishMagickMemory(x_vector);
      if (y_vector != (double *) NULL)
        y_vector=(double *) RelinquishMagickMemory(y_vector);
      scale_image=DestroyImage(scale_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Scale image.  `n` counts source rows read so far; span.y is the
    remaining fraction of the current output row; scale.y the remaining
    fraction of the current input row.
  */
  number_rows=0;
  next_row=MagickTrue;
  span.y=1.0;
  scale.y=(double) scale_image->rows/(double) image->rows;
  (void) memset(y_vector,0,(size_t) MaxPixelChannels*image->columns*
    sizeof(*y_vector));
  n=0;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  scale_view=AcquireAuthenticCacheView(scale_image,exception);
  for (y=0; y < (ssize_t) scale_image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      break;
    q=QueueCacheViewAuthenticPixels(scale_view,0,y,scale_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        break;
      }
    alpha=1.0;
    if (scale_image->rows == image->rows)
      {
        /*
          Read a new scanline.  1:1 vertical mapping: each output row comes
          from exactly one source row (premultiplied by alpha for blended
          channels).
        */
        p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1,
          exception);
        if (p == (const Quantum *) NULL)
          {
            status=MagickFalse;
            break;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
            {
              p+=GetPixelChannels(image);
              continue;
            }
          if (image->alpha_trait != UndefinedPixelTrait)
            alpha=QuantumScale*GetPixelAlpha(image,p);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if ((traits & BlendPixelTrait) == 0)
              {
                x_vector[x*GetPixelChannels(image)+i]=(double) p[i];
                continue;
              }
            x_vector[x*GetPixelChannels(image)+i]=alpha*p[i];
          }
          p+=GetPixelChannels(image);
        }
      }
    else
      {
        /*
          Scale Y direction.  Accumulate whole source rows into y_vector
          while they fit entirely inside the current output row.
        */
        while (scale.y < span.y)
        {
          if ((next_row != MagickFalse) &&
              (number_rows < (ssize_t) image->rows))
            {
              /*
                Read a new scanline.
              */
              p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1,
                exception);
              if (p == (const Quantum *) NULL)
                {
                  status=MagickFalse;
                  break;
                }
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
                  {
                    p+=GetPixelChannels(image);
                    continue;
                  }
                if (image->alpha_trait != UndefinedPixelTrait)
                  alpha=QuantumScale*GetPixelAlpha(image,p);
                for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                {
                  PixelChannel channel = GetPixelChannelChannel(image,i);
                  PixelTrait traits = GetPixelChannelTraits(image,channel);
                  if ((traits & BlendPixelTrait) == 0)
                    {
                      x_vector[x*GetPixelChannels(image)+i]=(double) p[i];
                      continue;
                    }
                  x_vector[x*GetPixelChannels(image)+i]=alpha*p[i];
                }
                p+=GetPixelChannels(image);
              }
              number_rows++;
            }
          for (x=0; x < (ssize_t) image->columns; x++)
            for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
              y_vector[x*GetPixelChannels(image)+i]+=scale.y*
                x_vector[x*GetPixelChannels(image)+i];
          span.y-=scale.y;
          scale.y=(double) scale_image->rows/(double) image->rows;
          next_row=MagickTrue;
        }
        if ((next_row != MagickFalse) && (number_rows < (ssize_t) image->rows))
          {
            /*
              Read a new scanline: the source row that straddles the output
              row boundary contributes the partial weight span.y below.
            */
            p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1,
              exception);
            if (p == (const Quantum *) NULL)
              {
                status=MagickFalse;
                break;
              }
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
                {
                  p+=GetPixelChannels(image);
                  continue;
                }
              if (image->alpha_trait != UndefinedPixelTrait)
                alpha=QuantumScale*GetPixelAlpha(image,p);
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
              {
                PixelChannel channel = GetPixelChannelChannel(image,i);
                PixelTrait traits = GetPixelChannelTraits(image,channel);
                if ((traits & BlendPixelTrait) == 0)
                  {
                    x_vector[x*GetPixelChannels(image)+i]=(double) p[i];
                    continue;
                  }
                x_vector[x*GetPixelChannels(image)+i]=alpha*p[i];
              }
              p+=GetPixelChannels(image);
            }
            number_rows++;
            next_row=MagickFalse;
          }
        /*
          Emit the completed output scanline and reset the accumulator.
        */
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            pixel[i]=y_vector[x*GetPixelChannels(image)+i]+span.y*
              x_vector[x*GetPixelChannels(image)+i];
            scanline[x*GetPixelChannels(image)+i]=pixel[i];
            y_vector[x*GetPixelChannels(image)+i]=0.0;
          }
        }
        scale.y-=span.y;
        if (scale.y <= 0)
          {
            scale.y=(double) scale_image->rows/(double) image->rows;
            next_row=MagickTrue;
          }
        span.y=1.0;
      }
    if (scale_image->columns == image->columns)
      {
        /*
          Transfer scanline to scaled image.  1:1 horizontal mapping:
          un-premultiply blended channels and clamp.
        */
        for (x=0; x < (ssize_t) scale_image->columns; x++)
        {
          if (GetPixelWriteMask(scale_image,q) <= (QuantumRange/2))
            {
              q+=GetPixelChannels(scale_image);
              continue;
            }
          if (image->alpha_trait != UndefinedPixelTrait)
            {
              alpha=QuantumScale*scanline[x*GetPixelChannels(image)+
                GetPixelChannelOffset(image,AlphaPixelChannel)];
              alpha=PerceptibleReciprocal(alpha);
            }
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            scale_traits=GetPixelChannelTraits(scale_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (scale_traits == UndefinedPixelTrait))
              continue;
            if ((traits & BlendPixelTrait) == 0)
              {
                SetPixelChannel(scale_image,channel,ClampToQuantum(
                  scanline[x*GetPixelChannels(image)+i]),q);
                continue;
              }
            SetPixelChannel(scale_image,channel,ClampToQuantum(alpha*scanline[
              x*GetPixelChannels(image)+i]),q);
          }
          q+=GetPixelChannels(scale_image);
        }
      }
    else
      {
        ssize_t
          t;

        /*
          Scale X direction.  `t` is the destination column being
          accumulated; span.x/scale.x mirror the Y-pass bookkeeping.
        */
        for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          pixel[i]=0.0;
        next_column=MagickFalse;
        span.x=1.0;
        t=0;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          scale.x=(double) scale_image->columns/(double) image->columns;
          while (scale.x >= span.x)
          {
            if (next_column != MagickFalse)
              {
                for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                  pixel[i]=0.0;
                t++;
              }
            for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
            {
              PixelChannel channel = GetPixelChannelChannel(image,i);
              PixelTrait traits = GetPixelChannelTraits(image,channel);
              if (traits == UndefinedPixelTrait)
                continue;
              pixel[i]+=span.x*scanline[x*GetPixelChannels(image)+i];
              scale_scanline[t*GetPixelChannels(image)+i]=pixel[i];
            }
            scale.x-=span.x;
            span.x=1.0;
            next_column=MagickTrue;
          }
          if (scale.x > 0)
            {
              if (next_column != MagickFalse)
                {
                  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                    pixel[i]=0.0;
                  next_column=MagickFalse;
                  t++;
                }
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                pixel[i]+=scale.x*scanline[x*GetPixelChannels(image)+i];
              span.x-=scale.x;
            }
        }
        /* Flush any partially accumulated final column. */
        if (span.x > 0)
          {
            for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
              pixel[i]+=span.x*scanline[(x-1)*GetPixelChannels(image)+i];
          }
        if ((next_column == MagickFalse) && (t < (ssize_t) scale_image->columns))
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
            scale_scanline[t*GetPixelChannels(image)+i]=pixel[i];
        /*
          Transfer scanline to scaled image.
        */
        for (x=0; x < (ssize_t) scale_image->columns; x++)
        {
          if (GetPixelWriteMask(scale_image,q) <= (QuantumRange/2))
            {
              q+=GetPixelChannels(scale_image);
              continue;
            }
          if (image->alpha_trait != UndefinedPixelTrait)
            {
              alpha=QuantumScale*scale_scanline[x*GetPixelChannels(image)+
                GetPixelChannelOffset(image,AlphaPixelChannel)];
              alpha=PerceptibleReciprocal(alpha);
            }
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            scale_traits=GetPixelChannelTraits(scale_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (scale_traits == UndefinedPixelTrait))
              continue;
            if ((traits & BlendPixelTrait) == 0)
              {
                SetPixelChannel(scale_image,channel,ClampToQuantum(
                  scale_scanline[x*GetPixelChannels(image)+i]),q);
                continue;
              }
            SetPixelChannel(scale_image,channel,ClampToQuantum(alpha*
              scale_scanline[x*GetPixelChannels(image)+i]),q);
          }
          q+=GetPixelChannels(scale_image);
        }
      }
    if (SyncCacheViewAuthenticPixels(scale_view,exception) == MagickFalse)
      {
        status=MagickFalse;
        break;
      }
    proceed=SetImageProgress(image,ScaleImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      {
        status=MagickFalse;
        break;
      }
  }
  scale_view=DestroyCacheView(scale_view);
  image_view=DestroyCacheView(image_view);
  /*
    Free allocated memory.  `scanline` is freed only when it does not alias
    `x_vector` (see allocation above).
  */
  y_vector=(double *) RelinquishMagickMemory(y_vector);
  scale_scanline=(double *) RelinquishMagickMemory(scale_scanline);
  if (scale_image->rows != image->rows)
    scanline=(double *) RelinquishMagickMemory(scanline);
  x_vector=(double *) RelinquishMagickMemory(x_vector);
  scale_image->type=image->type;
  if (status == MagickFalse)
    scale_image=DestroyImage(scale_image);
  return(scale_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T h u m b n a i l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ThumbnailImage() changes the size of an image to the given dimensions and
% removes any associated profiles. The goal is to produce small low cost
% thumbnail images suited for display on the Web.
%
% The format of the ThumbnailImage method is:
%
% Image *ThumbnailImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ThumbnailImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define SampleFactor  5

  char
    filename[MagickPathExtent],
    value[MagickPathExtent];

  const char
    *name;

  Image
    *thumbnail_image;

  double
    x_factor,
    y_factor;

  struct stat
    attributes;

  /*
    Thumbnail the image: resize (point-sampling first when the reduction is
    large), strip non-color profiles, and attach freedesktop.org Thumb::*
    properties.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  x_factor=(double) columns/(double) image->columns;
  y_factor=(double) rows/(double) image->rows;
  if ((x_factor*y_factor) > 0.1)
    thumbnail_image=ResizeImage(image,columns,rows,image->filter,exception);
  else
    if (((SampleFactor*columns) < 128) || ((SampleFactor*rows) < 128))
      thumbnail_image=ResizeImage(image,columns,rows,image->filter,exception);
    else
      {
        Image
          *sample_image;

        /*
          Large reduction: cheaply point-sample down to SampleFactor times the
          target size first, then run the filtered resize on the small image.
        */
        sample_image=SampleImage(image,SampleFactor*columns,SampleFactor*rows,
          exception);
        if (sample_image == (Image *) NULL)
          return((Image *) NULL);
        thumbnail_image=ResizeImage(sample_image,columns,rows,image->filter,
          exception);
        sample_image=DestroyImage(sample_image);
      }
  if (thumbnail_image == (Image *) NULL)
    return(thumbnail_image);
  (void) ParseAbsoluteGeometry("0x0+0+0",&thumbnail_image->page);
  if (thumbnail_image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(thumbnail_image,OpaqueAlphaChannel,exception);
  thumbnail_image->depth=8;
  thumbnail_image->interlace=NoInterlace;
  /*
    Strip all profiles except color profiles.
  */
  ResetImageProfileIterator(thumbnail_image);
  for (name=GetNextImageProfile(thumbnail_image); name != (const char *) NULL; )
  {
    if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
      {
        /*
          Deleting a profile invalidates the iterator; restart the walk.
        */
        (void) DeleteImageProfile(thumbnail_image,name);
        ResetImageProfileIterator(thumbnail_image);
      }
    name=GetNextImageProfile(thumbnail_image);
  }
  (void) DeleteImageProperty(thumbnail_image,"comment");
  (void) CopyMagickString(value,image->magick_filename,MagickPathExtent);
  if (strstr(image->magick_filename,"//") == (char *) NULL)
    (void) FormatLocaleString(value,MagickPathExtent,"file://%s",
      image->magick_filename);
  (void) SetImageProperty(thumbnail_image,"Thumb::URI",value,exception);
  GetPathComponent(image->magick_filename,TailPath,filename);
  (void) CopyMagickString(value,filename,MagickPathExtent);
  /*
    Read attributes.st_mtime only when GetPathAttributes() succeeded: the
    previous unconditional FormatLocaleString() read an uninitialized
    struct stat on failure (undefined behavior), and its result was dead
    anyway -- value is overwritten by FormatMagickSize() just below.
  */
  if (GetPathAttributes(image->filename,&attributes) != MagickFalse)
    (void) FormatImageProperty(thumbnail_image,"Thumb::MTime","%.20g",(double)
      attributes.st_mtime);
  (void) FormatMagickSize(GetBlobSize(image),MagickFalse,"B",MagickPathExtent,
    value);
  (void) SetImageProperty(thumbnail_image,"Thumb::Size",value,exception);
  (void) FormatLocaleString(value,MagickPathExtent,"image/%s",image->magick);
  LocaleLower(value);
  (void) SetImageProperty(thumbnail_image,"Thumb::Mimetype",value,exception);
  (void) SetImageProperty(thumbnail_image,"software",MagickAuthoritativeURL,
    exception);
  (void) FormatImageProperty(thumbnail_image,"Thumb::Image::Width","%.20g",
    (double) image->magick_columns);
  (void) FormatImageProperty(thumbnail_image,"Thumb::Image::Height","%.20g",
    (double) image->magick_rows);
  (void) FormatImageProperty(thumbnail_image,"Thumb::Document::Pages","%.20g",
    (double) GetImageListLength(image));
  return(thumbnail_image);
}
|
DRB005-indirectaccess1-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
This program is extracted from a real application at LLNL.
Two pointers (xa1 and xa2) have a pair of values with a distance of 12.
They are used as start base addresses for two 1-D arrays.
Their index set has two indices with distance of 12: 999 +12 = 1011.
So there is loop carried dependence.
However, having loop carried dependence does not mean data races will always happen.
The iterations with loop carried dependence must be scheduled to
different threads in order for data races to happen.
In this example, we use schedule(static,1) to increase the chance that
the dependent loop iterations will be scheduled to different threads.
Data race pair: xa1[idx]@128:5 vs. xa2[idx]@129:5
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#define N 180
/* 180 precomputed subscripts applied to both xa1 and xa2 in main(); note 911
 * and 923 (= 911 + 12, see the inline comment) are both present, so the set
 * contains two indices exactly twelve apart -- the distance the benchmark's
 * loop-carried dependence is built around. */
int indexSet[N] = {
521, 523, 525, 527, 529, 531,
547, 549, 551, 553, 555, 557,
573, 575, 577, 579, 581, 583,
599, 601, 603, 605, 607, 609,
625, 627, 629, 631, 633, 635,
651, 653, 655, 657, 659, 661,
859, 861, 863, 865, 867, 869,
885, 887, 889, 891, 893, 895,
911, 913, 915, 917, 919, 923, // change original 921 to 923 = 911+12
937, 939, 941, 943, 945, 947,
963, 965, 967, 969, 971, 973,
989, 991, 993, 995, 997, 999,
1197, 1199, 1201, 1203, 1205, 1207,
1223, 1225, 1227, 1229, 1231, 1233,
1249, 1251, 1253, 1255, 1257, 1259,
1275, 1277, 1279, 1281, 1283, 1285,
1301, 1303, 1305, 1307, 1309, 1311,
1327, 1329, 1331, 1333, 1335, 1337,
1535, 1537, 1539, 1541, 1543, 1545,
1561, 1563, 1565, 1567, 1569, 1571,
1587, 1589, 1591, 1593, 1595, 1597,
1613, 1615, 1617, 1619, 1621, 1623,
1639, 1641, 1643, 1645, 1647, 1649,
1665, 1667, 1669, 1671, 1673, 1675,
1873, 1875, 1877, 1879, 1881, 1883,
1899, 1901, 1903, 1905, 1907, 1909,
1925, 1927, 1929, 1931, 1933, 1935,
1951, 1953, 1955, 1957, 1959, 1961,
1977, 1979, 1981, 1983, 1985, 1987,
2003, 2005, 2007, 2009, 2011, 2013};
/* DataRaceBench kernel: updates two pointer views of one malloc'd buffer
 * through an indirect index set.  The structure (including schedule(static,1))
 * is deliberate -- this file exists to exercise race detectors, so the exact
 * statement order and pragmas must be preserved. */
int main (int argc, char* argv[])
{
// max index value is 2013. +1 to ensure a reference like base[2015]
// Pointers will never access the same offset as (xa2 = base + 2014).
double * base = (double*) malloc(sizeof(double)* (2013+1+2013+1));
if (base == 0)
{
printf ("Error in malloc(). Aborting ...\n");
return 1;
}
// xa1 and xa2 are two views into the same allocation, 2014 elements apart.
double * xa1 = base;
double * xa2 = xa1 + 2014;
int i;
// initialize segments touched by indexSet
#pragma omp parallel for simd
for (i =521; i<= 2025; ++i)
{
base[i]=0.5*i;
}
// default static even scheduling may not trigger data race, using static,1 instead.
// NOTE(review): schedule(static,1) maximizes the chance that dependent
// iterations land on different threads; do not "fix" this.
#pragma omp parallel for simd schedule(static,1)
for (i =0; i< N; ++i)
{
int idx = indexSet[i];
xa1[idx]+= 1.0 + i;
xa2[idx]+= 3.0 + i;
}
printf("x1[999]=%f xa2[1285]=%f\n", xa1[999], xa2[1285]);
free (base);
return 0;
}
|
professor_challenge_optmized.c | /*
The number of primes up to 600000 is 49098 and took it 39s to solve it in parallel and 40s sequential
*/
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <omp.h>
/*
 * Count the primes in the half-open range [start, start+chunck).
 *
 * Even numbers are skipped entirely (the prime 2 is accounted for by the
 * callers, which start their counters at 1), so scanning begins at the first
 * odd number >= start.  Trial division only needs odd divisors up to
 * sqrt(i); the previous version divided by every odd number below i, which
 * made each candidate check linear instead of O(sqrt(i)).
 */
static const unsigned int verify_subdomain(const unsigned int start, const unsigned int chunck) {
    unsigned int i, j, counter = 0, begin;
    begin = (0 == start % 2) ? start + 1 : start;
    for (i = begin; i < start + chunck; i += 2) {
        /* 1 is not prime; even values never reach this loop */
        unsigned int is_prime = (i > 2);
        for (j = 3; j * j <= i; j += 2) {
            if (0 == i % j) {
                is_prime = 0;
                break;
            }
        }
        if (is_prime)
            counter += 1;
    }
    return counter;
}
/*
 * Count primes below `limit` by splitting [0, limit) across OpenMP threads.
 *
 * `counter` starts at 1 to account for the prime 2, which the odd-only scan
 * in verify_subdomain() never visits.  Each thread folds its partial count
 * into the shared counter under "#pragma omp atomic": the original
 * unprotected "counter +=" was a data race that could silently drop counts.
 */
static const unsigned int n_primes_parallel(const unsigned int limit) {
    unsigned int counter = 1, this_thread = 0, n_threads = 0, start = 0, chunck = 0, partial = 0;
    /* Dynamic adjustment did not show good results for this workload. */
    omp_set_dynamic(0);
#pragma omp parallel default(shared) private(this_thread, n_threads, chunck, start, partial) num_threads(2)
    {
        this_thread = omp_get_thread_num();
        n_threads = omp_get_num_threads();
        chunck = limit/n_threads;
        start = this_thread*chunck;
        /* Last thread absorbs the remainder when n_threads does not divide limit. */
        if(n_threads-1 == this_thread)
            chunck = limit-start;
        partial = verify_subdomain(start, chunck);
#pragma omp atomic
        counter += partial;
        printf("#%d out of %d\tstart at %d and checking up to %d\n", this_thread+1, n_threads, start, start+chunck);
    }
    return counter;
}
/*
 * Count primes strictly below `limit` by odd-only trial division.
 *
 * The counter starts at 1 for the prime 2 (which the odd scan skips), so
 * limits of 2 or less must return 0 explicitly -- the original returned 1
 * even when 2 itself was not below the limit.  The unused `begin` variable
 * is also dropped.
 */
static const unsigned int n_primes_sequential(const unsigned int limit) {
    unsigned int i, j, counter = 1;
    if (limit <= 2)
        return 0;
    for (i = 1; i < limit; i += 2) {
        for (j = 3; j < i; j += 2)
            if (0 == i % j)
                break;
        if (j == i)
            counter += 1;
    }
    return counter;
}
/*
 * Compare the parallel and sequential prime counters up to `limit` and
 * report timings.  clock() is divided by CLOCKS_PER_SEC in floating point:
 * the original used integer division, truncating to whole seconds before
 * the float assignment.  Note clock() measures CPU time summed over all
 * threads, so the parallel wall-clock advantage is understated.
 */
int main(int argc, char **argv) {
    /* Above that number the parallel doesn't perform better than the sequential */
    const unsigned int limit = 600000;
    unsigned int primes_parallel = 0, primes_sequential = 0;
    float start_parallel = 0.0, end_parallel = 0.0, start_sequential = 0.0, end_sequential = 0.0;
    start_parallel = (float) clock()/CLOCKS_PER_SEC;
    primes_parallel = n_primes_parallel(limit);
    end_parallel = (float) clock()/CLOCKS_PER_SEC;
    start_sequential = (float) clock()/CLOCKS_PER_SEC;
    primes_sequential = n_primes_sequential(limit);
    end_sequential = (float) clock()/CLOCKS_PER_SEC;
    if(primes_parallel == primes_sequential)
        printf("\nThe number of primes up to %d is %d and took it %.0fs to solve it in parallel and %.0fs sequential\n", limit, primes_sequential, end_parallel - start_parallel, end_sequential - start_sequential);
    else
        printf("\nERROR\tWrong number of primes up to %d calculated:\n\tParallel: %d\n\tSequential: %d\n", limit, primes_parallel, primes_sequential);
    return 0;
}
|
dle.c | /* Author: Christopher Hannon
* cs546 Parallel and Distributed Processing
* Homework 2
* Shared Memory Programming
*
* This Program implements 3 algorithms for solving
* dense linear equations of the
* form A*x=b, where A is an n*n matrix and b is a
* vector. This program performs guassian elimination
* without pivoting and back substitution.
*
* The 3 algorithms are sequential, parallel - Pthreads
* and parallel - OpenMP
*
* In the Parallel implementations (pthreads/openMP),
* The guassian elimination is performed in parallel
* while back substitution is performed sequentially.
* Normalization is done in the back substitution..
 * i.e., diagonals are not normalized.
*
 * ALGORITHM EXPLANATIONS:
*
* Sequential:
* This algorithm is simple and consists of 3 for loops
* bounded by N.
*
* Pthreads:
* This algorithm is explained in the comments above the
* implementation function and in the PDF.
*
* OpenMP:
* This algorithm is explained in the comments above the
* implementation function and in the PDF.
*
*
* * Start Wolfram Mathworld Quote
* * http://mathworld.wolfram.com/GaussianElimination.html
*
 * Gaussian elimination is a method for solving matrix
* equations of the form: Ax=b
*
* To perform Guassian elimination starting with the system
* of equations:
* __ __ _ _ _ _
* | || | | |
* | a11 a12 ... a1k || x1 | | b1 |
* | a21 a22 ... a2k || x2 | = | b2 |
* | ............... || .. | | .. |
* | ak1 ak2 ... akk || xk | | bk |
* |__ __||_ _| |_ _|
*
* compose the "augmented matrix equation"
* __ __ _ _
* | || |
* | a11 a12 ... a1k | b1 || x1 |
* | a21 a22 ... a2k | b2 || x2 |
* | ............... | .. || .. |
* | ak1 ak2 ... akk | bk || xk |
* |__ __||_ _|
*
* Here, the column vector in the variables 'x' is carried
* along for labeling the matrix rows. Now, perform elementary
* row operations to put the augmented matrix into the upper
* triangular form
* __ __
* | |
* | a'11 a'12 ... a1'k | b'1 |
* | 0 a'22 ... a2'k | b'2 |
* | ................ | ... |
* | 0 0 ... a'kk | b'k |
* |__ __|
*
* Solve the equation of the kth row for xk, then substitute
* back into the equation of the (k-1)st row to obtain a
* solution for x(k-1), etc., according to the formula:
*
* xi = 1/a'ii( b'i - {\sum ^k _ (j=i+1)} a'ij*xj)
*
* * End Wolfram Mathworld Quote
* * http://mathworld.wolfram.com/GaussianElimination.html
*
*
*
* Exit Codes:
* 0 - Program executed successfully
* -1 - Incorrect arguments to program (see Usage)
* -2 - failed on pthreads
* -3 - failed on semaphores
* -4 - failed on openMP
*
* Usage:
* ./guass (0/1/2/3) (N)
* 0 - Sequential mode
* 1 - Pthreads mode
* 2 - OpenMP mode
* 3 - Test all (Not implemented)
*/
/* includes */
#include <math.h>
#include <time.h>
#include <stdlib.h>
#include <stdio.h>
#include <pthread.h>
#include <semaphore.h>
#include <omp.h>
#include "dle.h"
/* arbitrarily choose max n of 1000 */
#define MAX_SIZE_OF_N 1000
/* arbitrarily choose max size */
#define MAX_SIZE_ELEMENT 10000
/* Global data */
float A[MAX_SIZE_OF_N][MAX_SIZE_OF_N];
float B[MAX_SIZE_OF_N];
float X[MAX_SIZE_OF_N];
int N;
sem_t Locks[MAX_SIZE_OF_N];
/* Pthreads struct for holding i and j */
struct pthread_data {
int i;
int j;
};
struct pthread_data pthread_data_array[MAX_SIZE_OF_N];
/* setup functions */
/* Parse command-line arguments, clamp N into [1, 1000], and fill the global
 * matrix A and vector B with uniform random values in [0, MAX_SIZE_ELEMENT). */
void setup(int argc, char **argv) {
  int row, col;
  if (argc < 3) {
    usage();
    exit(1); // error code 1 = incorrect args
  }
  N = atoi(argv[2]);
  /* Clamp the requested size into the supported range. */
  if (N > 1000) {
    printf("Max size of array: 1000 x 1000\n");
    N = 1000;
  }
  if (N < 1) {
    printf("Min size of array: 1 x 1\n");
    N = 1;
  }
  /* Seed once, then draw one row of A followed by its RHS entry. */
  srand(time(NULL));
  for (row = 0; row < N; row++) {
    for (col = 0; col < N; col++)
      A[row][col] = ((double)rand() /((double)RAND_MAX + 1) * MAX_SIZE_ELEMENT);
    B[row] = ((double)rand() /((double)RAND_MAX + 1) * MAX_SIZE_ELEMENT);
  }
}
/* Dispatch to the solver selected by argv[1] (0=seq, 1=pthreads, 2=OpenMP). */
void run(char **argv) {
  int mode = atoi(argv[1]);
  switch (mode) {
    case 0:
      sequential();          /* single-threaded reference */
      break;
    case 1:
      parallel_pthreads();   /* pipelined pthreads variant */
      break;
    case 2:
      parallel_openMP();     /* round-based OpenMP variant */
      break;
    case 3:
      printf("Not implemented.");
      break;
    default:
      printf("Invalid argument for test\n");
      usage();
      exit(1);
  }
}
/* Print command-line help to stdout. */
void usage() {
  static const char *help[] = {
    "\nUsage: ./guass (0/1/2/3) (N)\n",
    "0 - sequential mode\n",
    "1 - Pthreads mode\n",
    "2 - OpenMP mode\n",
    "3 - Test all (not implemented)\n",
    "N - size of A array\n\n",
  };
  size_t line;
  for (line = 0; line < sizeof help / sizeof help[0]; line++)
    printf("%s", help[line]);
}
/* Display functions */
/* Pretty-print the solution vector X as a bracketed column. */
void print_result() {
  int row;
  printf("\n\tResult: X = \n");
  printf(" _ _ \n");
  printf("| |\n");
  for (row = 0; row < N; row++)
    printf("| %10.2f |\n", X[row]);
  printf("|_ _|\n");
}
/* Pretty-print the augmented system [A|b]; skipped when N > 20 because the
 * output would be unreadably wide. */
void print_data() {
  int row, col;
  if (N > 20) {
    return;
  }
  /* top border */
  printf(" __");
  for (col = 0; col < N; col++) {
    printf(" "); // 8 spaces
  }
  printf(" __\n");
  printf("| ");
  for (col = 0; col < N; col++) {
    printf(" "); // 8 spaces
  }
  printf(" |\n");
  /* each matrix row followed by its RHS entry */
  for (row = 0; row < N; row++) {
    printf("| ");
    for (col = 0; col < N; col++) {
      printf("%10.2f ", A[row][col]);
    }
    printf("| %10.2f |\n", B[row]);
  }
  /* bottom border */
  printf("|__");
  for (col = 0; col < N; col++) {
    printf(" "); // 8 spaces
  }
  printf(" __|\n");
}
/* new clock */
/* timing code is from stackoverflow */
/* https://stackoverflow.com/questions/459691/best-timing-method-in-c */
/*
 * Compute *result = end - start for struct timeval values.
 *
 * Returns 1 when the difference is negative (end earlier than start),
 * 0 otherwise.  The result is normalized so 0 <= tv_usec < 1000000: the
 * original crammed the borrow logic onto one unreadable line and could
 * leave tv_usec negative (its only caller summed the two fields, which
 * happened to work, but the normalized form is safe for any consumer).
 * Follows the classic GNU libc manual timeval-subtraction idiom.
 */
int timeval_sub(struct timeval *result, struct timeval end, struct timeval start) {
  /* Borrow from tv_sec so the microsecond subtraction cannot go negative. */
  if (end.tv_usec < start.tv_usec) {
    int borrow = (start.tv_usec - end.tv_usec) / 1000000 + 1;
    start.tv_usec -= 1000000 * borrow;
    start.tv_sec += borrow;
  }
  /* Carry whole seconds out of an oversized microsecond difference. */
  if (end.tv_usec - start.tv_usec > 1000000) {
    int carry = (end.tv_usec - start.tv_usec) / 1000000;
    start.tv_usec += 1000000 * carry;
    start.tv_sec -= carry;
  }
  result->tv_sec = end.tv_sec - start.tv_sec;
  result->tv_usec = end.tv_usec - start.tv_usec;
  return end.tv_sec < start.tv_sec;
}
/*
 * One-shot stopwatch built around a function-local static start time.
 *
 * end == 0: record the start timestamp, return 0.
 * end == 1: return seconds elapsed since the last end==0 call.
 * end == 2: print the elapsed seconds, then fall through to return -1.
 * Returns -1 when the measured interval is negative or after the end==2
 * print (quirky but relied on by callers).
 */
float set_exec_time(int end) {
static struct timeval time_start; /* survives across calls; written by end==0 */
struct timeval time_end;
struct timeval time_diff;
if (end) {
gettimeofday(&time_end, NULL);
/* timeval_sub() returns 0 when the interval is non-negative */
if (timeval_sub(&time_diff, time_end, time_start) == 0) {
if (end == 1)
//printf("\nexec time: %1.2fs\n", time_diff.tv_sec + (time_diff.tv_usec / 1000000.0f));
return time_diff.tv_sec + (time_diff.tv_usec / 1000000.0f);
else if (end == 2)
printf("%1.2fs",
time_diff.tv_sec + (time_diff.tv_usec / 1000000.0f));
}
return -1;
}
gettimeofday(&time_start, NULL);
return 0;
}
/* Record the reference instant for a later print_exec_timer() call. */
void start_exec_timer() {
  (void) set_exec_time(0); /* mode 0 = store the start timestamp */
}
/* Return seconds elapsed since start_exec_timer(), or -1 on error. */
float print_exec_timer() {
  float elapsed = set_exec_time(1); /* mode 1 = compute and return */
  return elapsed;
}
/* old clock */
/* Legacy timer: thin wrapper over clock() kept for old call sites. */
clock_t getTime() {
  clock_t now = clock();
  return now;
}
/* old clock */
/* Legacy timer: convert a clock_t interval (t2 - t1) to milliseconds. */
float diffTime(clock_t t1, clock_t t2) {
  float seconds = (float)(t2 - t1) / (float)CLOCKS_PER_SEC;
  return seconds * 1000;
}
/* Wrapper Functions */
/* Run Gaussian elimination sequentially, then back-substitute for X and
 * print the reduced system and solution. */
void sequential() {
printf("\tExecuting sequentially.\n");
guass_seq();
print_data();
back_sub();
print_result();
}
/* Run the pipelined pthreads elimination, then sequential back substitution
 * (per the file header, only elimination is parallelized). */
void parallel_pthreads() {
printf("\tExecuting in Parallel using Pthreads.\n");
guass_pthreads2();
print_data();
back_sub();
print_result();
}
/* Run the OpenMP elimination, then sequential back substitution. */
void parallel_openMP() {
printf("\tExecuting in Parallel using openMP.\n");
guass_openMP();
print_data();
back_sub();
print_result();
}
/* Main useful code */
/*
 * Forward elimination without pivoting: for each pivot row k, subtract a
 * scaled copy of it from every row below so column k becomes zero there.
 * Diagonals stay unnormalized; back_sub() divides by A[k][k] later.
 */
void guass_seq() {
  int k, r, c;
  float scale;
  for (k = 0; k < N - 1; k++) {
    for (r = k + 1; r < N; r++) {
      scale = A[r][k] / A[k][k];
      for (c = k; c < N; c++)
        A[r][c] -= A[k][c] * scale;
      B[r] -= B[k] * scale;
    }
  }
}
/*
 * Pipelined pthreads elimination: one worker per row j = 1..N-1.
 *
 * Worker j must not use pivot row i until row i is fully reduced, signalled
 * through semaphore Locks[i] (all start at 0; Locks[0] is posted here since
 * row 0 is never modified).  The previous version looped j up to N
 * inclusive, overflowing the threads[N] VLA and spawning a worker for a row
 * that does not exist (valid rows are 0..N-1); both loops now stop at N-1.
 */
void guass_pthreads2() {
  int i, j;
  pthread_t threads[N];
  int rc; /* return code from pthread_create and sem_init */
  /* init semaphores in the "locked" (zero) state */
  for (i = 0; i < N; i++) {
    /* int sem_init(sem_t *sem, int pshared, unsigned int value);
     * calls can be sem_wait(), sem_try_wait(), sem_post() and sem_destroy() */
    rc = sem_init(&Locks[i], 1, 0); // create with value 0 and shared
    if (rc) {
      printf("ERROR!!!!; sem_init failed with %d.\n", rc);
      exit(-3);
    }
  }
  for (j = 1; j < N; j++) { /* rows 1..N-1; row 0 is the first pivot row */
    pthread_data_array[j].i = 0; // not needed
    pthread_data_array[j].j = j; // determines row
    rc = pthread_create(&threads[j], NULL, poutine2, (void *) &pthread_data_array[j]);
    if (rc) {
      printf("Error!!!!!; pthread_create failed with %d\n", rc);
      exit(-2);
    }
  }
  /* Row 0 needs no elimination; posting its lock starts the pipeline. */
  sem_post(&Locks[0]);
  for (j = 1; j < N; j++) {
    pthread_join(threads[j], NULL);
  }
}
/*
 * Worker for guass_pthreads2: eliminates columns 0..j-1 of row j.
 *
 * For each pivot row i < j it spins until Locks[i] has been posted (row i is
 * fully reduced) before using it.  NOTE(review): the sem_getvalue() busy-wait
 * never decrements the semaphore, so one post releases every waiter -- this
 * is a broadcast "done" flag, not a mutual-exclusion lock.
 */
void *poutine2 (void *pthreadarg) {
/* A Quebecois pthread routine */
int i,j,col,isLocked;
float mult;
struct pthread_data *loc_data;
loc_data = (struct pthread_data *) pthreadarg;
i = loc_data->i;
j = loc_data->j; //myrow
/* start at i and Use the i_th row to eliminate the i_th column
 * of the j_th row */
for(i;i<j;i++){
/* spin until row i's "reduced" flag has been posted */
isLocked=0;
while(!isLocked){
sem_getvalue(&Locks[i],&isLocked);
}
/* subtract the scaled pivot row; note col runs to N inclusive, also
 * touching the unused column N of A */
mult = A[j][i]/A[i][i];
for(col=i;col<N+1;col++) {
A[j][col] -= mult * A[i][col];
}
B[j]-= B[i] * mult;
}
/* row j is now fully reduced; let the rows below proceed */
sem_post(&Locks[j]);
/* can call pthread_exit here or else is implied */
pthread_exit(NULL);
}
/*
 * Round-based pthreads elimination (manager/worker).
 *
 * Round i spawns one worker per row below pivot i to clear column i, then
 * joins them all before the next round:
 *
 * x x x x x     x x x x x     x x x x x
 * x x x x x     o x x x x     o x x x x
 * x x x x x --> o x x x x --> o o x x x --> ...
 * x x x x x     o x x x x     o o x x x
 *     0             1             2
 *
 * The previous version looped j up to N inclusive, overflowing the
 * threads[N] VLA (valid indices 0..N-1) and dispatching a worker for
 * nonexistent row N; both loops now stop at N-1.
 */
void guass_pthreads() {
  int i, j;
  pthread_t threads[N];
  int rc; /* return code from pthread_create */
  for (i = 0; i < N - 1; i++) { // O(N) rounds, one per pivot row
    /* Round i: use row i to remove column i of each row j > i. */
    for (j = i + 1; j < N; j++) {
      pthread_data_array[j].i = i;
      pthread_data_array[j].j = j;
      rc = pthread_create(&threads[j], NULL, poutine, (void *) &pthread_data_array[j]);
      if (rc) {
        printf("Error!!!!!; pthread_create failed with %d\n", rc);
        exit(-2);
      }
    }
    for (j = i + 1; j < N; j++) {
      pthread_join(threads[j], NULL);
    }
  }
}
/*
 * Worker for guass_pthreads: one round, one row -- uses pivot row i to
 * eliminate column i of row j.  Reads only loc_data; A, B and N are the
 * file-scope globals.
 */
void *poutine (void *pthreadarg) {
/* A Quebecois pthread routine */
int i,j,col;
float mult;
struct pthread_data *loc_data;
loc_data = (struct pthread_data *) pthreadarg;
i = loc_data->i;
j = loc_data->j;
/* Use the i_th row to eliminate the i_th column
 * of the j_th row */
/* subtract the scaled pivot row; note col runs to N inclusive, also
 * touching the unused column N of A */
mult = A[j][i]/A[i][i];
for(col=i;col<N+1;col++) {
A[j][col] -= mult * A[i][col];
}
B[j]-= B[i] * mult;
/* can call pthread_exit here or else is implied */
pthread_exit(NULL);
}
/*
 * OpenMP elimination: same rounds as the pthreads versions, with the rows
 * below each pivot shared across threads.
 *
 * The original "#pragma omp parallel" had no "for" worksharing clause, so
 * all eight threads redundantly executed the entire j loop and raced on the
 * same rows; "parallel for" hands each row to exactly one thread.  The row
 * bound is also tightened from N+1 to N -- valid rows are 0..N-1 (B holds
 * the augmented column separately) -- and the orphaned barrier/single
 * directives are dropped (the parallel-for's implicit barrier suffices).
 */
void guass_openMP() {
  int i, j, col;
  float mult;
  for (i = 0; i < N - 1; i++) { // O(N) rounds, one per pivot row
#pragma omp parallel for num_threads(8) default(shared) private(col, mult)
    for (j = i + 1; j < N; j++) { /* eliminate column i of each lower row */
      mult = A[j][i] / A[i][i];
      for (col = i; col < N; col++) {
        A[j][col] -= mult * A[i][col];
      }
      B[j] -= B[i] * mult;
    }
    /* implicit barrier at the end of the parallel for */
    printf("Round %d finished.\n", i);
  }
}
/*
 * Back substitution on the upper-triangular system left by the elimination
 * passes.  Diagonals are normalized here (divide by A[k][k]), matching the
 * file-header note that elimination leaves them untouched.
 */
void back_sub() {
  int k, c;
  for (k = N - 1; k >= 0; k--) {
    float acc = B[k];
    /* subtract already-solved unknowns, highest column first (same order
     * as the original so float rounding is identical) */
    for (c = N - 1; c > k; c--)
      acc -= A[k][c] * X[c];
    X[k] = acc / A[k][k];
  }
}
/* REPL */
/* Entry point: parse args, generate the random system, time the selected
 * solver, and report the elapsed time. */
int main(int argc, char **argv) {
float totalTime;
//clock_t startTime,endTime;
/* Main routine */
printf("\n\n\n---------------------------------------------------------------------------------------------------------------\n");
printf("cs 546 HW 2 Shared Memory Programming.\n");
printf("\nStep 1: initializing.\n");
setup(argc,argv);
printf("\tGenerated data: [A|b] =\n");
print_data();
printf("\tStarting Timer\n");
//startTime=getTime();
start_exec_timer();
printf("\nStep 2: running...\n");
run(argv);
//endTime=getTime();
/* print_exec_timer() returns -1 if the interval came out negative */
totalTime = print_exec_timer();
printf("\nTotal Time Spent: %15.6f s\n", totalTime);//diffTime(startTime,endTime));
printf("\n---------------------------------------------------------------------------------------------------------------\n");
/* exit program */
return 0;
}
|
target_data.c | #pragma omp target data clause [clauses]
structured-block
|
HABANA_nms_class.h | #ifndef NMS_F_H
#define NMS_F_H
#include <iostream>
#include <vector>
#include <math.h>
#include <synapse_types.h>
#include <algorithm>
#include <atomic>
#include <HABANA_threadedTask.h>
#include <functional>
#include <omp.h>
#define MAX_BOX_NUM (15130UL)
#define MAX_NUM_OF_FINAL_DET (200U)
#define NUM_OF_CLASSES_MINUS1 (80UL)
#define USE_OPENMP
namespace fastNMS
{
struct sortingPair
{
int16_t m_score;
uint16_t m_boxIndex;
};
/*
 * One decoded detection: corner coordinates, raw integer score, class id,
 * and cached box area (used by the IOU test).
 */
struct DetectionBox
{
float m_x1;
float m_y1;
float m_x2;
float m_y2;
int16_t m_score;
int16_t m_classId;
float m_area;
DetectionBox() = default;
//...LG_REVIEW disable the default ctor
/*
 * Decode one raw network box into corner form.
 *
 * boxPtr is laid out column-major: coordinate k of box b lives at
 * boxPtr[b + k*MAX_BOX_NUM].  xywh_ptr holds 4 consecutive anchor values
 * per box.  The raw values are scaled, the center terms offset by the
 * anchor, the size terms exponentiated, then center/size is converted to
 * corners and the area cached.  NOTE(review): this resembles SSD-style
 * prior-box decoding -- confirm against the model definition.
 */
DetectionBox(sortingPair pairIn, int16_t classIdIn, float *boxPtr, float *xywh_ptr, float scale_xy, float scale_wh) : m_score(pairIn.m_score), m_classId(classIdIn), m_area(0.0)
{
float x,y,w,h;
size_t boxIdx = pairIn.m_boxIndex;
m_x1 = boxPtr[boxIdx + 0*MAX_BOX_NUM];
m_y1 = boxPtr[boxIdx + 1*MAX_BOX_NUM];
m_x2 = boxPtr[boxIdx + 2*MAX_BOX_NUM];
m_y2 = boxPtr[boxIdx + 3*MAX_BOX_NUM];
m_x1*=scale_xy; m_y1*=scale_xy; m_x2*=scale_wh; m_y2*=scale_wh;
x = xywh_ptr[boxIdx*4+0];
y = xywh_ptr[boxIdx*4+1];
w = xywh_ptr[boxIdx*4+2];
h = xywh_ptr[boxIdx*4+3];
/* center offset by anchor; sizes exponentiated and scaled by anchor size */
m_x1 = (m_x1*w) + x;
m_y1 = (m_y1*h) + y;
m_x2 = expf(m_x2) * w;
m_y2 = expf(m_y2) * h;
/* center/size -> corner coordinates */
float left = m_x1 - 0.5 * m_x2;
float top = m_y1 - 0.5 * m_y2;
float right = m_x1 + 0.5 * m_x2;
float bott = m_y1 + 0.5 * m_y2;
m_x1 = left;
m_y1 = top;
m_x2 = right;
m_y2 = bott;
m_area = (m_x2-m_x1)*(m_y2-m_y1);
}
~DetectionBox() {}
/* Overlap area of this box with rhs (0 when the boxes are disjoint). */
float intersection_area(const DetectionBox & rhs) const
{
float n1 = std::max(rhs.m_x1, m_x1);
float n2 = std::min(rhs.m_x2, m_x2);
float m1 = std::max(rhs.m_y1, m_y1);
float m2 = std::min(rhs.m_y2, m_y2);
return (std::max(0.0f, (n2 - n1)) * std::max(0.0f, (m2 - m1)));
}
/*
 * True when IoU(this, rhs) >= threshold, rearranged to avoid a division:
 * inter/(areaA+areaB-inter) >= t  <=>  (1+t)*inter >= t*(areaA+areaB).
 */
bool IOU(const DetectionBox & rhs, float threshold) const
{
float inter_area = intersection_area(rhs);
float threshold_inter = 1.0f+threshold;
if (threshold_inter * inter_area < threshold*(m_area + rhs.m_area))
return false;
return true;
}
};
/*
 * Per-class NMS worker covering class ids [m_startClassId, m_endClassId).
 *
 * Per processClasses() call: for each class, gather box indices above the
 * score threshold (addnewPairs), decode them into DetectionBox records
 * (sortAndFixup), then greedily suppress overlaps (classNMS).  Survivors
 * accumulate in m_sortingVector up to m_lastDet.
 */
class executeNms
{
private:
/* score/index pairs for the class currently being processed */
sortingPair m_classIdSortingVect[MAX_BOX_NUM];
/* backing storage for decoded boxes; m_sortingVector points into it */
std::vector<DetectionBox> m_detectionPlaceHolder;
std::vector<DetectionBox *> m_sortingVector;
/* one past the last surviving detection in m_sortingVector */
std::vector<DetectionBox*>::iterator m_lastDet;
float *m_xywh_ptr;
float m_scale_xy;
float m_scale_wh;
float m_criteria;
int16_t m_score_threshold;
uint32_t m_startClassId;
uint32_t m_endClassId;
int16_t *m_score_ptr;
float *m_box_ptr;
/* descending-score orderings for pairs and for box pointers */
static bool cmpPair(sortingPair l, sortingPair r)
{
return (l.m_score > r.m_score);
}
static bool cmpPointer(DetectionBox *l, DetectionBox *r)
{
return (l->m_score > r->m_score);
}
/*
 * Collect every box whose score exceeds the threshold into
 * m_classIdSortingVect, sort descending by score, and return how many the
 * caller should decode (capped at MAX_NUM_OF_FINAL_DET).
 */
uint32_t addnewPairs(int16_t *score_ptr)
{
uint16_t boxNum;
uint32_t count=0;
int16_t score_threshold = m_score_threshold;
sortingPair *ptr = m_classIdSortingVect;
for(boxNum = 0; boxNum < MAX_BOX_NUM;boxNum++)
{
if(score_ptr[boxNum] > score_threshold)
{
ptr->m_score = score_ptr[boxNum];
ptr->m_boxIndex = boxNum;
ptr++;
}
}
count = std::distance(m_classIdSortingVect,ptr);
if(count < 2)
return count;
std::sort(m_classIdSortingVect,ptr,cmpPair);
return std::min(count,MAX_NUM_OF_FINAL_DET);
}
/*
 * Decode the first numOfSortedElems sorted pairs into DetectionBox records
 * at `end` (placeholder storage) and append their addresses at `end_ptr`;
 * both iterators are advanced past the new entries.
 */
void sortAndFixup(int16_t classIdIn,
float *boxPtr,
uint32_t numOfSortedElems,
std::vector<DetectionBox>::iterator &end,
std::vector<DetectionBox*>::iterator &end_ptr)
{
std::vector<sortingPair>::iterator sortedIt;
uint32_t sortedIndex;
float *xywh_ptr = m_xywh_ptr;
float scale_xy = m_scale_xy;
float scale_wh = m_scale_wh;
for(sortedIndex =0; sortedIndex < numOfSortedElems;sortedIndex++)
{
*end = DetectionBox(m_classIdSortingVect[sortedIndex],classIdIn,boxPtr,xywh_ptr,scale_xy,scale_wh);
*end_ptr = &*end;
end++;
end_ptr++;
}
}
/*
 * Greedy NMS over [start_ptr, end_ptr): keep the current highest-scoring
 * box (entries arrive score-sorted), compact the boxes that do NOT overlap
 * it past m_criteria, and repeat on the compacted tail.  end_ptr shrinks
 * to the new end; on return start_ptr == end_ptr.
 */
void classNMS(std::vector<DetectionBox*>::iterator &start_ptr,
std::vector<DetectionBox*>::iterator &end_ptr)
{
std::vector<DetectionBox*>::iterator check;
std::vector<DetectionBox*>::iterator copy;
float criteria = m_criteria;
while(start_ptr != end_ptr)
{
check = start_ptr+1;
if(check == end_ptr)
break;
copy = start_ptr+1;
do
{
if((*start_ptr)->IOU(**check,criteria) == false)
{
*copy = *check;
copy++;
}
check++;
} while (check != end_ptr);
*copy = nullptr; //debug marker
end_ptr = copy;
start_ptr++;
}
start_ptr = end_ptr;
}
public:
/*
 * Sizes the placeholder/sorting buffers for numOfClasses classes (at most
 * MAX_NUM_OF_FINAL_DET survivors per class) and stores the decode/NMS
 * parameters.  Score and box pointers are supplied later via
 * setParamsForProcess().
 */
executeNms(size_t numOfClasses,
float *xywh_ptr,
float scale_xy,
float scale_wh,
float criteria,
int16_t score_threshold,
uint32_t startClassId,
uint32_t endClassId)
{
m_detectionPlaceHolder.resize((MAX_NUM_OF_FINAL_DET)*numOfClasses);
m_sortingVector.resize((MAX_NUM_OF_FINAL_DET)*numOfClasses);
m_lastDet = m_sortingVector.begin();
m_xywh_ptr = xywh_ptr;
m_scale_xy = scale_xy;
m_scale_wh = scale_wh;
m_criteria = criteria;
m_startClassId = startClassId;
m_endClassId = endClassId;
m_score_threshold = score_threshold;
m_box_ptr = nullptr;
m_score_ptr = nullptr;
}
/*
 * Move constructor.  NOTE(review): m_lastDet is copied as-is; it points
 * into the moved-from vector's storage, which std::vector move transfers,
 * but iterators equal to end() are not guaranteed to remain valid --
 * verify callers only move freshly-constructed instances.  memcpy on the
 * POD pair array requires <cstring>, which this header does not include
 * directly -- confirm it arrives via another include.
 */
executeNms(executeNms &&other) : m_detectionPlaceHolder(std::move(other.m_detectionPlaceHolder)),
m_sortingVector(std::move(other.m_sortingVector))
{
memcpy(m_classIdSortingVect,other.m_classIdSortingVect, MAX_BOX_NUM*sizeof(sortingPair));
m_xywh_ptr = other.m_xywh_ptr;
m_box_ptr = other.m_box_ptr;
m_criteria = other.m_criteria;
m_endClassId = other.m_endClassId;
m_lastDet = other.m_lastDet;
m_scale_wh = other.m_scale_wh;
m_scale_xy = other.m_scale_xy;
m_score_ptr = other.m_score_ptr;
m_score_threshold = other.m_score_threshold;
m_startClassId = other.m_startClassId;
}
executeNms(const executeNms& other) = delete;
executeNms& operator=(const executeNms & other) = delete;
/* Point this worker at the current frame's score and box tensors. */
void setParamsForProcess(int16_t *score_ptr,
float *box_ptr)
{
m_score_ptr = score_ptr;
m_box_ptr = box_ptr;
}
/*
 * Run threshold -> decode -> per-class NMS for every class in this
 * worker's range; survivors of all classes end up in
 * [m_sortingVector.begin(), m_lastDet).
 */
void processClasses()
{
size_t classId;
size_t boxNum;
std::vector<DetectionBox>::iterator end = m_detectionPlaceHolder.begin();
std::vector<DetectionBox*>::iterator start_ptr = m_sortingVector.begin();
std::vector<DetectionBox*>::iterator end_ptr = m_sortingVector.begin();
uint32_t numOfElem;
m_lastDet = m_sortingVector.begin();
for(classId = m_startClassId; classId < m_endClassId; classId++)
{
/* scores for class c start at offset c*MAX_BOX_NUM */
numOfElem = addnewPairs(&m_score_ptr[classId*MAX_BOX_NUM]);
if(numOfElem > 0)
{
sortAndFixup(classId,m_box_ptr, numOfElem,end, end_ptr);
//*end_ptr = nullptr; //debug marker
/* classNMS leaves start_ptr == end_ptr == new end, so the next
 * class appends right after this class's survivors */
classNMS(start_ptr, end_ptr);
}
}
m_lastDet = end_ptr;
}
/*
 * Single-worker finish: stable-sort this worker's survivors by descending
 * score and append up to MAX_NUM_OF_FINAL_DET copies to `result`.
 * Returns false when there are no survivors.  Resets m_lastDet.
 */
bool selfSort(std::vector<DetectionBox> &result)
{
int32_t i;
if(m_lastDet == m_sortingVector.begin())
return false;
std::vector<DetectionBox*>::iterator end_ptr = m_lastDet, start_ptr;
m_lastDet = m_sortingVector.begin();
std::stable_sort(m_sortingVector.begin(),end_ptr,cmpPointer);
for(i=0,start_ptr = m_sortingVector.begin();(i < MAX_NUM_OF_FINAL_DET) && (start_ptr != end_ptr);start_ptr++,i++)
{
result.push_back(**start_ptr);
}
return true;
}
/*
 * Multi-worker finish: append this worker's surviving box pointers to the
 * shared merge vector (caller sorts/caps).  Resets m_lastDet.
 */
void extMerge(std::vector<DetectionBox*> &mergeVector)
{
if(m_lastDet == m_sortingVector.begin())
return;
std::vector<DetectionBox*>::iterator copyIt, endPtr = m_lastDet;
m_lastDet = m_sortingVector.begin();
for(copyIt = m_sortingVector.begin(); copyIt != endPtr;copyIt++)
{
mergeVector.push_back(*copyIt);
}
}
/* Number of survivors currently held (distance begin -> m_lastDet). */
uint32_t getSortingBufSize()
{
return std::distance(m_sortingVector.begin(),m_lastDet);
}
};
class nmsThreadRunnerOmp
{
public:
nmsThreadRunnerOmp(uint32_t maxThreadNum,
float *xywh_ptr,
float scale_xy,
float scale_wh,
float criteria,
int16_t score_threshold)
{
uint32_t step = NUM_OF_CLASSES_MINUS1/maxThreadNum;
m_partialClassNmsDecoders.reserve(maxThreadNum);
for(uint32_t i = 0; i < maxThreadNum;i++)
{
m_partialClassNmsDecoders.emplace_back(step,xywh_ptr,scale_xy,scale_wh,criteria,score_threshold,i * step + 1,(i+1) *step + 1);
}
m_mergedSortingVect.reserve(MAX_NUM_OF_FINAL_DET*NUM_OF_CLASSES_MINUS1);
}
nmsThreadRunnerOmp(const nmsThreadRunnerOmp&) = delete;
nmsThreadRunnerOmp& operator=(const nmsThreadRunnerOmp &) = delete;
static bool cmp(DetectionBox *l, DetectionBox *r)
{
return (l->m_score > r->m_score);
}
void multiThreadedNmsExecution(int16_t *score_ptr,
float *box_ptr,
std::vector<DetectionBox> &finalRes)
{
m_mergedSortingVect.clear();
size_t numOfThreads = m_partialClassNmsDecoders.size();
if(numOfThreads > 1)
{
for(int i=0; i < numOfThreads;i++)
m_partialClassNmsDecoders[i].setParamsForProcess(score_ptr,box_ptr);
#pragma omp parallel
{
size_t tid = omp_get_thread_num();
m_partialClassNmsDecoders[tid].processClasses();
}
std::vector<DetectionBox*>::iterator sortingVecIt = m_mergedSortingVect.begin();
for(int i=0;i < numOfThreads;i++)
{
if(m_partialClassNmsDecoders[i].getSortingBufSize() > 0)
m_partialClassNmsDecoders[i].extMerge(m_mergedSortingVect);
}
if(m_mergedSortingVect.size() > 0)
{
uint32_t i;
std::stable_sort(m_mergedSortingVect.begin(),m_mergedSortingVect.end(),cmp);
for(i=0,sortingVecIt = m_mergedSortingVect.begin();((i < MAX_NUM_OF_FINAL_DET) && (sortingVecIt != m_mergedSortingVect.end()));sortingVecIt++,i++)
{
finalRes.push_back(**sortingVecIt);
}
}
}
else
{
m_partialClassNmsDecoders[0].setParamsForProcess(score_ptr,box_ptr);
m_partialClassNmsDecoders[0].processClasses();
m_partialClassNmsDecoders[0].selfSort(finalRes);
}
}
private:
std::vector<executeNms> m_partialClassNmsDecoders;
std::vector<DetectionBox *> m_mergedSortingVect;
};
// Multi-threaded NMS driver backed by a persistent pool of ThrTask worker
// threads (one per class-range decoder); per-frame results are merged and
// globally sorted by score.
class nmsThreadRunnerTsk
{
public:
// Partition the classes into 'maxThreadNum' contiguous ranges of 'step'
// classes each, starting at class index 1 (class 0 is skipped --
// presumably the background class, TODO confirm).  When maxThreadNum > 1
// a worker thread plus its process/post-process functors is created per
// range.  NOTE(review): when NUM_OF_CLASSES_MINUS1 is not divisible by
// maxThreadNum, the trailing remainder classes are never assigned to any
// decoder -- confirm this is intended.
nmsThreadRunnerTsk(uint32_t maxThreadNum,
float *xywh_ptr,
float scale_xy,
float scale_wh,
float criteria,
int16_t score_threshold)
{
uint32_t step = NUM_OF_CLASSES_MINUS1/maxThreadNum;
m_partialClassNmsDecoders.reserve(maxThreadNum);
for(uint32_t i = 0; i < maxThreadNum;i++)
{
// decoder i owns classes [i*step + 1, (i+1)*step + 1)
m_partialClassNmsDecoders.emplace_back(step,xywh_ptr,scale_xy,scale_wh,criteria,score_threshold,i * step + 1,(i+1) *step + 1);
}
m_mergedSortingVect.reserve(MAX_NUM_OF_FINAL_DET*NUM_OF_CLASSES_MINUS1);
if(maxThreadNum > 1)
{
m_threadPool.reserve(maxThreadNum);
// fill the functor vectors completely before constructing the workers
// that receive them, so no reallocation happens afterwards
for(uint32_t i = 0; i < maxThreadNum;i++)
{
m_processFunctors.push_back([i,this](){this->m_partialClassNmsDecoders[i].processClasses();});
m_postProcFunctors.push_back([this](){this->decDoneCnt();});
}
for(uint32_t i = 0; i < maxThreadNum;i++)
{
m_threadPool.emplace_back(m_processFunctors[i],m_postProcFunctors[i]);
}
}
}
// Tear down the workers first (their destructors presumably stop/join the
// threads -- TODO confirm), then the functors they referenced.
~nmsThreadRunnerTsk()
{
while (m_threadPool.size() > 0)
{
m_threadPool.pop_back();
}
while (m_processFunctors.size() > 0)
{
m_processFunctors.pop_back();
}
while (m_postProcFunctors.size() > 0)
{
m_postProcFunctors.pop_back();
}
}
// Non-constructible by copy/move: the worker functors capture 'this'.
nmsThreadRunnerTsk(nmsThreadRunnerTsk &&other) = delete;
nmsThreadRunnerTsk& operator=(nmsThreadRunnerTsk &&other) = delete;
nmsThreadRunnerTsk() = delete;
nmsThreadRunnerTsk(const nmsThreadRunnerTsk&) = delete;
nmsThreadRunnerTsk& operator=(const nmsThreadRunnerTsk &) = delete;
// Descending-score ordering for the final stable sort.
static bool cmp(DetectionBox *l, DetectionBox *r)
{
return (l->m_score > r->m_score);
}
// Run NMS over all classes for one frame.  With a thread pool: dispatch
// one task per worker, spin until every worker has signalled completion
// via decDoneCnt(), merge the per-worker candidate lists, stable-sort by
// score and keep the top MAX_NUM_OF_FINAL_DET boxes.  With a single
// decoder everything runs inline on the calling thread.
void multiThreadedNmsExecution(int16_t *score_ptr,
float *box_ptr,
std::vector<DetectionBox> &finalRes)
{
m_mergedSortingVect.clear();
size_t numOfThreads = m_threadPool.size();
if(numOfThreads > 1)
{
for(int32_t i = 0; i < numOfThreads;i++)
{
m_partialClassNmsDecoders[i].setParamsForProcess(score_ptr,box_ptr);
}
// m_doneCnt must be armed before any task can run its post-functor
m_doneCnt = numOfThreads;
for(int32_t tid = 0; tid < numOfThreads;tid++)
{
m_threadPool[tid].sendTask();
}
// busy-wait for completion; NOTE(review): this spin burns a CPU core --
// consider a condition variable or futex-style wait
while(m_doneCnt > 0);
std::vector<DetectionBox*>::iterator sortingVecIt = m_mergedSortingVect.begin();
// collect the non-empty per-decoder candidate lists into one vector
for(int i=0;i < numOfThreads;i++)
{
if(m_partialClassNmsDecoders[i].getSortingBufSize() > 0)
m_partialClassNmsDecoders[i].extMerge(m_mergedSortingVect);
}
if(m_mergedSortingVect.size() > 0)
{
std::stable_sort(m_mergedSortingVect.begin(),m_mergedSortingVect.end(),cmp);
uint32_t i;
// keep at most MAX_NUM_OF_FINAL_DET best detections
for(i=0,sortingVecIt = m_mergedSortingVect.begin();((i < MAX_NUM_OF_FINAL_DET) && (sortingVecIt != m_mergedSortingVect.end()));sortingVecIt++,i++)
{
finalRes.push_back(**sortingVecIt);
}
}
}
else
{
m_partialClassNmsDecoders[0].setParamsForProcess(score_ptr,box_ptr);
m_partialClassNmsDecoders[0].processClasses();
m_partialClassNmsDecoders[0].selfSort(finalRes);
}
}
private:
// Completion signal invoked by each worker's post-process functor.
void decDoneCnt()
{
m_doneCnt--;
}
std::atomic<uint32_t> m_doneCnt;  // workers still running for the current frame
std::vector<std::function<void(void)>> m_processFunctors;   // per-worker NMS jobs
std::vector<std::function<void(void)>> m_postProcFunctors;  // per-worker done signals
std::vector<ThrTask::ThreadedTask<std::function<void(void)>,std::function<void(void)>>> m_threadPool;
std::vector<executeNms> m_partialClassNmsDecoders;  // one decoder per class range
std::vector<DetectionBox *> m_mergedSortingVect;    // merged candidates from all decoders
};
};
#endif
|
matrixMultiplication.c | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
#include "mkl.h"
#include "papi.h"
#define THRESHOLD 32768 /* product size below which matmultleaf is used */
double matrixMultiplication_noBlock(int matrix_size, int nthreads){
    /*
     * C = A * B with the naive i-j-k triple loop; iterations of the outer
     * (row) loop are shared across the OpenMP team.  A and B are filled
     * with random values in [0, 1).
     *
     * matrix_size: rows/columns of the square matrices
     * nthreads:    OpenMP threads to request
     * returns:     elapsed wall-clock seconds for the multiply
     *
     * Fix: the original timed with clock(), which under OpenMP sums the
     * CPU time of every thread and therefore hides any parallel speedup;
     * omp_get_wtime() reports wall time instead.
     */
    srand(time(NULL));
    double **A;
    double **B;
    double **C;
    double start_t, end_t;
    double total_t;
    int tid, nthread, i, j, k;

    /* allocate row-pointer arrays, then one row at a time */
    A = (double **)malloc(sizeof(double *)*matrix_size);
    B = (double **)malloc(sizeof(double *)*matrix_size);
    C = (double **)malloc(sizeof(double *)*matrix_size);
    for(i = 0; i < matrix_size; ++i){
        A[i] = (double *)malloc(sizeof(double)*matrix_size);
        B[i] = (double *)malloc(sizeof(double)*matrix_size);
        C[i] = (double *)malloc(sizeof(double)*matrix_size);
    }

    /* random operands in [0, 1); zeroed result */
    for (i = 0; i < matrix_size; ++i){
        for (j = 0; j < matrix_size; ++j){
            A[i][j] = (rand()%100)/100.0;
            B[i][j] = (rand()%100)/100.0;
            C[i][j] = 0.0;
        }
    }

    start_t = omp_get_wtime();
    omp_set_dynamic(0);                /* honor the exact thread count */
    omp_set_num_threads(nthreads);
    #pragma omp parallel shared(A, B, C, nthread) private(tid, i, j, k)
    {
        tid = omp_get_thread_num();
        if (tid == 0)
        {
            nthread = omp_get_num_threads();
            printf("Starting matrix multiplication with %d threads\n",nthread);
        }
        /* share iterations of the outer row loop among the threads */
        #pragma omp for schedule(static)
        for (i = 0; i < matrix_size; ++i)
        {
            for (j = 0; j < matrix_size; ++j)
            {
                C[i][j] = 0.0;
                for (k = 0; k < matrix_size; ++k)
                {
                    C[i][j] = C[i][j] + A[i][k]*B[k][j];
                }
            }
        }
    }
    end_t = omp_get_wtime();
    total_t = end_t - start_t;

    /* release everything before returning the timing */
    for(i = 0; i < matrix_size; ++i){
        free(A[i]);
        free(B[i]);
        free(C[i]);
    }
    free(A);
    free(B);
    free(C);
    return total_t;
}
double matrixMultiplication_Block(int matrix_size, int block_size, int nthreads){
    /*
     * C = A * B with loop blocking: the result matrix is walked in
     * block_size x block_size tiles and each tile's rows are split across
     * the OpenMP team.  Returns elapsed wall-clock seconds.
     *
     * Fixes vs. the original:
     *  - the inner product read B[k][j+k] instead of B[k][j+n], producing
     *    wrong results and out-of-bounds reads for large k;
     *  - timing now uses omp_get_wtime() (clock() sums CPU time over all
     *    threads and hides parallel speedup).
     */
    srand(time(NULL));
    double **A;
    double **B;
    double **C;
    double start_t, end_t;
    double total_t;
    int tid, i, j, k, l, n;

    A = (double **)malloc(sizeof(double *)*matrix_size);
    B = (double **)malloc(sizeof(double *)*matrix_size);
    C = (double **)malloc(sizeof(double *)*matrix_size);
    for(i = 0; i < matrix_size; ++i){
        A[i] = (double *)malloc(sizeof(double)*matrix_size);
        B[i] = (double *)malloc(sizeof(double)*matrix_size);
        C[i] = (double *)malloc(sizeof(double)*matrix_size);
    }

    /* random operands in [0, 1); C starts at zero and is accumulated into */
    for (i = 0; i < matrix_size; ++i){
        for (j = 0; j < matrix_size; ++j){
            A[i][j] = (rand()%100)/100.0;
            B[i][j] = (rand()%100)/100.0;
            C[i][j] = 0.0;
        }
    }

    start_t = omp_get_wtime();
    #pragma omp parallel num_threads(nthreads) shared(A, B, C) private(tid, i, j, k, l, n)
    {
        tid = omp_get_thread_num();
        if (tid == 0)
        {
            nthreads = omp_get_num_threads();
            printf("Starting matrix multiple example with %d threads\n",nthreads);
        }
        /* every thread walks the same tile sequence; the "omp for" below
         * splits each tile's rows so the team cooperates on one tile */
        for (i = 0; i < matrix_size; i+=block_size)
        {
            for (j = 0; j < matrix_size; j += block_size)
            {
                #pragma omp for schedule(static)
                for (l = 0; l < block_size; ++l)
                {
                    for(n = 0; n < block_size; ++n)
                    {
                        for (k = 0; k < matrix_size; ++k)
                        {
                            C[i+l][j+n] += A[i+l][k]*B[k][j+n];
                        }
                    }
                }
            }
        }
    }
    end_t = omp_get_wtime();
    total_t = end_t - start_t;

    for(i = 0; i < matrix_size; ++i){
        free(A[i]);
        free(B[i]);
        free(C[i]);
    }
    free(A);
    free(B);
    free(C);
    return total_t;
}
void matmultleaf(double **A, double ** B, double ** C, int mf, int ml, int nf, int nl, int pf, int pl){
    /*
     * Triple-loop kernel: multiply the A[mf..ml)[pf..pl) submatrix by the
     * B[pf..pl)[nf..nl) submatrix and store the product in C[mf..ml)[nf..nl),
     * overwriting whatever C held there.
     *
     * mf, ml: first and last+1 i (row) index
     * nf, nl: first and last+1 j (column) index
     * pf, pl: first and last+1 k (inner-product) index
     *
     * Fix: the row loop started at 0 instead of mf, so for mf > 0 it wrote
     * rows outside the requested submatrix.
     */
    int i, j, k;
    for (i = mf; i < ml; ++i)
    {
        for (j = nf; j < nl; ++j)
        {
            C[i][j] = 0.0;
            for (k = pf; k < pl; ++k)
            {
                C[i][j] += A[i][k]*B[k][j];
            }
        }
    }
}
void matmultleaf_block(double **A, double ** B, double ** C, int mf, int ml, int nf, int nl, int pf, int pl, int block_size){
    /*
     * Cache-blocked triple-loop kernel: C[mf..ml)[nf..nl) =
     * A[mf..ml)[pf..pl) * B[pf..pl)[nf..nl), walking C in
     * block_size x block_size tiles (partial tiles at the edges are
     * handled, so the ranges need not be multiples of block_size).
     *
     * Fixes vs. the original: the inner product read B[k][j+k] (wrong
     * element, out-of-bounds for large k); the k loop ignored pf and the
     * row loop ignored mf; and only the tile's corner element was zeroed,
     * leaving the rest of C accumulating stale data.
     */
    int i, j, k, l, n;
    if(block_size >= 32){
        block_size = 8;   /* inherited cache heuristic: cap oversized tiles */
    }
    /* zero the whole target region first, then accumulate */
    for (i = mf; i < ml; ++i)
        for (j = nf; j < nl; ++j)
            C[i][j] = 0.0;
    for (i = mf; i < ml; i += block_size)
    {
        for (j = nf; j < nl; j += block_size)
        {
            int iend = (i + block_size < ml) ? i + block_size : ml;
            int jend = (j + block_size < nl) ? j + block_size : nl;
            for (l = i; l < iend; ++l)
            {
                for (n = j; n < jend; ++n)
                {
                    for (k = pf; k < pl; ++k)
                    {
                        C[l][n] += A[l][k]*B[k][n];
                    }
                }
            }
        }
    }
}
void splitMatrix(double **X, int m, double **Y, int mf, int nf){
    /*
     * Make X an m-row "view" of Y offset by (mf, nf): each X[row] points
     * into the corresponding row of Y starting at column nf.  No data is
     * copied — writes through X modify Y.
     */
    int row;
    for (row = 0; row < m; ++row)
        X[row] = &Y[mf + row][nf];
}
void AddMatBlocks(double **T, int m, int n, double **X, double **Y)
{
    /* T = X + Y, elementwise, over an m-by-n block. */
    for (int r = 0; r < m; r++) {
        double *t = T[r];
        double *x = X[r];
        double *y = Y[r];
        for (int c = 0; c < n; c++)
            t[c] = x[c] + y[c];
    }
}
void SubMatBlocks(double **T, int m, int n, double **X, double **Y)
{
    /* T = X - Y, elementwise, over an m-by-n block. */
    for (int r = 0; r < m; r++) {
        double *t = T[r];
        double *x = X[r];
        double *y = Y[r];
        for (int c = 0; c < n; c++)
            t[c] = x[c] - y[c];
    }
}
void StrassenMM(double **A, double **B, double **C, int n, int flag, int block_size){
    /*
     * Strassen recursive multiply: C = A * B for n-by-n matrices.
     *
     * flag == 1 -> plain triple-loop leaf kernel (matmultleaf)
     * otherwise -> blocked leaf kernel (matmultleaf_block)
     *
     * The seven sub-products M1..M7 are spawned as OpenMP tasks; they only
     * run concurrently when the caller provides an enclosing parallel
     * region (e.g. "#pragma omp parallel" + "#pragma omp single").
     *
     * Fixes vs. the original:
     *  - replaced C++ new[]/delete[] with malloc/free (this is a .c file);
     *  - odd n now falls back to the leaf kernel: splitting odd n as n/2
     *    silently dropped the last row and column of the result.
     */
    if (n*n*n < THRESHOLD || (n % 2) != 0)
    {
        if (flag == 1)
        {
            matmultleaf(A, B, C, 0, n, 0, n, 0, n);
        }else{
            matmultleaf_block(A, B, C, 0, n, 0, n, 0, n, block_size);
        }
    }
    else
    {
        int m = n/2;
        int i;
        /* the seven Strassen products, each an m-by-m scratch matrix */
        double ** M1 = (double **)malloc(sizeof(double*)*m);
        double ** M2 = (double **)malloc(sizeof(double*)*m);
        double ** M3 = (double **)malloc(sizeof(double*)*m);
        double ** M4 = (double **)malloc(sizeof(double*)*m);
        double ** M5 = (double **)malloc(sizeof(double*)*m);
        double ** M6 = (double **)malloc(sizeof(double*)*m);
        double ** M7 = (double **)malloc(sizeof(double*)*m);
        /* operand workspaces holding the sums/differences for each product */
        double ** wAM1 = (double **)malloc(sizeof(double*)*m);
        double ** wBM1 = (double **)malloc(sizeof(double*)*m);
        double ** wAM2 = (double **)malloc(sizeof(double*)*m);
        double ** wBM3 = (double **)malloc(sizeof(double*)*m);
        double ** wBM4 = (double **)malloc(sizeof(double*)*m);
        double ** wAM5 = (double **)malloc(sizeof(double*)*m);
        double ** wAM6 = (double **)malloc(sizeof(double*)*m);
        double ** wBM6 = (double **)malloc(sizeof(double*)*m);
        double ** wAM7 = (double **)malloc(sizeof(double*)*m);
        double ** wBM7 = (double **)malloc(sizeof(double*)*m);
        /* quadrant views: row-pointer aliases into A, B and C (no copies) */
        double **A11 = (double **)malloc(sizeof(double*)*m);
        double **A12 = (double **)malloc(sizeof(double*)*m);
        double **A21 = (double **)malloc(sizeof(double*)*m);
        double **A22 = (double **)malloc(sizeof(double*)*m);
        double **B11 = (double **)malloc(sizeof(double*)*m);
        double **B12 = (double **)malloc(sizeof(double*)*m);
        double **B21 = (double **)malloc(sizeof(double*)*m);
        double **B22 = (double **)malloc(sizeof(double*)*m);
        double **C11 = (double **)malloc(sizeof(double*)*m);
        double **C12 = (double **)malloc(sizeof(double*)*m);
        double **C21 = (double **)malloc(sizeof(double*)*m);
        double **C22 = (double **)malloc(sizeof(double*)*m);
        for(i=0; i<m; i++){
            M1[i] = (double *)malloc(sizeof(double)*m);
            M2[i] = (double *)malloc(sizeof(double)*m);
            M3[i] = (double *)malloc(sizeof(double)*m);
            M4[i] = (double *)malloc(sizeof(double)*m);
            M5[i] = (double *)malloc(sizeof(double)*m);
            M6[i] = (double *)malloc(sizeof(double)*m);
            M7[i] = (double *)malloc(sizeof(double)*m);
            wAM1[i] = (double *)malloc(sizeof(double)*m);
            wBM1[i] = (double *)malloc(sizeof(double)*m);
            wAM2[i] = (double *)malloc(sizeof(double)*m);
            wBM3[i] = (double *)malloc(sizeof(double)*m);
            wBM4[i] = (double *)malloc(sizeof(double)*m);
            wAM5[i] = (double *)malloc(sizeof(double)*m);
            wAM6[i] = (double *)malloc(sizeof(double)*m);
            wBM6[i] = (double *)malloc(sizeof(double)*m);
            wAM7[i] = (double *)malloc(sizeof(double)*m);
            wBM7[i] = (double *)malloc(sizeof(double)*m);
        }
        splitMatrix(A11, m, A, 0, 0);
        splitMatrix(A12, m, A, 0, m);
        splitMatrix(A21, m, A, m, 0);
        splitMatrix(A22, m, A, m, m);
        splitMatrix(B11, m, B, 0, 0);
        splitMatrix(B12, m, B, 0, m);
        splitMatrix(B21, m, B, m, 0);
        splitMatrix(B22, m, B, m, m);
        splitMatrix(C11, m, C, 0, 0);
        splitMatrix(C12, m, C, 0, m);
        splitMatrix(C21, m, C, m, 0);
        splitMatrix(C22, m, C, m, m);
        #pragma omp task
        {
            /* M1 = (A11 + A22)*(B11 + B22) */
            AddMatBlocks(wAM1, m, m, A11, A22);
            AddMatBlocks(wBM1, m, m, B11, B22);
            StrassenMM(wAM1, wBM1, M1, m, flag, block_size);
        }
        #pragma omp task
        {
            /* M2 = (A21 + A22)*B11 */
            AddMatBlocks(wAM2, m, m, A21, A22);
            StrassenMM(wAM2, B11, M2, m, flag, block_size);
        }
        #pragma omp task
        {
            /* M3 = A11*(B12 - B22) */
            SubMatBlocks(wBM3, m, m, B12, B22);
            StrassenMM(A11, wBM3, M3, m, flag, block_size);
        }
        #pragma omp task
        {
            /* M4 = A22*(B21 - B11) */
            SubMatBlocks(wBM4, m, m, B21, B11);
            StrassenMM(A22, wBM4, M4, m, flag, block_size);
        }
        #pragma omp task
        {
            /* M5 = (A11 + A12)*B22 */
            AddMatBlocks(wAM5, m, m, A11, A12);
            StrassenMM(wAM5, B22, M5, m, flag, block_size);
        }
        #pragma omp task
        {
            /* M6 = (A21 - A11)*(B11 + B12) */
            SubMatBlocks(wAM6, m, m, A21, A11);
            AddMatBlocks(wBM6, m, m, B11, B12);
            StrassenMM(wAM6, wBM6, M6, m, flag, block_size);
        }
        #pragma omp task
        {
            /* M7 = (A12 - A22)*(B21 + B22) */
            SubMatBlocks(wAM7, m, m, A12, A22);
            AddMatBlocks(wBM7, m, m, B21, B22);
            StrassenMM(wAM7, wBM7, M7, m, flag, block_size);
        }
        /* all seven products must be ready before recombination */
        #pragma omp taskwait
        for (int r = 0; r < m; r++)
        {
            for (int c = 0; c < m; c++)
            {
                C11[r][c] = M1[r][c] + M4[r][c] - M5[r][c] + M7[r][c];
                C12[r][c] = M3[r][c] + M5[r][c];
                C21[r][c] = M2[r][c] + M4[r][c];
                C22[r][c] = M1[r][c] - M2[r][c] + M3[r][c] + M6[r][c];
            }
        }
        for(i=0; i<m; i++){
            free(M1[i]);
            free(M2[i]);
            free(M3[i]);
            free(M4[i]);
            free(M5[i]);
            free(M6[i]);
            free(M7[i]);
            free(wAM1[i]);
            free(wBM1[i]);
            free(wAM2[i]);
            free(wBM3[i]);
            free(wBM4[i]);
            free(wAM5[i]);
            free(wAM6[i]);
            free(wBM6[i]);
            free(wAM7[i]);
            free(wBM7[i]);
        }
        free(M1);
        free(M2);
        free(M3);
        free(M4);
        free(M5);
        free(M6);
        free(M7);
        free(wAM1);
        free(wBM1);
        free(wAM2);
        free(wBM3);
        free(wBM4);
        free(wAM5);
        free(wAM6);
        free(wBM6);
        free(wAM7);
        free(wBM7);
        free(A11); free(A12); free(A21); free(A22);
        free(B11); free(B12); free(B21); free(B22);
        free(C11); free(C12); free(C21); free(C22);
    }
}
double matrixMultiplication_Recursive(int matrix_size, int block_size, int nthreads, int flag){
    /*
     * Random C = A * B via Strassen recursion; returns elapsed wall-clock
     * seconds.  flag selects the leaf kernel (1 = plain triple loop,
     * otherwise blocked).
     *
     * Fixes vs. the original:
     *  - A, B and C were leaked (never freed);
     *  - StrassenMM's "#pragma omp task" had no enclosing parallel region,
     *    so nthreads was ignored and the tasks ran serially — a parallel
     *    region with a single task-producing thread is created here;
     *  - timing uses omp_get_wtime() instead of clock().
     */
    srand(time(NULL));
    double **A;
    double **B;
    double **C;
    double start_t, end_t;
    double total_t;
    int i, j;

    A = (double **)malloc(sizeof(double*)*matrix_size);
    B = (double **)malloc(sizeof(double*)*matrix_size);
    C = (double **)malloc(sizeof(double*)*matrix_size);
    for(i = 0; i < matrix_size; i++){
        A[i] = (double *) malloc(sizeof(double)*matrix_size);
        B[i] = (double *) malloc(sizeof(double)*matrix_size);
        C[i] = (double *) malloc(sizeof(double)*matrix_size);
    }

    /* random operands in [0, 1); zeroed result */
    for (i = 0; i < matrix_size; ++i){
        for (j = 0; j < matrix_size; ++j){
            A[i][j] = (rand()%100)/100.0;
            B[i][j] = (rand()%100)/100.0;
            C[i][j] = 0.0;
        }
    }

    start_t = omp_get_wtime();
    /* one thread builds the task tree; the whole team executes the tasks */
    #pragma omp parallel num_threads(nthreads)
    #pragma omp single
    StrassenMM(A, B, C, matrix_size, flag, block_size);
    end_t = omp_get_wtime();
    total_t = end_t - start_t;

    /* release the matrices (the original leaked all three) */
    for(i = 0; i < matrix_size; i++){
        free(A[i]);
        free(B[i]);
        free(C[i]);
    }
    free(A);
    free(B);
    free(C);
    return total_t;
}
double matrixMultiplication_mkl(int matrix_size, int nthreads){
    /*
     * Random C = A * B using MKL's cblas_dgemm; returns elapsed
     * wall-clock seconds.
     *
     * Fixes vs. the original:
     *  - alpha and beta were passed to dgemm uninitialized (undefined
     *    behavior); dgemm computes C = alpha*A*B + beta*C, so alpha = 1
     *    and beta = 0 gives a plain product;
     *  - the mkl_malloc'd buffers were leaked;
     *  - timing uses omp_get_wtime() (clock() sums CPU time over threads).
     */
    srand(time(NULL));
    double *A, *B, *C;
    double alpha = 1.0, beta = 0.0;
    double start_t, end_t;
    double total_t;
    int i;

    /* 64-byte aligned buffers, row-major, matrix_size x matrix_size */
    A = (double *)mkl_malloc(matrix_size*matrix_size*sizeof(double), 64);
    B = (double *)mkl_malloc(matrix_size*matrix_size*sizeof(double), 64);
    C = (double *)mkl_malloc(matrix_size*matrix_size*sizeof(double), 64);
    for(i = 0; i < matrix_size*matrix_size; i++){
        A[i] = (rand()%100)/100.0;
        B[i] = (rand()%100)/100.0;
        C[i] = 0.0;
    }

    start_t = omp_get_wtime();
    omp_set_num_threads(nthreads);
    cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, matrix_size, matrix_size, matrix_size, alpha, A, matrix_size, B, matrix_size, beta, C, matrix_size);
    end_t = omp_get_wtime();
    total_t = end_t - start_t;

    /* release the aligned buffers (the original leaked them) */
    mkl_free(A);
    mkl_free(B);
    mkl_free(C);
    return total_t;
}
int main(int argc, char *argv[]){
int matrix_size = 100;
if (argc > 1)
{
matrix_size = atoi(argv[1]);
printf("Set matrix size to %d\n", matrix_size);
}else{
printf("Input argument is wrong, pls follow the arguments like: matrix_size block_size, \n");
}
int block_size = 10;
if (argc > 2)
{
block_size = atoi(argv[2]);
if (0 != matrix_size%block_size)
{
printf("block_size cannot be divided by the matrix\n");
return -1;
}
printf("Set block size to %d\n", block_size);
}
//printf("row is %d\n", matrix_size);
int nthreads = 1;
if (argc > 3)
{
nthreads = atoi(argv[3]);
if (nthreads < 0)
{
printf("Set the number of threads is wrong\n");
return -1;
}
printf("Set the number of threads is %d\n", nthreads);
}
float real_time, proc_time, mflops;
long long flpins;
int retval;
printf("\n------------------------------------------------\n");
printf("Start three nested loops - no blocking\n");
retval = PAPI_flops(&real_time, &proc_time, &flpins, &mflops);
double total_t = matrixMultiplication_noBlock(matrix_size, nthreads);
printf("Total time using three nested loops -matrix_size: %d, -threads: %d, -no blocking taken by CPU: %f\n", matrix_size, nthreads, total_t);
retval = PAPI_flops(&real_time, &proc_time, &flpins, &mflops);
printf("Real_time:\t%f\nProc_time:\t%f\nTotal flpins:\t%lld\nMFLOPS:\t\t%f\n",
real_time, proc_time, flpins, mflops);
printf("\n------------------------------------------------\n");
printf("Start three nested loops - blocking\n");
retval = PAPI_flops(&real_time, &proc_time, &flpins, &mflops);
total_t = matrixMultiplication_Block(matrix_size, block_size, nthreads);
printf("Total time using three nested loops -matrix_size: %d, -block size: %d, -thread: %d, taken by CPU: %f\n", matrix_size, block_size, nthreads, total_t);
retval = PAPI_flops(&real_time, &proc_time, &flpins, &mflops);
printf("Real_time:\t%f\nProc_time:\t%f\nTotal flpins:\t%lld\nMFLOPS:\t\t%f\n",
real_time, proc_time, flpins, mflops);
printf("\n------------------------------------------------\n");
printf("Start Strassen MM -no blocking\n");
retval = PAPI_flops(&real_time, &proc_time, &flpins, &mflops);
total_t = matrixMultiplication_Recursive(matrix_size, block_size, nthreads, 1);
printf("Total time using Strassen MM -matrix_size: %d, -no blocking, -thread: %d, taken by CPU: %f\n", matrix_size, nthreads, total_t);
retval = PAPI_flops(&real_time, &proc_time, &flpins, &mflops);
printf("Real_time:\t%f\nProc_time:\t%f\nTotal flpins:\t%lld\nMFLOPS:\t\t%f\n",
real_time, proc_time, flpins, mflops);
printf("\n------------------------------------------------\n");
printf("Start Strassen MM - blocking\n");
retval = PAPI_flops(&real_time, &proc_time, &flpins, &mflops);
total_t = matrixMultiplication_Recursive(matrix_size, block_size, nthreads, 2);
printf("Total time using Strassen MM -matrix_size: %d, -block size: 8, -thread: %d, taken by CPU: %f\n", matrix_size, nthreads, total_t);
retval = PAPI_flops(&real_time, &proc_time, &flpins, &mflops);
printf("Real_time:\t%f\nProc_time:\t%f\nTotal flpins:\t%lld\nMFLOPS:\t\t%f\n",
real_time, proc_time, flpins, mflops);
printf("\n------------------------------------------------\n");
printf("Start MKL\n");
retval = PAPI_flops(&real_time, &proc_time, &flpins, &mflops);
total_t = matrixMultiplication_mkl(matrix_size, nthreads);
printf("Total time using MKL -matrix_size: %d, -thread: %d, taken by CPU: %f\n", matrix_size, nthreads, total_t);
retval = PAPI_flops(&real_time, &proc_time, &flpins, &mflops);
printf("Real_time:\t%f\nProc_time:\t%f\nTotal flpins:\t%lld\nMFLOPS:\t\t%f\n",
real_time, proc_time, flpins, mflops);
return 0;
}
|
GB_unop__identity_uint16_bool.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint16_bool)
// op(A') function: GB (_unop_tran__identity_uint16_bool)
// C type: uint16_t
// A type: bool
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint16_t z = (uint16_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
bool aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint16_t z = (uint16_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity unary op with typecast: Cx [p] = (uint16_t) Ax [p],
// for the dense case (Ab == NULL, all anz entries present) and the bitmap
// case (Ab [p] != 0 marks present entries).  Auto-generated; do not edit.
GrB_Info GB (_unop_apply__identity_uint16_bool)
(
uint16_t *Cx, // Cx and Ax may be aliased
const bool *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// dense case: every entry of Ax is present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
bool aij = Ax [p] ;
uint16_t z = (uint16_t) aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ; // skip entries absent from the bitmap
bool aij = Ax [p] ;
uint16_t z = (uint16_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast bool -> uint16_t, and apply the
// identity op.  The actual transpose loop is textually included from
// GB_unop_transpose.c, driven by the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_uint16_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
nested_loop.c | /*
* This example illustrates nested loop parallelization with OpenMP
*
* @author Apan Qasem
*/
#include<stdlib.h>
#include<stdio.h>
#include <omp.h>
#define M 4
int main(int argc, char *argv[]) {
  /*
   * Demo of collapsed nested-loop parallelization: runs an M x M iteration
   * space across N threads, printing which thread executes each iteration.
   * Usage: prog num_threads
   *
   * Fix: the original dereferenced argv[1] without checking argc, crashing
   * when run with no arguments; the thread count is also validated.
   */
  if (argc < 2) {
    fprintf(stderr, "usage: %s num_threads\n", argv[0]);
    return 1;
  }
  int N = atoi(argv[1]);
  if (N <= 0) {
    fprintf(stderr, "num_threads must be a positive integer\n");
    return 1;
  }
  omp_set_num_threads(N);

  int j, k;
  /* collapse(2) flattens both loops into one parallel iteration space */
#pragma omp parallel for private(j) collapse(2)
  for (k = 0; k < M; k++)
    for (j = 0; j < M; j++)
      printf("I am thread %d in iteration (%d,%d)\n", omp_get_thread_num(), k,j);
  return 0;
}
|
GB_unop__identity_int8_uint64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_int8_uint64
// op(A') function: GB_unop_tran__identity_int8_uint64
// C type: int8_t
// A type: uint64_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int8_t z = (int8_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int8_t z = (int8_t) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity unary op with typecast: Cx [p] = (int8_t) Ax [p],
// for the dense case (Ab == NULL, all anz entries present) and the bitmap
// case (Ab [p] != 0 marks present entries).  Auto-generated; do not edit.
GrB_Info GB_unop_apply__identity_int8_uint64
(
int8_t *Cx, // Cx and Ax may be aliased
const uint64_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
// identity op with no typecast: a single bulk copy suffices
GB_memcpy (Cx, Ax, anz * sizeof (uint64_t), nthreads) ;
#else
// dense case with typecast: convert every entry
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint64_t aij = Ax [p] ;
int8_t z = (int8_t) aij ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ; // skip entries absent from the bitmap
uint64_t aij = Ax [p] ;
int8_t z = (int8_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast uint64_t -> int8_t, and apply
// the identity op.  The actual transpose loop is textually included from
// GB_unop_transpose.c, driven by the GB_* macros defined above.
GrB_Info GB_unop_tran__identity_int8_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
callback.h | #ifndef _BSD_SOURCE
#define _BSD_SOURCE
#endif
#ifndef _DEFAULT_SOURCE
#define _DEFAULT_SOURCE
#endif
#include <stdio.h>
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include <inttypes.h>
#include <omp.h>
#include <omp-tools.h>
#include "ompt-signal.h"
// Used to detect architecture
#include "../../src/kmp_platform.h"
#ifndef _TOOL_PREFIX
#define _TOOL_PREFIX ""
// If no _TOOL_PREFIX is set, we assume that we run as part of an OMPT test
#define _OMPT_TESTS
#endif
static const char *ompt_thread_t_values[] = {
"ompt_thread_UNDEFINED", "ompt_thread_initial", "ompt_thread_worker",
"ompt_thread_other"};
static const char *ompt_task_status_t_values[] = {
"ompt_task_UNDEFINED",
"ompt_task_complete", // 1
"ompt_task_yield", // 2
"ompt_task_cancel", // 3
"ompt_task_detach", // 4
"ompt_task_early_fulfill", // 5
"ompt_task_late_fulfill", // 6
"ompt_task_switch" // 7
};
static const char* ompt_cancel_flag_t_values[] = {
"ompt_cancel_parallel",
"ompt_cancel_sections",
"ompt_cancel_loop",
"ompt_cancel_taskgroup",
"ompt_cancel_activated",
"ompt_cancel_detected",
"ompt_cancel_discarded_task"
};
static const char *ompt_dependence_type_t_values[] = {
"ompt_dependence_type_UNDEFINED",
"ompt_dependence_type_in", // 1
"ompt_dependence_type_out", // 2
"ompt_dependence_type_inout", // 3
"ompt_dependence_type_mutexinoutset", // 4
"ompt_dependence_type_source", // 5
"ompt_dependence_type_sink", // 6
"ompt_dependence_type_inoutset" // 7
};
// Decode the ompt task-type bit set into a readable string in 'buffer'.
// The four base kinds (initial/implicit/explicit/target) are written
// without a leading '|'; each modifier flag appends one.  Callers must
// supply a large-enough buffer (callers in this header use 2048 bytes).
// NOTE(review): if 'type' has none of the known bits set, 'buffer' is
// left untouched (possibly uninitialized in the caller) -- confirm all
// callers pass a recognized task type.
static void format_task_type(int type, char *buffer) {
char *progress = buffer; // write cursor, advanced by each sprintf
if (type & ompt_task_initial)
progress += sprintf(progress, "ompt_task_initial");
if (type & ompt_task_implicit)
progress += sprintf(progress, "ompt_task_implicit");
if (type & ompt_task_explicit)
progress += sprintf(progress, "ompt_task_explicit");
if (type & ompt_task_target)
progress += sprintf(progress, "ompt_task_target");
if (type & ompt_task_undeferred)
progress += sprintf(progress, "|ompt_task_undeferred");
if (type & ompt_task_untied)
progress += sprintf(progress, "|ompt_task_untied");
if (type & ompt_task_final)
progress += sprintf(progress, "|ompt_task_final");
if (type & ompt_task_mergeable)
progress += sprintf(progress, "|ompt_task_mergeable");
if (type & ompt_task_merged)
progress += sprintf(progress, "|ompt_task_merged");
}
static ompt_set_callback_t ompt_set_callback;
static ompt_get_callback_t ompt_get_callback;
static ompt_get_state_t ompt_get_state;
static ompt_get_task_info_t ompt_get_task_info;
static ompt_get_task_memory_t ompt_get_task_memory;
static ompt_get_thread_data_t ompt_get_thread_data;
static ompt_get_parallel_info_t ompt_get_parallel_info;
static ompt_get_unique_id_t ompt_get_unique_id;
static ompt_finalize_tool_t ompt_finalize_tool;
static ompt_get_num_procs_t ompt_get_num_procs;
static ompt_get_num_places_t ompt_get_num_places;
static ompt_get_place_proc_ids_t ompt_get_place_proc_ids;
static ompt_get_place_num_t ompt_get_place_num;
static ompt_get_partition_place_nums_t ompt_get_partition_place_nums;
static ompt_get_proc_id_t ompt_get_proc_id;
static ompt_enumerate_states_t ompt_enumerate_states;
static ompt_enumerate_mutex_impls_t ompt_enumerate_mutex_impls;
// Print identifying information for the task at ancestry 'level'
// (0 = the current task): parallel id, task id, exit/enter frame
// pointers, the decoded task type, and the thread number.  Nothing is
// printed when ompt_get_task_info yields a NULL frame.
// NOTE(review): when exists_task is 0, task_type (and thus 'buffer') may
// be used uninitialized -- confirm the runtime always sets task_type.
static void print_ids(int level)
{
int task_type, thread_num;
ompt_frame_t *frame;
ompt_data_t *task_parallel_data;
ompt_data_t *task_data;
// returns non-zero when a task exists at this ancestry level
int exists_task = ompt_get_task_info(level, &task_type, &task_data, &frame,
&task_parallel_data, &thread_num);
char buffer[2048];
format_task_type(task_type, buffer);
if (frame)
printf("%" PRIu64 ": task level %d: parallel_id=%" PRIu64
", task_id=%" PRIu64 ", exit_frame=%p, reenter_frame=%p, "
"task_type=%s=%d, thread_num=%d\n",
ompt_get_thread_data()->value, level,
exists_task ? task_parallel_data->value : 0,
exists_task ? task_data->value : 0, frame->exit_frame.ptr,
frame->enter_frame.ptr, buffer, task_type, thread_num);
}
#define get_frame_address(level) __builtin_frame_address(level)
#define print_frame(level) \
printf("%" PRIu64 ": __builtin_frame_address(%d)=%p\n", \
ompt_get_thread_data()->value, level, get_frame_address(level))
// clang (version 5.0 and above) adds an intermediate function call with debug flag (-g)
#if defined(TEST_NEED_PRINT_FRAME_FROM_OUTLINED_FN)
#if defined(DEBUG) && defined(__clang__) && __clang_major__ >= 5
#define print_frame_from_outlined_fn(level) print_frame(level+1)
#else
#define print_frame_from_outlined_fn(level) print_frame(level)
#endif
#if defined(__clang__) && __clang_major__ >= 5
#warning "Clang 5.0 and later add an additional wrapper for outlined functions when compiling with debug information."
#warning "Please define -DDEBUG iff you manually pass in -g to make the tests succeed!"
#endif
#endif
// This macro helps to define a label at the current position that can be used
// to get the current address in the code.
//
// For print_current_address():
// To reliably determine the offset between the address of the label and the
// actual return address, we insert a NOP instruction as a jump target as the
// compiler would otherwise insert an instruction that we can't control. The
// instruction length is target dependent and is explained below.
//
// (The empty block between "#pragma omp ..." and the __asm__ statement is a
// workaround for a bug in the Intel Compiler.)
#define define_ompt_label(id) \
{} \
__asm__("nop"); \
ompt_label_##id:
// This macro helps to get the address of a label that is inserted by the above
// macro define_ompt_label(). The address is obtained with a GNU extension
// (&&label) that has been tested with gcc, clang and icc.
#define get_ompt_label_address(id) (&& ompt_label_##id)
// This macro prints the exact address that a previously called runtime function
// returns to.
#define print_current_address(id) \
define_ompt_label(id) \
print_possible_return_addresses(get_ompt_label_address(id))
// Architecture-specific definition of print_possible_return_addresses():
// the distance between the inserted label and the runtime call's return
// address depends on the target's instruction encoding.
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// On X86 the NOP instruction is 1 byte long. In addition, the compiler inserts
// a MOV instruction for non-void runtime functions which is 3 bytes long.
#define print_possible_return_addresses(addr) \
printf("%" PRIu64 ": current_address=%p or %p for non-void functions\n", \
ompt_get_thread_data()->value, ((char *)addr) - 1, ((char *)addr) - 4)
#elif KMP_ARCH_PPC64
// On Power the NOP instruction is 4 bytes long. In addition, the compiler
// inserts a second NOP instruction (another 4 bytes). For non-void runtime
// functions Clang inserts a STW instruction (but only if compiling under
// -fno-PIC which will be the default with Clang 8.0, another 4 bytes).
#define print_possible_return_addresses(addr) \
printf("%" PRIu64 ": current_address=%p or %p\n", ompt_get_thread_data()->value, \
((char *)addr) - 8, ((char *)addr) - 12)
#elif KMP_ARCH_AARCH64
// On AArch64 the NOP instruction is 4 bytes long, can be followed by inserted
// store instruction (another 4 bytes long).
#define print_possible_return_addresses(addr) \
printf("%" PRIu64 ": current_address=%p or %p\n", ompt_get_thread_data()->value, \
((char *)addr) - 4, ((char *)addr) - 8)
#elif KMP_ARCH_RISCV64
#if __riscv_compressed
// On RV64GC the C.NOP instruction is 2 byte long. In addition, the compiler
// inserts a J instruction (targeting the successor basic block), which
// accounts for another 4 bytes. Finally, an additional J instruction may
// appear (adding 4 more bytes) when the C.NOP is referenced elsewhere (ie.
// another branch).
#define print_possible_return_addresses(addr) \
printf("%" PRIu64 ": current_address=%p or %p\n", \
ompt_get_thread_data()->value, ((char *)addr) - 6, ((char *)addr) - 10)
#else
// On RV64G the NOP instruction is 4 byte long. In addition, the compiler
// inserts a J instruction (targeting the successor basic block), which
// accounts for another 4 bytes. Finally, an additional J instruction may
// appear (adding 4 more bytes) when the NOP is referenced elsewhere (ie.
// another branch).
#define print_possible_return_addresses(addr) \
printf("%" PRIu64 ": current_address=%p or %p\n", \
ompt_get_thread_data()->value, ((char *)addr) - 8, ((char *)addr) - 12)
#endif
#else
#error Unsupported target architecture, cannot determine address offset!
#endif
// This macro performs a somewhat similar job to print_current_address(), except
// that it discards a certain number of nibbles from the address and only prints
// the most significant bits / nibbles. This can be used for cases where the
// return address can only be approximated.
//
// To account for overflows (ie the most significant bits / nibbles have just
// changed as we are a few bytes above the relevant power of two) the addresses
// of the "current" and of the "previous block" are printed.
#define print_fuzzy_address(id) \
define_ompt_label(id) \
print_fuzzy_address_blocks(get_ompt_label_address(id))
// If you change this define you need to adapt all capture patterns in the tests
// to include or discard the new number of nibbles!
#define FUZZY_ADDRESS_DISCARD_NIBBLES 2
// Block size implied by the discarded nibbles (4 bits per nibble).
#define FUZZY_ADDRESS_DISCARD_BYTES (1 << ((FUZZY_ADDRESS_DISCARD_NIBBLES) * 4))
// Prints the block of the label address plus its three neighbours, followed by
// the raw address for reference.
#define print_fuzzy_address_blocks(addr) \
printf("%" PRIu64 ": fuzzy_address=0x%" PRIx64 " or 0x%" PRIx64 \
" or 0x%" PRIx64 " or 0x%" PRIx64 " (%p)\n", \
ompt_get_thread_data()->value, \
((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES - 1, \
((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES, \
((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES + 1, \
((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES + 2, addr)
// Register on_<name> as the handler for event <name>. Assigning &on_##name to
// a pointer of the given callback type lets the compiler check the signature.
// A failed registration is reported but not fatal.
#define register_callback_t(name, type) \
do { \
type f_##name = &on_##name; \
if (ompt_set_callback(name, (ompt_callback_t)f_##name) == ompt_set_never) \
printf("0: Could not register callback '" #name "'\n"); \
} while (0)
// Common case: the callback type is named after the event (<name>_t).
#define register_callback(name) register_callback_t(name, name##_t)
#ifndef USE_PRIVATE_TOOL
// Report that a thread starts waiting for a mutex of the given kind.
// Output is identical to the original per-kind printf calls; only the event
// name differs between kinds, so it is selected up front.
static void
on_ompt_callback_mutex_acquire(
  ompt_mutex_t kind,
  unsigned int hint,
  unsigned int impl,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  const char *event;
  switch (kind) {
    case ompt_mutex_lock:      event = "wait_lock";      break;
    case ompt_mutex_nest_lock: event = "wait_nest_lock"; break;
    case ompt_mutex_critical:  event = "wait_critical";  break;
    case ompt_mutex_atomic:    event = "wait_atomic";    break;
    case ompt_mutex_ordered:   event = "wait_ordered";   break;
    default:
      return; // other kinds are intentionally not traced
  }
  printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_%s: wait_id=%" PRIu64
         ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n",
         ompt_get_thread_data()->value, event, wait_id, hint, impl, codeptr_ra);
}
// Report that a mutex of the given kind has been acquired. Same trace lines
// as the original switch, produced via a single printf with the event name.
static void
on_ompt_callback_mutex_acquired(
  ompt_mutex_t kind,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  const char *event;
  switch (kind) {
    case ompt_mutex_lock:      event = "acquired_lock";            break;
    case ompt_mutex_nest_lock: event = "acquired_nest_lock_first"; break;
    case ompt_mutex_critical:  event = "acquired_critical";        break;
    case ompt_mutex_atomic:    event = "acquired_atomic";          break;
    case ompt_mutex_ordered:   event = "acquired_ordered";         break;
    default:
      return; // other kinds are intentionally not traced
  }
  printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_%s: wait_id=%" PRIu64
         ", codeptr_ra=%p \n",
         ompt_get_thread_data()->value, event, wait_id, codeptr_ra);
}
// Report that a mutex of the given kind has been released. Same trace lines
// as the original switch, produced via a single printf with the event name.
static void
on_ompt_callback_mutex_released(
  ompt_mutex_t kind,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  const char *event;
  switch (kind) {
    case ompt_mutex_lock:      event = "release_lock";           break;
    case ompt_mutex_nest_lock: event = "release_nest_lock_last"; break;
    case ompt_mutex_critical:  event = "release_critical";       break;
    case ompt_mutex_atomic:    event = "release_atomic";         break;
    case ompt_mutex_ordered:   event = "release_ordered";        break;
    default:
      return; // other kinds are intentionally not traced
  }
  printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_%s: wait_id=%" PRIu64
         ", codeptr_ra=%p \n",
         ompt_get_thread_data()->value, event, wait_id, codeptr_ra);
}
// Nested-lock level change: scope_begin means another level was acquired,
// scope_end means one level was released (but not the last one).
static void
on_ompt_callback_nest_lock(
  ompt_scope_endpoint_t endpoint,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  const char *event;
  if (endpoint == ompt_scope_begin)
    event = "acquired_nest_lock_next";
  else if (endpoint == ompt_scope_end)
    event = "release_nest_lock_prev";
  else
    return; // no other endpoints were handled by the original switch
  printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_%s: wait_id=%" PRIu64
         ", codeptr_ra=%p \n",
         ompt_get_thread_data()->value, event, wait_id, codeptr_ra);
}
// Trace begin/end of synchronization regions (barrier variants, taskwait,
// taskgroup). Reduction regions must be delivered to the dedicated reduction
// callback instead; receiving one here terminates the tool.
// Note: parallel_data may be NULL at scope_end, hence the guards there.
static void
on_ompt_callback_sync_region(
  ompt_sync_region_t kind,
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  const void *codeptr_ra)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      switch(kind)
      {
        case ompt_sync_region_barrier:
        case ompt_sync_region_barrier_implicit:
        case ompt_sync_region_barrier_explicit:
        case ompt_sync_region_barrier_implementation:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_barrier_begin: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra);
          // Additionally dump the current ids at the barrier.
          print_ids(0);
          break;
        case ompt_sync_region_taskwait:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_taskwait_begin: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskgroup:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_taskgroup_begin: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_reduction:
          printf("ompt_sync_region_reduction should never be passed to "
                 "on_ompt_callback_sync_region\n");
          exit(-1);
          break;
      }
      break;
    case ompt_scope_end:
      switch(kind)
      {
        case ompt_sync_region_barrier:
        case ompt_sync_region_barrier_implicit:
        case ompt_sync_region_barrier_explicit:
        case ompt_sync_region_barrier_implementation:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_barrier_end: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value,
                 (parallel_data) ? parallel_data->value : 0, task_data->value,
                 codeptr_ra);
          break;
        case ompt_sync_region_taskwait:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_taskwait_end: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value,
                 (parallel_data) ? parallel_data->value : 0, task_data->value,
                 codeptr_ra);
          break;
        case ompt_sync_region_taskgroup:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_taskgroup_end: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value,
                 (parallel_data) ? parallel_data->value : 0, task_data->value,
                 codeptr_ra);
          break;
        case ompt_sync_region_reduction:
          printf("ompt_sync_region_reduction should never be passed to "
                 "on_ompt_callback_sync_region\n");
          exit(-1);
          break;
      }
      break;
  }
}
// Trace begin/end of the *wait* portion inside synchronization regions.
// Mirrors on_ompt_callback_sync_region with "wait_" event names; reduction
// kinds are likewise a hard error here.
static void
on_ompt_callback_sync_region_wait(
  ompt_sync_region_t kind,
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  const void *codeptr_ra)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      switch(kind)
      {
        case ompt_sync_region_barrier:
        case ompt_sync_region_barrier_implicit:
        case ompt_sync_region_barrier_explicit:
        case ompt_sync_region_barrier_implementation:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_wait_barrier_begin: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskwait:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_wait_taskwait_begin: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskgroup:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_wait_taskgroup_begin: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_reduction:
          printf("ompt_sync_region_reduction should never be passed to "
                 "on_ompt_callback_sync_region_wait\n");
          exit(-1);
          break;
      }
      break;
    case ompt_scope_end:
      // parallel_data may already be gone at region end; guard it.
      switch(kind)
      {
        case ompt_sync_region_barrier:
        case ompt_sync_region_barrier_implicit:
        case ompt_sync_region_barrier_explicit:
        case ompt_sync_region_barrier_implementation:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_wait_barrier_end: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value,
                 (parallel_data) ? parallel_data->value : 0, task_data->value,
                 codeptr_ra);
          break;
        case ompt_sync_region_taskwait:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_wait_taskwait_end: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value,
                 (parallel_data) ? parallel_data->value : 0, task_data->value,
                 codeptr_ra);
          break;
        case ompt_sync_region_taskgroup:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_wait_taskgroup_end: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value,
                 (parallel_data) ? parallel_data->value : 0, task_data->value,
                 codeptr_ra);
          break;
        case ompt_sync_region_reduction:
          printf("ompt_sync_region_reduction should never be passed to "
                 "on_ompt_callback_sync_region_wait\n");
          exit(-1);
          break;
      }
      break;
  }
}
// Reduction begin/end events. Both endpoints print the same line, differing
// only in the "begin"/"end" suffix, so the phase string is selected first.
static void on_ompt_callback_reduction(ompt_sync_region_t kind,
                                       ompt_scope_endpoint_t endpoint,
                                       ompt_data_t *parallel_data,
                                       ompt_data_t *task_data,
                                       const void *codeptr_ra) {
  const char *phase;
  if (endpoint == ompt_scope_begin)
    phase = "begin";
  else if (endpoint == ompt_scope_end)
    phase = "end";
  else
    return; // no other endpoints were handled by the original switch
  printf("%" PRIu64 ":" _TOOL_PREFIX
         " ompt_event_reduction_%s: parallel_id=%" PRIu64 ", task_id=%" PRIu64
         ", codeptr_ra=%p\n",
         ompt_get_thread_data()->value, phase,
         (parallel_data) ? parallel_data->value : 0, task_data->value,
         codeptr_ra);
}
// Flush event. Unlike most callbacks, the thread's data is handed in
// directly instead of being fetched through ompt_get_thread_data().
static void
on_ompt_callback_flush(ompt_data_t *thread_data, const void *codeptr_ra)
{
  printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_flush: codeptr_ra=%p\n",
         thread_data->value, codeptr_ra);
}
// Cancellation event: decode the flag bitmask into two readable parts —
// which construct is cancelled, and how the cancellation was observed.
// BUG FIX: first_flag_value/second_flag_value were left uninitialized, which
// is undefined behavior when a flag group is absent from `flags`; default
// both to the empty string so printf can never read indeterminate pointers.
static void
on_ompt_callback_cancel(
  ompt_data_t *task_data,
  int flags,
  const void *codeptr_ra)
{
  const char* first_flag_value = "";
  const char* second_flag_value = "";
  if(flags & ompt_cancel_parallel)
    first_flag_value = ompt_cancel_flag_t_values[0];
  else if(flags & ompt_cancel_sections)
    first_flag_value = ompt_cancel_flag_t_values[1];
  else if(flags & ompt_cancel_loop)
    first_flag_value = ompt_cancel_flag_t_values[2];
  else if(flags & ompt_cancel_taskgroup)
    first_flag_value = ompt_cancel_flag_t_values[3];
  if(flags & ompt_cancel_activated)
    second_flag_value = ompt_cancel_flag_t_values[4];
  else if(flags & ompt_cancel_detected)
    second_flag_value = ompt_cancel_flag_t_values[5];
  else if(flags & ompt_cancel_discarded_task)
    second_flag_value = ompt_cancel_flag_t_values[6];
  printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_cancel: task_data=%" PRIu64
         ", flags=%s|%s=%" PRIu32 ", codeptr_ra=%p\n",
         ompt_get_thread_data()->value, task_data->value, first_flag_value,
         second_flag_value, flags, codeptr_ra);
}
// Implicit-task begin/end. Side effect: assigns a fresh unique id to
// task_data at scope_begin. For the *initial* task this callback also stands
// in for the missing parallel_begin of the implicit parallel region, so
// parallel_data gets its id here as well.
static void
on_ompt_callback_implicit_task(
    ompt_scope_endpoint_t endpoint,
    ompt_data_t *parallel_data,
    ompt_data_t *task_data,
    unsigned int team_size,
    unsigned int thread_num,
    int flags)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      if(task_data->ptr)
        printf("%s\n", "0: task_data initially not null");
      task_data->value = ompt_get_unique_id();
      //there is no parallel_begin callback for implicit parallel region
      //thus it is initialized in initial task
      if(flags & ompt_task_initial)
      {
        char buffer[2048];
        format_task_type(flags, buffer);
        // Only check initial task not created by teams construct
        if (team_size == 1 && thread_num == 1 && parallel_data->ptr)
          printf("%s\n", "0: parallel_data initially not null");
        parallel_data->value = ompt_get_unique_id();
        printf("%" PRIu64 ":" _TOOL_PREFIX
               " ompt_event_initial_task_begin: parallel_id=%" PRIu64
               ", task_id=%" PRIu64 ", actual_parallelism=%" PRIu32
               ", index=%" PRIu32 ", flags=%" PRIu32 "\n",
               ompt_get_thread_data()->value, parallel_data->value,
               task_data->value, team_size, thread_num, flags);
      } else {
        printf("%" PRIu64 ":" _TOOL_PREFIX
               " ompt_event_implicit_task_begin: parallel_id=%" PRIu64
               ", task_id=%" PRIu64 ", team_size=%" PRIu32
               ", thread_num=%" PRIu32 "\n",
               ompt_get_thread_data()->value, parallel_data->value,
               task_data->value, team_size, thread_num);
      }
      break;
    case ompt_scope_end:
      // parallel_data may be NULL at task end; guard it.
      if(flags & ompt_task_initial){
        printf("%" PRIu64 ":" _TOOL_PREFIX
               " ompt_event_initial_task_end: parallel_id=%" PRIu64
               ", task_id=%" PRIu64 ", actual_parallelism=%" PRIu32
               ", index=%" PRIu32 "\n",
               ompt_get_thread_data()->value,
               (parallel_data) ? parallel_data->value : 0, task_data->value,
               team_size, thread_num);
      } else {
        printf("%" PRIu64 ":" _TOOL_PREFIX
               " ompt_event_implicit_task_end: parallel_id=%" PRIu64
               ", task_id=%" PRIu64 ", team_size=%" PRIu32
               ", thread_num=%" PRIu32 "\n",
               ompt_get_thread_data()->value,
               (parallel_data) ? parallel_data->value : 0, task_data->value,
               team_size, thread_num);
      }
      break;
  }
}
// Lock initialization event; only plain and nested locks are reported,
// matching the original switch (default: ignore).
static void
on_ompt_callback_lock_init(
  ompt_mutex_t kind,
  unsigned int hint,
  unsigned int impl,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  const char *event;
  if (kind == ompt_mutex_lock)
    event = "init_lock";
  else if (kind == ompt_mutex_nest_lock)
    event = "init_nest_lock";
  else
    return;
  printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_%s: wait_id=%" PRIu64
         ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n",
         ompt_get_thread_data()->value, event, wait_id, hint, impl, codeptr_ra);
}
// Lock destruction event; only plain and nested locks are reported,
// matching the original switch (default: ignore).
static void
on_ompt_callback_lock_destroy(
  ompt_mutex_t kind,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  const char *event;
  if (kind == ompt_mutex_lock)
    event = "destroy_lock";
  else if (kind == ompt_mutex_nest_lock)
    event = "destroy_nest_lock";
  else
    return;
  printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_%s: wait_id=%" PRIu64
         ", codeptr_ra=%p \n",
         ompt_get_thread_data()->value, event, wait_id, codeptr_ra);
}
// Worksharing-region begin/end. Note that the field label differs by kind
// and endpoint (some lines say "parent_task_id", others "task_id") — the
// exact strings are matched by the lit tests, so they must not be unified.
// ompt_work_workshare is intentionally not implemented yet ("//impl").
static void
on_ompt_callback_work(
  ompt_work_t wstype,
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  uint64_t count,
  const void *codeptr_ra)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      switch(wstype)
      {
        case ompt_work_loop:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_loop_begin: parallel_id=%" PRIu64
                 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
                 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_sections:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_sections_begin: parallel_id=%" PRIu64
                 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
                 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_single_executor:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_single_in_block_begin: parallel_id=%" PRIu64
                 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
                 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_single_other:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_single_others_begin: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_workshare:
          //impl
          break;
        case ompt_work_distribute:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_distribute_begin: parallel_id=%" PRIu64
                 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
                 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_taskloop:
          //impl
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_taskloop_begin: parallel_id=%" PRIu64
                 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
                 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
      }
      break;
    case ompt_scope_end:
      switch(wstype)
      {
        case ompt_work_loop:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_loop_end: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_sections:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_sections_end: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_single_executor:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_single_in_block_end: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_single_other:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_single_others_end: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_workshare:
          //impl
          break;
        case ompt_work_distribute:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_distribute_end: parallel_id=%" PRIu64
                 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
                 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_taskloop:
          //impl
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_taskloop_end: parallel_id=%" PRIu64
                 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
                 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
      }
      break;
  }
}
// Master-region begin/end. Both endpoints emit the same line apart from the
// "begin"/"end" suffix, so the phase string is selected up front.
static void
on_ompt_callback_master(
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  const void *codeptr_ra)
{
  const char *phase;
  if (endpoint == ompt_scope_begin)
    phase = "begin";
  else if (endpoint == ompt_scope_end)
    phase = "end";
  else
    return; // no other endpoints were handled by the original switch
  printf("%" PRIu64 ":" _TOOL_PREFIX
         " ompt_event_master_%s: parallel_id=%" PRIu64 ", task_id=%" PRIu64
         ", codeptr_ra=%p\n",
         ompt_get_thread_data()->value, phase, parallel_data->value,
         task_data->value, codeptr_ra);
}
// Parallel/teams region begin. Side effect: assigns a fresh unique id to
// parallel_data (which is expected to arrive with a NULL pointer).
static void on_ompt_callback_parallel_begin(
    ompt_data_t *encountering_task_data,
    const ompt_frame_t *encountering_task_frame, ompt_data_t *parallel_data,
    uint32_t requested_team_size, int flag, const void *codeptr_ra) {
  if (parallel_data->ptr)
    printf("0: parallel_data initially not null\n");
  parallel_data->value = ompt_get_unique_id();
  // Low nibble encodes the invoker; the team bit selects the wording.
  int invoker = flag & 0xF;
  int is_team = (flag & ompt_parallel_team) != 0;
  const char *event = is_team ? "parallel" : "teams";
  const char *size = is_team ? "team_size" : "num_teams";
  printf("%" PRIu64 ":" _TOOL_PREFIX
         " ompt_event_%s_begin: parent_task_id=%" PRIu64
         ", parent_task_frame.exit=%p, parent_task_frame.reenter=%p, "
         "parallel_id=%" PRIu64 ", requested_%s=%" PRIu32
         ", codeptr_ra=%p, invoker=%d\n",
         ompt_get_thread_data()->value, event, encountering_task_data->value,
         encountering_task_frame->exit_frame.ptr,
         encountering_task_frame->enter_frame.ptr, parallel_data->value, size,
         requested_team_size, codeptr_ra, invoker);
}
// Parallel/teams region end; mirrors parallel_begin's flag decoding.
static void on_ompt_callback_parallel_end(ompt_data_t *parallel_data,
                                          ompt_data_t *encountering_task_data,
                                          int flag, const void *codeptr_ra) {
  const char *event = (flag & ompt_parallel_team) ? "parallel" : "teams";
  int invoker = flag & 0xF; // low nibble encodes the invoker
  printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_%s_end: parallel_id=%" PRIu64
         ", task_id=%" PRIu64 ", invoker=%d, codeptr_ra=%p\n",
         ompt_get_thread_data()->value, event, parallel_data->value,
         encountering_task_data->value, invoker, codeptr_ra);
}
// Task creation: assigns a fresh unique id to the new task and logs its type.
// The encountering task data/frame are null-guarded below, so a NULL parent
// is tolerated and reported as 0/NULL.
static void
on_ompt_callback_task_create(
    ompt_data_t *encountering_task_data,
    const ompt_frame_t *encountering_task_frame,
    ompt_data_t* new_task_data,
    int type,
    int has_dependences,
    const void *codeptr_ra)
{
  if(new_task_data->ptr)
    printf("0: new_task_data initially not null\n");
  new_task_data->value = ompt_get_unique_id();
  char buffer[2048];
  // Render the task-type bitmask as text (helper defined elsewhere in the tool).
  format_task_type(type, buffer);
  printf(
      "%" PRIu64 ":" _TOOL_PREFIX
      " ompt_event_task_create: parent_task_id=%" PRIu64
      ", parent_task_frame.exit=%p, parent_task_frame.reenter=%p, "
      "new_task_id=%" PRIu64
      ", codeptr_ra=%p, task_type=%s=%d, has_dependences=%s\n",
      ompt_get_thread_data()->value,
      encountering_task_data ? encountering_task_data->value : 0,
      encountering_task_frame ? encountering_task_frame->exit_frame.ptr : NULL,
      encountering_task_frame ? encountering_task_frame->enter_frame.ptr : NULL,
      new_task_data->value, codeptr_ra, buffer, type,
      has_dependences ? "yes" : "no");
}
// Task switch: logs the transition and, when the prior task finished
// (complete or late-fulfill), additionally emits its task_end event.
static void
on_ompt_callback_task_schedule(
    ompt_data_t *first_task_data,
    ompt_task_status_t prior_task_status,
    ompt_data_t *second_task_data)
{
  // NOTE: second_task_data may be NULL; the -1 fallback is converted to
  // uint64_t by the ternary and thus prints as UINT64_MAX under PRIu64.
  printf("%" PRIu64 ":" _TOOL_PREFIX
         " ompt_event_task_schedule: first_task_id=%" PRIu64
         ", second_task_id=%" PRIu64 ", prior_task_status=%s=%d\n",
         ompt_get_thread_data()->value, first_task_data->value,
         (second_task_data ? second_task_data->value : -1),
         ompt_task_status_t_values[prior_task_status], prior_task_status);
  if (prior_task_status == ompt_task_complete ||
      prior_task_status == ompt_task_late_fulfill) {
    printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_task_end: task_id=%" PRIu64
           "\n", ompt_get_thread_data()->value, first_task_data->value);
  }
}
// Task dependences: render the dependence list as "(value, type)" pairs into
// a bounded scratch buffer and print it as one line. The loop stops appending
// well before the end of the buffer to leave room for one more entry.
static void
on_ompt_callback_dependences(
  ompt_data_t *task_data,
  const ompt_dependence_t *deps,
  int ndeps)
{
  char buffer[2048];
  char *progress = buffer;
  // BUG FIX: when ndeps == 0 the loop never writes to buffer, yet it was
  // printed below — reading indeterminate memory. Make it empty up front.
  buffer[0] = '\0';
  for (int i = 0; i < ndeps && progress < buffer + 2000; i++) {
    if (deps[i].dependence_type == ompt_dependence_type_source ||
        deps[i].dependence_type == ompt_dependence_type_sink)
      progress +=
          sprintf(progress, "(%" PRIu64 ", %s), ", deps[i].variable.value,
                  ompt_dependence_type_t_values[deps[i].dependence_type]);
    else
      progress +=
          sprintf(progress, "(%p, %s), ", deps[i].variable.ptr,
                  ompt_dependence_type_t_values[deps[i].dependence_type]);
  }
  // Trim the trailing ", " left by the last appended entry.
  if (ndeps > 0)
    progress[-2] = 0;
  printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_dependences: task_id=%" PRIu64
         ", deps=[%s], ndeps=%d\n",
         ompt_get_thread_data()->value, task_data->value, buffer, ndeps);
}
// One line per dependence edge between two tasks.
static void
on_ompt_callback_task_dependence(
  ompt_data_t *first_task_data,
  ompt_data_t *second_task_data)
{
  uint64_t tid = ompt_get_thread_data()->value;
  printf("%" PRIu64 ":" _TOOL_PREFIX
         " ompt_event_task_dependence_pair: first_task_id=%" PRIu64
         ", second_task_id=%" PRIu64 "\n",
         tid, first_task_data->value, second_task_data->value);
}
// Thread start. Side effect: assigns a fresh unique id to thread_data
// (which is expected to arrive with a NULL pointer).
static void
on_ompt_callback_thread_begin(
  ompt_thread_t thread_type,
  ompt_data_t *thread_data)
{
  if (thread_data->ptr) {
    printf("%s\n", "0: thread_data initially not null");
  }
  thread_data->value = ompt_get_unique_id();
  uint64_t tid = ompt_get_thread_data()->value;
  printf("%" PRIu64 ":" _TOOL_PREFIX
         " ompt_event_thread_begin: thread_type=%s=%d, thread_id=%" PRIu64 "\n",
         tid, ompt_thread_t_values[thread_type], thread_type,
         thread_data->value);
}
// Thread exit: report the id that was assigned in thread_begin.
static void
on_ompt_callback_thread_end(
  ompt_data_t *thread_data)
{
  uint64_t tid = ompt_get_thread_data()->value;
  printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_thread_end: thread_id=%" PRIu64
         "\n", tid, thread_data->value);
}
// omp_control_tool entry point: prints the call arguments and the current
// task frame, then (outside the OMPT lit tests) walks and dumps the whole
// task and parallel ancestry via the inquiry functions. Returns 0 (success).
static int
on_ompt_callback_control_tool(
  uint64_t command,
  uint64_t modifier,
  void *arg,
  const void *codeptr_ra)
{
  ompt_frame_t* omptTaskFrame;
  // Depth 0 == the current task; only the frame pointer is requested.
  ompt_get_task_info(0, NULL, (ompt_data_t**) NULL, &omptTaskFrame, NULL, NULL);
  printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_control_tool: command=%" PRIu64
         ", modifier=%" PRIu64
         ", arg=%p, codeptr_ra=%p, current_task_frame.exit=%p, "
         "current_task_frame.reenter=%p \n",
         ompt_get_thread_data()->value, command, modifier, arg, codeptr_ra,
         omptTaskFrame->exit_frame.ptr, omptTaskFrame->enter_frame.ptr);
  // the following would interfere with expected output for OMPT tests, so skip
#ifndef _OMPT_TESTS
  // print task data
  int task_level = 0;
  ompt_data_t *task_data;
  while (ompt_get_task_info(task_level, NULL, (ompt_data_t **)&task_data, NULL,
                            NULL, NULL)) {
    printf("%" PRIu64 ":" _TOOL_PREFIX " task level %d: task_id=%" PRIu64 "\n",
           ompt_get_thread_data()->value, task_level, task_data->value);
    task_level++;
  }
  // print parallel data
  int parallel_level = 0;
  ompt_data_t *parallel_data;
  while (ompt_get_parallel_info(parallel_level, (ompt_data_t **)&parallel_data,
                                NULL)) {
    printf("%" PRIu64 ":" _TOOL_PREFIX " parallel level %d: parallel_id=%" PRIu64
           "\n",
           ompt_get_thread_data()->value, parallel_level, parallel_data->value);
    parallel_level++;
  }
#endif
  return 0; //success
}
// Tool initializer: resolve every runtime entry point this tool uses via the
// provided lookup function, then register all event callbacks exercised by
// the tests. Returning 1 keeps the tool active.
int ompt_initialize(
  ompt_function_lookup_t lookup,
  int initial_device_num,
  ompt_data_t *tool_data)
{
  // Runtime inquiry functions.
  ompt_set_callback = (ompt_set_callback_t) lookup("ompt_set_callback");
  ompt_get_callback = (ompt_get_callback_t) lookup("ompt_get_callback");
  ompt_get_state = (ompt_get_state_t) lookup("ompt_get_state");
  ompt_get_task_info = (ompt_get_task_info_t) lookup("ompt_get_task_info");
  ompt_get_task_memory = (ompt_get_task_memory_t)lookup("ompt_get_task_memory");
  ompt_get_thread_data = (ompt_get_thread_data_t) lookup("ompt_get_thread_data");
  ompt_get_parallel_info = (ompt_get_parallel_info_t) lookup("ompt_get_parallel_info");
  ompt_get_unique_id = (ompt_get_unique_id_t) lookup("ompt_get_unique_id");
  ompt_finalize_tool = (ompt_finalize_tool_t)lookup("ompt_finalize_tool");
  ompt_get_num_procs = (ompt_get_num_procs_t) lookup("ompt_get_num_procs");
  ompt_get_num_places = (ompt_get_num_places_t) lookup("ompt_get_num_places");
  ompt_get_place_proc_ids = (ompt_get_place_proc_ids_t) lookup("ompt_get_place_proc_ids");
  ompt_get_place_num = (ompt_get_place_num_t) lookup("ompt_get_place_num");
  ompt_get_partition_place_nums = (ompt_get_partition_place_nums_t) lookup("ompt_get_partition_place_nums");
  ompt_get_proc_id = (ompt_get_proc_id_t) lookup("ompt_get_proc_id");
  ompt_enumerate_states = (ompt_enumerate_states_t) lookup("ompt_enumerate_states");
  ompt_enumerate_mutex_impls = (ompt_enumerate_mutex_impls_t) lookup("ompt_enumerate_mutex_impls");
  // Event callbacks. The *_t variants pass the callback type explicitly for
  // events that share a handler signature with another event.
  register_callback(ompt_callback_mutex_acquire);
  register_callback_t(ompt_callback_mutex_acquired, ompt_callback_mutex_t);
  register_callback_t(ompt_callback_mutex_released, ompt_callback_mutex_t);
  register_callback(ompt_callback_nest_lock);
  register_callback(ompt_callback_sync_region);
  register_callback_t(ompt_callback_sync_region_wait, ompt_callback_sync_region_t);
  register_callback_t(ompt_callback_reduction, ompt_callback_sync_region_t);
  register_callback(ompt_callback_control_tool);
  register_callback(ompt_callback_flush);
  register_callback(ompt_callback_cancel);
  register_callback(ompt_callback_implicit_task);
  register_callback_t(ompt_callback_lock_init, ompt_callback_mutex_acquire_t);
  register_callback_t(ompt_callback_lock_destroy, ompt_callback_mutex_t);
  register_callback(ompt_callback_work);
  register_callback(ompt_callback_master);
  register_callback(ompt_callback_parallel_begin);
  register_callback(ompt_callback_parallel_end);
  register_callback(ompt_callback_task_create);
  register_callback(ompt_callback_task_schedule);
  register_callback(ompt_callback_dependences);
  register_callback(ompt_callback_task_dependence);
  register_callback(ompt_callback_thread_begin);
  register_callback(ompt_callback_thread_end);
  printf("0: NULL_POINTER=%p\n", (void*)NULL);
  return 1; //success
}
// Tool shutdown hook: emits the final marker line at runtime shutdown.
void ompt_finalize(ompt_data_t *tool_data)
{
  printf("0: ompt_event_runtime_shutdown\n");
}
#ifdef __cplusplus
extern "C" {
#endif
// OMPT entry point discovered by the runtime. The returned struct must
// outlive this call, hence static storage duration.
ompt_start_tool_result_t* ompt_start_tool(
  unsigned int omp_version,
  const char *runtime_version)
{
  static ompt_start_tool_result_t result = {&ompt_initialize, &ompt_finalize,
                                            0};
  return &result;
}
#ifdef __cplusplus
}
#endif
#endif // ifndef USE_PRIVATE_TOOL
#ifdef _OMPT_TESTS
#undef _OMPT_TESTS
#endif
|
itunes_fmt_plug.c | /* JtR format to crack encrypted iTunes Backup passwords.
*
* This software is Copyright (c) 2017, Dhiru Kholia <dhiru at openwall.com>
* and it is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* All credit goes to Jean-Baptiste Bédrune, Jean Sigwald, DinoSec, philsmd,
* and Andrew Neitsch.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_itunes;
#elif FMT_REGISTERS_H
john_register_one(&fmt_itunes);
#else
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <openssl/des.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 1
#endif
#endif
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "johnswap.h"
#include "pbkdf2_hmac_sha1.h"
#include "pbkdf2_hmac_sha256.h"
#include "jumbo.h"
#include "memdbg.h"
#include "itunes_common.h"
// Format parameters for the JtR "itunes-backup" format.
#define FORMAT_LABEL "itunes-backup"
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "PBKDF2-SHA1 AES " SHA1_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "PBKDF2-SHA1 AES 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
// No binary is stored for this format (BINARY_SIZE 0); candidates are
// verified inside crypt_all via the cracked[] flags.
#define BINARY_SIZE 0
#define PLAINTEXT_LENGTH 125
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN 1
#define SALT_ALIGN sizeof(uint64_t)
// With SIMD, keys are processed in lane-sized batches for both PBKDF2 stages.
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT (SSE_GROUP_SZ_SHA1 * SSE_GROUP_SZ_SHA256)
#define MAX_KEYS_PER_CRYPT (SSE_GROUP_SZ_SHA1 * SSE_GROUP_SZ_SHA256)
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
// Self-test vectors: "$itunes_backup$*<version>*..." hash strings paired with
// their known plaintexts.
static struct fmt_tests itunes_tests[] = {
// real iTunes 9.x hash
{"$itunes_backup$*9*bc707ac0151660426c8114d04caad9d9ee2678a7b7ab05c18ee50cafb2613c31c8978e8b1e9cad2a*10000*266343aaf99102ba7f6af64a3a2d62637793f753**", "123456"},
// artificial hashes generated by hashcat's tools/test.pl script with a low dpic (iterations) value to ease testing
{"$itunes_backup$*10*31021f9c5a705c3625af21739d397082d90f7a00718a9307687625abc35fc3e4d78371e95cc708b6*10000*8840233131165307147445064802216857558435*1000*c77a159b325d10efee51a1c05701ef63fb85b599", "855632538858211"},
{"$itunes_backup$*10*b3d3f05b5367345fcb654b9b628e2ed24d8b8726f1f74707a956c776475d6ebfffc962340d9cbbca*10000*6832814730342072666684158073107301064276*1000*46de5e844e0ee1c81d2cca6acefb77789c1a7cd0", "1"},
// real iTunes 9.x hash
{"$itunes_backup$*9*06dc04bca4eeea2fbc1bc7356fa758243bead479673640a668db285c8f48c402cc435539d935509e*10000*37d2bd7caefbb24a9729e41a3257ef06188dc01e**", "test123"},
// {"$itunes_backup$*10*deff6d646eb1fa2b6741efee8b70eda84341a838cef2bb10e582669759d7e33c399a0ba2a52cb9ec*10000*f09cfa82cc1695657cb2c347ee127c2523795fda*10000000*66f159e15f3ddbbdd4057f8babef7ad4472fac10", "test123"}, // real hash, this is very very slow!
{NULL} // list terminator
};
#if defined (_OPENMP)
static int omp_t = 1;
#endif
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *cracked, cracked_count;
static struct custom_salt *cur_salt;
/*
 * One-time format setup: allocate the per-candidate key and result buffers.
 * Under OpenMP, min_keys_per_crypt is scaled by the thread count and
 * max_keys_per_crypt by threads * OMP_SCALE, so each thread gets a batch.
 */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	/* zero-initialized; ownership released in done() */
	saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
	cracked = mem_calloc(sizeof(*cracked), self->params.max_keys_per_crypt);
	cracked_count = self->params.max_keys_per_crypt;
}
/* Release the buffers allocated in init() (reverse order of allocation). */
static void done(void)
{
	MEM_FREE(cracked);
	MEM_FREE(saved_key);
}
/* Select the salt that subsequent crypt_all() calls will use. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
static void itunes_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
/* Return the stored plaintext candidate for the given batch slot. */
static char *get_key(int index)
{
	return saved_key[index];
}
/*
 * Derive the wrapping key for each candidate and try to unwrap the keybag.
 * v9 backups: a single PBKDF2-SHA1 pass over the salt.
 * v10+ backups: PBKDF2-SHA256 over the DPSL salt first, then PBKDF2-SHA1
 * over that result (see the $itunes_backup$*10* test vectors above).
 *
 * Fix: the candidate loop was previously inside the #ifdef _OPENMP block,
 * so a build without OpenMP executed the body exactly once and processed
 * only the first MAX_KEYS_PER_CRYPT candidates.  The loop now always runs
 * over the whole batch; only the pragma is conditional.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

	memset(cracked, 0, sizeof(cracked[0])*cracked_count);
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
	{
		unsigned char master[MAX_KEYS_PER_CRYPT][32];
		int i;

		if (cur_salt->version == 9) { /* iTunes Backup < 10 */
#ifdef SIMD_COEF_32
			int lens[MAX_KEYS_PER_CRYPT];
			unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT];
			int loops = MAX_KEYS_PER_CRYPT / SSE_GROUP_SZ_SHA1;

			for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
				lens[i] = strlen(saved_key[index+i]);
				pin[i] = (unsigned char*)saved_key[index+i];
				pout[i] = master[i];
			}
			for (i = 0; i < loops; i++)
				pbkdf2_sha1_sse((const unsigned char**)(pin + i * SSE_GROUP_SZ_SHA1), &lens[i * SSE_GROUP_SZ_SHA1], cur_salt->salt, SALTLEN, cur_salt->iterations, pout + (i * SSE_GROUP_SZ_SHA1), 32, 0);
#else
			for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i)
				pbkdf2_sha1((unsigned char *)saved_key[index+i], strlen(saved_key[index+i]), cur_salt->salt, SALTLEN, cur_salt->iterations, master[i], 32, 0);
#endif
			for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
				cracked[index+i] = itunes_common_decrypt(cur_salt, master[i]);
			}
		} else { /* iTunes Backup 10.x */
#if defined(SIMD_COEF_64) && defined(SIMD_COEF_32)
			int lens[MAX_KEYS_PER_CRYPT];
			unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT];
			int loops = MAX_KEYS_PER_CRYPT / SSE_GROUP_SZ_SHA256;

			for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
				lens[i] = strlen(saved_key[index+i]);
				pin[i] = (unsigned char*)saved_key[index+i];
				pout[i] = master[i];
			}
			for (i = 0; i < loops; i++)
				pbkdf2_sha256_sse((const unsigned char**)(pin + i * SSE_GROUP_SZ_SHA256), &lens[i * SSE_GROUP_SZ_SHA256], cur_salt->dpsl, SALTLEN, cur_salt->dpic, pout + (i * SSE_GROUP_SZ_SHA256), 32, 0);
			/* second pass: the SHA-256 output becomes the "password" input */
			for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
				lens[i] = 32;
				pin[i] = (unsigned char*)master[i];
				pout[i] = master[i];
			}
			loops = MAX_KEYS_PER_CRYPT / SSE_GROUP_SZ_SHA1;
			for (i = 0; i < loops; i++)
				pbkdf2_sha1_sse((const unsigned char**)(pin + i * SSE_GROUP_SZ_SHA1), &lens[i * SSE_GROUP_SZ_SHA1], cur_salt->salt, SALTLEN, cur_salt->iterations, pout + (i * SSE_GROUP_SZ_SHA1), 32, 0);
#else
			for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
				pbkdf2_sha256((unsigned char *)saved_key[index+i], strlen(saved_key[index+i]), cur_salt->dpsl, SALTLEN, cur_salt->dpic, master[i], 32, 0);
				pbkdf2_sha1(master[i], 32, cur_salt->salt, SALTLEN, cur_salt->iterations, master[i], 32, 0);
			}
#endif
			for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
				cracked[index+i] = itunes_common_decrypt(cur_salt, master[i]);
			}
		}
	}
	return count;
}
/* Report whether any candidate in the batch unwrapped the keybag. */
static int cmp_all(void *binary, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (cracked[i])
			return 1;
	}
	return 0;
}
/* One candidate matches iff its crypt_all() result was a success. */
static int cmp_one(void *binary, int index)
{
	return cracked[index] != 0;
}
/* Nothing further to verify: the decrypt check in crypt_all is exact. */
static int cmp_exact(char *source, int index)
{
	(void) source;
	(void) index;
	return 1;
}
/*
 * Format descriptor registered with the JtR core: first sub-struct holds
 * the static parameters (sizes, flags, tunable-cost names, test vectors),
 * the second the method table wired to the functions above and to the
 * shared helpers from itunes_common.h.
 */
struct fmt_main fmt_itunes = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{
			/* reported tunable costs, in the same order as the
			 * callbacks in the methods struct below */
			"version",
			"iteration count",
		},
		{ FORMAT_TAG },
		itunes_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		itunes_common_valid,
		fmt_default_split,
		fmt_default_binary,
		itunes_common_get_salt,
		{
			itunes_common_tunable_version,
			itunes_common_tunable_iterations,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		itunes_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
GB_binop__bclr_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bclr_int8)
// A.*B function (eWiseMult): GB (_AemultB_01__bclr_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__bclr_int8)
// A.*B function (eWiseMult): GB (_AemultB_03__bclr_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bclr_int8)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bclr_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__bclr_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bclr_int8)
// C=scalar+B GB (_bind1st__bclr_int8)
// C=scalar+B' GB (_bind1st_tran__bclr_int8)
// C=A+scalar GB (_bind2nd__bclr_int8)
// C=A'+scalar GB (_bind2nd_tran__bclr_int8)
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = GB_BITCLR (aij, bij, int8_t, 8)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_BITCLR (x, y, int8_t, 8) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BCLR || GxB_NO_INT8 || GxB_NO_BCLR_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// BITCLR is none of these, so the code generator emits this kernel with the
// "(none)" placeholder name and disables it with the surrounding #if 0.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Thin wrapper: the loop lives in the included template, which expands the
// GB_* type/operator macros defined at the top of this file.
GrB_Info GB (_Cdense_ewise3_noaccum__bclr_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// B has been pre-sliced into tasks (B_ek_slicing); the template consumes it.
GrB_Info GB (_Cdense_accumB__bclr_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__bclr_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — the block above always returns first; a
// harmless artifact of the code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled by the code generator: no colscale kernel is produced for BITCLR,
// hence the "(none)" placeholder name.
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled by the code generator: no rowscale kernel is produced for BITCLR.
// Fixed a generator typo here: the placeholder read "(node)"; it is "(none)"
// everywhere else in this file (see the colscale stub above).
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// Workspaces for slicing M, A and B are declared here and freed by
// GB_FREE_WORK; the add itself is in the included template.
GrB_Info GB (_AaddB__bclr_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// General-case elementwise multiply; the work is in the included meta file.
GrB_Info GB (_AemultB_01__bclr_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// GB_BINOP_FLIP is 1 for BITCLR (x,y order matters and no flipped opcode
// exists), so both the flipped and unflipped template instantiations are
// compiled and selected at run time via flipxy.
GrB_Info GB (_AemultB_02__bclr_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Mask-driven variant: tasks iterate over M (M_ek_slicing), not A or B.
GrB_Info GB (_AemultB_03__bclr_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__bclr_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// For every present entry p: Cx [p] = BITCLR (x, Bx [p]), i.e. clear bit
// Bx [p] of the scalar x.  GBB skips entries absent from the bitmap Bb
// (presumably Bb == NULL means all entries present — standard GBB contract).
GrB_Info GB (_bind1st__bclr_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
int8_t bij = Bx [p] ;
Cx [p] = GB_BITCLR (x, bij, int8_t, 8) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Mirror of bind1st: Cx [p] = BITCLR (Ax [p], y) for every present entry.
GrB_Info GB (_bind2nd__bclr_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int8_t aij = Ax [p] ;
Cx [p] = GB_BITCLR (aij, y, int8_t, 8) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = GB_BITCLR (x, aij, int8_t, 8) ; \
}
GrB_Info GB (_bind1st_tran__bclr_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code following this function
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = GB_BITCLR (aij, y, int8_t, 8) ; \
}
GrB_Info GB (_bind2nd_tran__bclr_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
/* Fixture for may-happen-in-parallel (MHP) analysis: recurses forever with
 * one barrier per level.  Not intended to terminate when actually run. */
void foo() {
	int x;
#pragma omp barrier
	foo();
}
/* Second recursive barrier fixture, mirroring foo() on the else branch. */
void bar() {
	int y;
#pragma omp barrier
	bar();
}
/* Each thread of the parallel region takes one of the two barrier-recursive
 * paths.  Fix: x was used without any declaration, which is a hard compile
 * error; it is now defined (and initialized, so the branch is well defined). */
int main() {
	int x = 0;
#pragma omp parallel
	{
		if (x > 2)
			foo();
		else
			bar();
	}
}
|
fvde_fmt_plug.c | /* JtR format to crack FileVault 2 hashes.
*
* This software is Copyright (c) 2017, Dhiru Kholia <kholia at kth.se> and it
* is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* Big thanks to Omar Choudary, Felix Grobert and Joachim Metz for making this
* format possible.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_fvde;
#elif FMT_REGISTERS_H
john_register_one(&fmt_fvde);
#else
#include <string.h>
#include <assert.h>
#include <errno.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 8
#endif
#endif
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "johnswap.h"
#include "aes.h"
#include "pbkdf2_hmac_sha256.h"
#include "jumbo.h"
#include "memdbg.h"
#include "fvde_common.h"
#define FORMAT_LABEL "FVDE"
#define FORMAT_NAME "FileVault 2"
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "PBKDF2-SHA256 AES " SHA256_ALGORITHM_NAME
#else
#if ARCH_BITS >= 64
#define ALGORITHM_NAME "PBKDF2-SHA256 AES 64/" ARCH_BITS_STR " " SHA2_LIB
#else
#define ALGORITHM_NAME "PBKDF2-SHA256 AES 32/" ARCH_BITS_STR " " SHA2_LIB
#endif
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 0
#define PLAINTEXT_LENGTH 125
#define SALT_SIZE sizeof(*cur_salt)
#define BINARY_ALIGN 1
#define SALT_ALIGN sizeof(int)
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#if defined (_OPENMP)
static int omp_t = 1;
#endif
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *cracked, cracked_count;
static fvde_custom_salt *cur_salt;
/*
 * One-time format setup: allocate the per-candidate key and result buffers.
 * Under OpenMP, min_keys_per_crypt is scaled by the thread count and
 * max_keys_per_crypt by threads * OMP_SCALE, so each thread gets a batch.
 */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	/* zero-initialized; ownership released in done() */
	saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
	cracked = mem_calloc(sizeof(*cracked), self->params.max_keys_per_crypt);
	cracked_count = self->params.max_keys_per_crypt;
}
/* Release the buffers allocated in init() (reverse order of allocation). */
static void done(void)
{
	MEM_FREE(cracked);
	MEM_FREE(saved_key);
}
/* Select the salt that subsequent crypt_all() calls will use. */
static void set_salt(void *salt)
{
	cur_salt = (fvde_custom_salt *)salt;
}
static void fvde_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
/* Return the stored plaintext candidate for the given batch slot. */
static char *get_key(int index)
{
	return saved_key[index];
}
/*
 * Unwrap data using AES Key Wrap (RFC3394)
 *
 * Translated from "AESUnwrap" function in aeswrap.py from https://github.com/dinosec/iphone-dataprotection project.
 *
 * The C implementation "aes_key_unwrap" in ramdisk_tools/bsdcrypto/key_wrap.c doesn't look any better.
 *
 * "libfvde_encryption_aes_key_unwrap" isn't great to look at either.
 *
 * Returns 1 when "key" unwraps the salt's wrapped blob (candidate is
 * correct), 0 otherwise.  NOTE: the cur_salt parameter intentionally
 * shadows the file-scope cur_salt.
 */
static int fvde_decrypt(fvde_custom_salt *cur_salt, unsigned char *key)
{
	uint64_t *C = cur_salt->blob.qword; // len(C) == 3
	int n = 2; // len(C) - 1
	uint64_t R[3]; // n + 1 = 3
	/* scratch block viewed both as two big-endian 64-bit halves and as the
	 * 16-byte AES block fed to the cipher */
	union {
		uint64_t qword[2];
		unsigned char stream[16];
	} todecrypt;
	int i, j;
	AES_KEY akey;
	uint64_t A = C[0];
	AES_set_decrypt_key(key, 128, &akey);
	for (i = 0; i < n + 1; i++)
		R[i] = C[i];
	/* RFC 3394 unwrap loop: 6 rounds (j), inner pass over the blocks (i);
	 * JOHNSWAP64 converts between host order and the wire's big-endian */
	for (j = 5; j >= 0; j--) { // 5 is fixed!
		for (i = 2; i >=1; i--) { // i = n
			todecrypt.qword[0] = JOHNSWAP64(A ^ (n*j+i));
			todecrypt.qword[1] = JOHNSWAP64(R[i]);
			AES_ecb_encrypt(todecrypt.stream, todecrypt.stream, &akey, AES_DECRYPT);
			A = JOHNSWAP64(todecrypt.qword[0]);
			R[i] = JOHNSWAP64(todecrypt.qword[1]);
		}
	}
	/* 0xA6A6... is the RFC 3394 default IV; recovering it proves the key */
	if (A == 0xa6a6a6a6a6a6a6a6ULL)
		return 1; // success!
	return 0;
}
/*
 * Derive a 16-byte key from each candidate with PBKDF2-SHA256 and test it
 * against the wrapped blob via fvde_decrypt().
 *
 * Fix: the candidate loop was previously inside the #ifdef _OPENMP block,
 * so a build without OpenMP executed the body exactly once and processed
 * only the first MAX_KEYS_PER_CRYPT candidates.  The loop now always runs
 * over the whole batch; only the pragma is conditional.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

	memset(cracked, 0, sizeof(cracked[0])*cracked_count);
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
	{
		unsigned char master[MAX_KEYS_PER_CRYPT][16];
		int i;
#ifdef SIMD_COEF_32
		int lens[MAX_KEYS_PER_CRYPT];
		unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT];

		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			lens[i] = strlen(saved_key[index+i]);
			pin[i] = (unsigned char*)saved_key[index+i];
			pout[i] = master[i];
		}
		pbkdf2_sha256_sse((const unsigned char**)pin, lens, cur_salt->salt, cur_salt->salt_length, cur_salt->iterations, pout, 16, 0);
#else
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i)
			pbkdf2_sha256((unsigned char *)saved_key[index+i], strlen(saved_key[index+i]), cur_salt->salt, cur_salt->salt_length, cur_salt->iterations, master[i], 16, 0);
#endif
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			cracked[index+i] = fvde_decrypt(cur_salt, master[i]);
		}
	}
	return count;
}
/* Report whether any candidate in the batch unwrapped the blob. */
static int cmp_all(void *binary, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (cracked[i])
			return 1;
	}
	return 0;
}
/* One candidate matches iff its crypt_all() result was a success. */
static int cmp_one(void *binary, int index)
{
	return cracked[index] != 0;
}
/* Nothing further to verify: the decrypt check in crypt_all is exact. */
static int cmp_exact(char *source, int index)
{
	(void) source;
	(void) index;
	return 1;
}
/*
 * Format descriptor registered with the JtR core: first sub-struct holds
 * the static parameters (sizes, flags, tunable-cost names, test vectors),
 * the second the method table wired to the functions above and to the
 * shared helpers from fvde_common.h.
 */
struct fmt_main fmt_fvde = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{
			/* reported tunable cost, matching the callback below */
			"iteration count",
		},
		{ FORMAT_TAG },
		fvde_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		fvde_common_valid,
		fmt_default_split,
		fmt_default_binary,
		fvde_common_get_salt,
		{
			fvde_common_iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		fvde_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
array_out_bound_fix.c | //#include <omp.h>
/* OpenMP test case ("fixed" out-of-bounds example).  NOTE(review): the loop
 * index of a "parallel for" is implicitly private, so inside the region *p
 * reads the shared outer i (presumably still 0), making the subscript
 * *p + i + 1 fall in [2,5] — within a[12]; confirm against the matching
 * unfixed test case.  a[] is read uninitialized, which the fixture ignores. */
int main(){
	int i = 0;
	int *p = &i;  // aliases the outer i, not the loop's private copy
	int a[12];
	int b[12];
#pragma omp parallel for
	for(i=1; i<5; i++){
		b[i] = a[*p + i +1];
	}
}
|
data.h | /*!
* Copyright (c) 2015 by Contributors
* \file data.h
* \brief The input data structure of xgboost.
* \author Tianqi Chen
*/
#ifndef XGBOOST_DATA_H_
#define XGBOOST_DATA_H_
#include <dmlc/base.h>
#include <dmlc/data.h>
#include <rabit/rabit.h>
#include <cstring>
#include <memory>
#include <numeric>
#include <algorithm>
#include <string>
#include <vector>
#include "./base.h"
#include "../../src/common/span.h"
#include "../../src/common/group_data.h"
#include "../../src/common/host_device_vector.h"
namespace xgboost {
// forward declare learner.
class LearnerImpl;
/*! \brief data type accepted by xgboost interface */
enum DataType {
kFloat32 = 1,
kDouble = 2,
kUInt32 = 3,
kUInt64 = 4
};
/*!
 * \brief Meta information about dataset, always sit in memory.
 */
class MetaInfo {
 public:
  /*! \brief number of rows in the data */
  uint64_t num_row_{0};
  /*! \brief number of columns in the data */
  uint64_t num_col_{0};
  /*! \brief number of nonzero entries in the data */
  uint64_t num_nonzero_{0};
  /*! \brief label of each instance */
  HostDeviceVector<bst_float> labels_;
  /*!
   * \brief specified root index of each instance,
   *   can be used for multi task setting
   */
  std::vector<bst_uint> root_index_;
  /*!
   * \brief the index of begin and end of a group
   *   needed when the learning task is ranking.
   */
  std::vector<bst_uint> group_ptr_;
  /*! \brief weights of each instance, optional */
  HostDeviceVector<bst_float> weights_;
  /*! \brief session-id of each instance, optional */
  std::vector<uint64_t> qids_;
  /*!
   * \brief initialized margins,
   * if specified, xgboost will start from this init margin
   * can be used to specify initial prediction to boost from.
   */
  HostDeviceVector<bst_float> base_margin_;
  /*! \brief version flag, used to check version of this info */
  static const int kVersion = 2;
  /*! \brief version that introduced qid field */
  static const int kVersionQidAdded = 2;
  /*! \brief default constructor */
  MetaInfo() = default;
  /*!
   * \brief Get weight of each instances.
   * \param i Instance index.
   * \return The weight.  Defaults to 1.0f when no weights were supplied.
   */
  inline bst_float GetWeight(size_t i) const {
    return weights_.Size() != 0 ? weights_.HostVector()[i] : 1.0f;
  }
  /*!
   * \brief Get the root index of i-th instance.
   * \param i Instance index.
   * \return The pre-defined root index of i-th instance, or 0 when unset.
   */
  inline unsigned GetRoot(size_t i) const {
    return root_index_.size() != 0 ? root_index_[i] : 0U;
  }
  /*! \brief get sorted indexes (argsort) of labels by absolute value (used by cox loss) */
  inline const std::vector<size_t>& LabelAbsSort() const {
    // lazily computed and memoized; a matching size means the cache is
    // already filled for the current labels_
    if (label_order_cache_.size() == labels_.Size()) {
      return label_order_cache_;
    }
    label_order_cache_.resize(labels_.Size());
    std::iota(label_order_cache_.begin(), label_order_cache_.end(), 0);
    const auto& l = labels_.HostVector();
    XGBOOST_PARALLEL_SORT(label_order_cache_.begin(), label_order_cache_.end(),
              [&l](size_t i1, size_t i2) {return std::abs(l[i1]) < std::abs(l[i2]);});
    return label_order_cache_;
  }
  /*! \brief clear all the information */
  void Clear();
  /*!
   * \brief Load the Meta info from binary stream.
   * \param fi The input stream
   */
  void LoadBinary(dmlc::Stream* fi);
  /*!
   * \brief Save the Meta info to binary stream
   * \param fo The output stream.
   */
  void SaveBinary(dmlc::Stream* fo) const;
  /*!
   * \brief Set information in the meta info.
   * \param key The key of the information.
   * \param dptr The data pointer of the source array.
   * \param dtype The type of the source data.
   * \param num Number of elements in the source array.
   */
  void SetInfo(const char* key, const void* dptr, DataType dtype, size_t num);

 private:
  /*! \brief argsort of labels; mutable so the const LabelAbsSort() can memoize */
  mutable std::vector<size_t> label_order_cache_;
};
/*! \brief Element from a sparse vector: a (feature index, feature value) pair. */
struct Entry {
  /*! \brief feature index */
  bst_uint index;
  /*! \brief feature value */
  bst_float fvalue;
  /*! \brief default constructor */
  Entry() = default;
  /*!
   * \brief constructor with index and value
   * \param index The feature or row index.
   * \param fvalue The feature value.
   */
  Entry(bst_uint index, bst_float fvalue) : index(index), fvalue(fvalue) {}
  /*! \brief comparator ordering entries by ascending feature value */
  inline static bool CmpValue(const Entry& a, const Entry& b) {
    return b.fvalue > a.fvalue;
  }
  /*! \brief entries are equal when both index and value match */
  inline bool operator==(const Entry& other) const {
    return this->index == other.index && this->fvalue == other.fvalue;
  }
};
/*!
* \brief In-memory storage unit of sparse batch, stored in CSR format.
*/
class SparsePage {
public:
// Offset for each row.
HostDeviceVector<size_t> offset;
/*! \brief the data of the segments */
HostDeviceVector<Entry> data;
size_t base_rowid;
/*! \brief an instance of sparse vector in the batch */
using Inst = common::Span<Entry const>;
/*! \brief get i-th row from the batch */
inline Inst operator[](size_t i) const {
const auto& data_vec = data.HostVector();
const auto& offset_vec = offset.HostVector();
size_t size;
// in distributed mode, some partitions may not get any instance for a feature. Therefore
// we should set the size as zero
if (rabit::IsDistributed() && i + 1 >= offset_vec.size()) {
size = 0;
} else {
size = offset_vec[i + 1] - offset_vec[i];
}
return {data_vec.data() + offset_vec[i],
static_cast<Inst::index_type>(size)};
}
/*! \brief constructor */
SparsePage() {
this->Clear();
}
/*! \return number of instance in the page */
inline size_t Size() const {
return offset.Size() - 1;
}
/*! \return estimation of memory cost of this page */
inline size_t MemCostBytes() const {
return offset.Size() * sizeof(size_t) + data.Size() * sizeof(Entry);
}
/*! \brief clear the page */
inline void Clear() {
base_rowid = 0;
auto& offset_vec = offset.HostVector();
offset_vec.clear();
offset_vec.push_back(0);
data.HostVector().clear();
}
SparsePage GetTranspose(int num_columns) const {
SparsePage transpose;
common::ParallelGroupBuilder<Entry> builder(&transpose.offset.HostVector(),
&transpose.data.HostVector());
const int nthread = omp_get_max_threads();
builder.InitBudget(num_columns, nthread);
long batch_size = static_cast<long>(this->Size()); // NOLINT(*)
#pragma omp parallel for schedule(static)
for (long i = 0; i < batch_size; ++i) { // NOLINT(*)
int tid = omp_get_thread_num();
auto inst = (*this)[i];
for (bst_uint j = 0; j < inst.size(); ++j) {
builder.AddBudget(inst[j].index, tid);
}
}
builder.InitStorage();
#pragma omp parallel for schedule(static)
for (long i = 0; i < batch_size; ++i) { // NOLINT(*)
int tid = omp_get_thread_num();
auto inst = (*this)[i];
for (bst_uint j = 0; j < inst.size(); ++j) {
builder.Push(
inst[j].index,
Entry(static_cast<bst_uint>(this->base_rowid + i), inst[j].fvalue),
tid);
}
}
return transpose;
}
void SortRows() {
auto ncol = static_cast<bst_omp_uint>(this->Size());
#pragma omp parallel for schedule(dynamic, 1)
for (bst_omp_uint i = 0; i < ncol; ++i) {
if (this->offset.HostVector()[i] < this->offset.HostVector()[i + 1]) {
std::sort(
this->data.HostVector().begin() + this->offset.HostVector()[i],
this->data.HostVector().begin() + this->offset.HostVector()[i + 1],
Entry::CmpValue);
}
}
}
/*!
* \brief Push row block into the page.
* \param batch the row batch.
*/
void Push(const dmlc::RowBlock<uint32_t>& batch);
/*!
* \brief Push a sparse page
* \param batch the row page
*/
void Push(const SparsePage &batch);
/*!
* \brief Push a SparsePage stored in CSC format
* \param batch The row batch to be pushed
*/
void PushCSC(const SparsePage& batch);
/*!
* \brief Push one instance into page
* \param inst an instance row
*/
void Push(const Inst &inst);
/*! \return number of instances; non-const overload duplicating Size() const */
size_t Size() { return offset.Size() - 1; }
};
/*!
 * \brief Abstract implementation behind BatchIterator.
 * Concrete subclasses supply cloning, dereference, advance and
 * end-of-stream detection over SparsePage batches.
 */
class BatchIteratorImpl {
 public:
  virtual ~BatchIteratorImpl() {}
  /*! \brief deep-copy this iterator state; caller owns the returned pointer */
  virtual BatchIteratorImpl* Clone() = 0;
  /*! \brief access the current batch */
  virtual SparsePage& operator*() = 0;
  virtual const SparsePage& operator*() const = 0;
  /*! \brief advance to the next batch */
  virtual void operator++() = 0;
  /*! \return whether the iterator is exhausted */
  virtual bool AtEnd() const = 0;
};
/*!
 * \brief Forward iterator over SparsePage batches; owns (and deep-copies)
 *        a BatchIteratorImpl.
 */
class BatchIterator {
 public:
  using iterator_category = std::forward_iterator_tag;
  /*! \brief takes ownership of impl (may be nullptr for an end iterator) */
  explicit BatchIterator(BatchIteratorImpl* impl) { impl_.reset(impl); }
  /*! \brief copy constructor deep-copies the underlying impl via Clone() */
  BatchIterator(const BatchIterator& other) {
    if (other.impl_) {
      impl_.reset(other.impl_->Clone());
    } else {
      impl_.reset();
    }
  }
  void operator++() {
    CHECK(impl_ != nullptr);
    ++(*impl_);
  }
  SparsePage& operator*() {
    CHECK(impl_ != nullptr);
    return *(*impl_);
  }
  const SparsePage& operator*() const {
    CHECK(impl_ != nullptr);
    return *(*impl_);
  }
  // NOTE(review): rhs is intentionally ignored here — inequality only means
  // "this iterator is not at end", which is all range-for loops need.
  // Not a general-purpose comparison; confirm before using elsewhere.
  bool operator!=(const BatchIterator& rhs) const {
    CHECK(impl_ != nullptr);
    return !impl_->AtEnd();
  }
  bool AtEnd() const {
    CHECK(impl_ != nullptr);
    return impl_->AtEnd();
  }
 private:
  std::unique_ptr<BatchIteratorImpl> impl_;
};
/*!
 * \brief Lightweight range wrapper so callers can range-for over batches.
 * end() is a null iterator; comparison relies on BatchIterator::operator!=
 * checking AtEnd() rather than comparing states.
 */
class BatchSet {
 public:
  explicit BatchSet(BatchIterator begin_iter) : begin_iter_(begin_iter) {}
  BatchIterator begin() { return begin_iter_; }
  BatchIterator end() { return BatchIterator(nullptr); }
 private:
  BatchIterator begin_iter_;
};
/*!
 * \brief This is a data structure that the user can pass to DMatrix::Create
 * to create a DMatrix for training; the user can create this data structure
 * for customized data loading on a single machine.
 *
 * In a distributed setting, usually a customized dmlc::Parser is needed instead.
 */
class DataSource : public dmlc::DataIter<SparsePage> {
 public:
  /*!
   * \brief Meta information about the dataset.
   * The subclass needs to be able to load this correctly from data.
   */
  MetaInfo info;
};
/*!
 * \brief A vector-like structure to represent a set of rows.
 * Saves memory when all rows are in the set (the common case in xgb):
 * an empty rows_ vector encodes the identity set {0, ..., size_-1}.
 */
class RowSet {
 public:
  /*! \return i-th row index */
  inline bst_uint operator[](size_t i) const;
  /*! \return the size of the set. */
  inline size_t Size() const;
  /*! \brief push the index back to the set */
  inline void PushBack(bst_uint i);
  /*! \brief clear the set */
  inline void Clear();
  /*!
   * \brief save rowset to file.
   * \param fo The file to be saved.
   */
  inline void Save(dmlc::Stream* fo) const;
  /*!
   * \brief Load rowset from file.
   * \param fi The file to be loaded.
   * \return if read is successful.
   */
  inline bool Load(dmlc::Stream* fi);
  /*! \brief constructor */
  RowSet() = default;
 private:
  /*! \brief number of rows in the set */
  uint64_t size_{0};
  /*! \brief explicit row indices; empty means "all rows 0..size_-1" */
  std::vector<bst_uint> rows_;
};
/*!
 * \brief Internal data structure used by XGBoost during training.
 * There are two ways to create a customized DMatrix that reads in user defined-format.
 *
 * - Provide a dmlc::Parser and pass into the DMatrix::Create
 * - Alternatively, if data can be represented by an URL, define a new dmlc::Parser and register by DMLC_REGISTER_DATA_PARSER;
 *   - This works best for user defined data input source, such as data-base, filesystem.
 * - Provide a DataSource, that can be passed to DMatrix::Create
 *   This can be used to re-use in-memory data structure into DMatrix.
 */
class DMatrix {
 public:
  /*! \brief default constructor */
  DMatrix() = default;
  /*! \brief meta information of the dataset */
  virtual MetaInfo& Info() = 0;
  /*! \brief meta information of the dataset */
  virtual const MetaInfo& Info() const = 0;
  /**
   * \brief Gets row batches. Use range based for loop over BatchSet to access individual batches.
   */
  virtual BatchSet GetRowBatches() = 0;
  virtual BatchSet GetSortedColumnBatches() = 0;
  virtual BatchSet GetColumnBatches() = 0;
  // the following are column meta data, should be able to answer them fast.
  /*! \return Whether the data columns are in a single column block. */
  virtual bool SingleColBlock() const = 0;
  /*! \brief get column density */
  virtual float GetColDensity(size_t cidx) = 0;
  /*! \brief virtual destructor */
  virtual ~DMatrix() = default;
  /*!
   * \brief Save DMatrix to local file.
   * The saved file only works for non-sharded dataset (single machine training).
   * This API is deprecated and its use is discouraged.
   * \param fname The file name to be saved.
   */
  virtual void SaveToLocalFile(const std::string& fname);
  /*!
   * \brief Load DMatrix from URI.
   * \param uri The URI of input.
   * \param silent Whether print information during loading.
   * \param load_row_split Flag to read in part of rows, divided among the workers in distributed mode.
   * \param file_format The format type of the file, used for dmlc::Parser::Create.
   *   By default "auto" will be able to load in both local binary file.
   * \param page_size Page size for external memory.
   * \return The created DMatrix.
   */
  static DMatrix* Load(const std::string& uri,
                       bool silent,
                       bool load_row_split,
                       const std::string& file_format = "auto",
                       const size_t page_size = kPageSize);
  /*!
   * \brief create a new DMatrix, by wrapping a row_iterator, and meta info.
   * \param source The source iterator of the data, the create function takes ownership of the source.
   * \param cache_prefix The path to prefix of temporary cache file of the DMatrix when used in external memory mode.
   *   This can be nullptr for common cases, and in-memory mode will be used.
   * \return a Created DMatrix.
   */
  static DMatrix* Create(std::unique_ptr<DataSource>&& source,
                         const std::string& cache_prefix = "");
  /*!
   * \brief Create a DMatrix by loading data from parser.
   * Parser can later be deleted after the DMatrix is created.
   * \param parser The input data parser
   * \param cache_prefix The path to prefix of temporary cache file of the DMatrix when used in external memory mode.
   *   This can be nullptr for common cases, and in-memory mode will be used.
   * \param page_size Page size for external memory.
   * \sa dmlc::Parser
   * \note dmlc-core provides efficient distributed data parser for libsvm format.
   *   User can create and register customized parser to load their own format using DMLC_REGISTER_DATA_PARSER.
   *   See "dmlc-core/include/dmlc/data.h" for detail.
   * \return A created DMatrix.
   */
  static DMatrix* Create(dmlc::Parser<uint32_t>* parser,
                         const std::string& cache_prefix = "",
                         const size_t page_size = kPageSize);
  /*! \brief page size 32 MB */
  static const size_t kPageSize = 32UL << 20UL;
};
// implementation of inline functions
// An empty rows_ vector encodes the identity set {0, ..., size_-1},
// so the index maps to itself in that case.
inline bst_uint RowSet::operator[](size_t i) const {
  return rows_.empty() ? static_cast<bst_uint>(i) : rows_[i];
}
/*! \return the number of rows in the set */
inline size_t RowSet::Size() const {
  return size_;
}
// Reset to the empty set; both statements are independent, order is arbitrary.
inline void RowSet::Clear() {
  size_ = 0;
  rows_.clear();
}
/*!
 * \brief Append a row index to the set.
 * While indices arrive densely (0, 1, 2, ...) only the counter is bumped;
 * on the first gap the implicit identity set is materialized into rows_.
 * \param i the row index to append
 */
inline void RowSet::PushBack(bst_uint i) {
  if (rows_.empty()) {
    if (i == size_) {
      // Still dense: just count it.
      ++size_;
      return;
    }
    // First non-dense index: materialize 0..size_-1 explicitly.
    // Loop variable renamed from `i` to `j`: the original shadowed the
    // parameter `i`, which was harmless here but confusing and trips -Wshadow.
    rows_.resize(size_);
    for (size_t j = 0; j < size_; ++j) {
      rows_[j] = static_cast<bst_uint>(j);
    }
  }
  rows_.push_back(i);
  ++size_;
}
/*!
 * \brief Serialize: writes rows_ first, then the raw size_ counter.
 * Load() relies on this exact ordering.
 */
inline void RowSet::Save(dmlc::Stream* fo) const {
  fo->Write(rows_);
  fo->Write(&size_, sizeof(size_));
}
/*!
 * \brief Deserialize the set written by Save().
 * If the explicit row vector is non-empty, size_ is implied by it and the
 * trailing counter is not read.
 * NOTE(review): assumes dmlc::Stream::Read returns the number of bytes read
 * when given a raw buffer — confirm against the dmlc-core API.
 * \return true on success.
 */
inline bool RowSet::Load(dmlc::Stream* fi) {
  if (!fi->Read(&rows_)) return false;
  if (rows_.size() != 0) return true;
  return fi->Read(&size_, sizeof(size_)) == sizeof(size_);
}
} // namespace xgboost
namespace dmlc {
DMLC_DECLARE_TRAITS(is_pod, xgboost::Entry, true);
DMLC_DECLARE_TRAITS(has_saveload, xgboost::RowSet, true);
}
#endif // XGBOOST_DATA_H_
|
Example_affinity_display.2.c | /*
* @@name: affinity_display.2c
* @@type: C
* @@compilable: yes
* @@linkable: yes
* @@expect: success
* @@version: omp_5.0
*/
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
void socket_work(int socket_num, int n_thrds);
int main(void)
{
   int n_sockets, socket_num, n_thrds_on_socket;

   /* Enable two levels of nested parallelism: one team per socket at the
      outer level, one team per socket's cores at the inner level. */
   omp_set_nested(1);               // or env var= OMP_NESTED=true
   omp_set_max_active_levels(2);    // or env var= OMP_MAX_ACTIVE_LEVELS=2

   /* One place per socket is assumed (see OMP_PLACES comment below). */
   n_sockets         = omp_get_num_places();
   n_thrds_on_socket = omp_get_place_num_procs(0);

   // OMP_NUM_THREADS=2,4
   // OMP_PLACES="{0,2,4,6},{1,3,5,7}" #2 sockets; even/odd proc-ids
   // OMP_AFFINITY_FORMAT=\
   // "nest_level= %L, parent_thrd_num= %a, thrd_num= %n, thrd_affinity= %A"

   #pragma omp parallel num_threads(n_sockets) private(socket_num)
   {
      socket_num = omp_get_place_num();

      if(socket_num==0)
         printf(" LEVEL 1 AFFINITIES 1 thread/socket, %d sockets:\n\n", n_sockets);

      /* Print this thread's affinity in OMP_AFFINITY_FORMAT. */
      omp_display_affinity(NULL);   // not needed if OMP_DISPLAY_AFFINITY=TRUE

      // OUTPUT:
      // LEVEL 1 AFFINITIES 1 thread/socket, 2 sockets:
      // nest_level= 1, parent_thrd_num= 0, thrd_num= 0, thrd_affinity= 0,2,4,6
      // nest_level= 1, parent_thrd_num= 0, thrd_num= 1, thrd_affinity= 1,3,5,7

      /* Spawn the inner (per-core) team on this socket. */
      socket_work(socket_num, n_thrds_on_socket);
   }

   return 0;
}
/* Inner nesting level: runs an n_thrds-wide parallel region pinned to the
   given socket's place and displays each thread's affinity. */
void socket_work(int socket_num, int n_thrds)
{
   #pragma omp parallel num_threads(n_thrds)
   {
      if(omp_get_thread_num()==0)
         printf(" LEVEL 2 AFFINITIES, %d threads on socket %d\n",n_thrds, socket_num);

      omp_display_affinity(NULL);   // not needed if OMP_DISPLAY_AFFINITY=TRUE

      // OUTPUT:
      // LEVEL 2 AFFINITIES, 4 threads on socket 0
      // nest_level= 2, parent_thrd_num= 0, thrd_num= 0, thrd_affinity= 0
      // nest_level= 2, parent_thrd_num= 0, thrd_num= 1, thrd_affinity= 2
      // nest_level= 2, parent_thrd_num= 0, thrd_num= 2, thrd_affinity= 4
      // nest_level= 2, parent_thrd_num= 0, thrd_num= 3, thrd_affinity= 6

      // LEVEL 2 AFFINITIES, 4 threads on socket 1
      // nest_level= 2, parent_thrd_num= 1, thrd_num= 0, thrd_affinity= 1
      // nest_level= 2, parent_thrd_num= 1, thrd_num= 1, thrd_affinity= 3
      // nest_level= 2, parent_thrd_num= 1, thrd_num= 2, thrd_affinity= 5
      // nest_level= 2, parent_thrd_num= 1, thrd_num= 3, thrd_affinity= 7

      // ... Do Some work on Socket
   }
}
|
eavlDestinationTopologyMapOp.h | // Copyright 2010-2014 UT-Battelle, LLC. See LICENSE.txt for more information.
#ifndef EAVL_DESTINATION_TOPOLOGY_MAP_OP_H
#define EAVL_DESTINATION_TOPOLOGY_MAP_OP_H
#include "eavlCUDA.h"
#include "eavlCellSet.h"
#include "eavlCellSetExplicit.h"
#include "eavlCellSetAllStructured.h"
#include "eavlDataSet.h"
#include "eavlArray.h"
#include "eavlOpDispatch.h"
#include "eavlOperation.h"
#include "eavlTopology.h"
#include "eavlException.h"
#include <time.h>
#ifdef HAVE_OPENMP
#include <omp.h>
#endif
#ifndef DOXYGEN
// Host-side executor: for each output element, gather its topology ids from
// the connectivity object and apply the functor element-wise (OpenMP parallel).
template <class CONN>
struct eavlDestinationTopologyMapOp_CPU
{
    static inline eavlArray::Location location() { return eavlArray::HOST; }
    template <class F, class IN, class OUT>
    static void call(int nitems, CONN &conn,
                     const IN inputs, OUT outputs, F &functor)
    {
        // Scratch buffer of local topology ids, private to each OpenMP thread.
        int ids[MAX_LOCAL_TOPOLOGY_IDS];
#pragma omp parallel for private(ids)
        for (int index = 0; index < nitems; ++index)
        {
            int nids;
            // Fetch the element's shape type and its component ids.
            int shapeType = conn.GetElementComponents(index, nids, ids);
            collect(index, outputs) = functor(shapeType, nids, ids, collect(index, inputs));
        }
    }
};
#if defined __CUDACC__
// CUDA kernel: same per-element work as the CPU path; each thread strides
// through the index space by the total thread count (index += numThreads).
template <class CONN, class F, class IN, class OUT>
__global__ void
eavlDestinationTopologyMapOp_kernel(int nitems, CONN conn,
                                    const IN inputs, OUT outputs, F functor)
{
    const int numThreads = blockDim.x * gridDim.x;
    const int threadID   = blockIdx.x * blockDim.x + threadIdx.x;
    // Per-thread scratch buffer of local topology ids.
    int ids[MAX_LOCAL_TOPOLOGY_IDS];
    for (int index = threadID; index < nitems; index += numThreads)
    {
        int nids;
        int shapeType = conn.GetElementComponents(index, nids, ids);
        collect(index, outputs) = functor(shapeType, nids, ids, collect(index, inputs));
    }
}
// Device-side executor: launches the kernel with a fixed launch configuration
// (32 blocks x 256 threads); the kernel's stride loop covers any nitems.
template <class CONN>
struct eavlDestinationTopologyMapOp_GPU
{
    static inline eavlArray::Location location() { return eavlArray::DEVICE; }
    template <class F, class IN, class OUT>
    static void call(int nitems, CONN &conn,
                     const IN inputs, OUT outputs, F &functor)
    {
        int numThreads = 256;
        dim3 threads(numThreads,   1, 1);
        dim3 blocks (32,           1, 1);
        eavlDestinationTopologyMapOp_kernel<<< blocks, threads >>>(nitems, conn,
                                                                   inputs, outputs, functor);
        CUDA_CHECK_ERROR();
    }
};
#endif
#endif
// ****************************************************************************
// Class: eavlDestinationTopologyMapOp
//
// Purpose:
/// Map from input to output arrays on the same topology type.
///
/// Much like a standard map, in that it does an element-wise map
/// between arrays of the same length, but the fields are known to
/// be on some sort of topology type -- typically a cell. Or, you
/// could instead think of it like an eavlTopologyMap, but both the
/// inputs and outputs are on the same topology type. (Or like an
/// eavlCombinedTopologyMap, but without a source topology.)
///
/// Essentially, this just adds a "shapetype" to the functor call of
/// a standard map operation. For example, a cell-to-cell map would
/// be a simple map, but with the shape type (e.g. EAVL_HEX or
/// EAVL_TET) passed along with every functor call.
//
// Programmer: Jeremy Meredith
// Creation: August 1, 2013
//
// Modifications:
// ****************************************************************************
template <class I, class O, class F>
class eavlDestinationTopologyMapOp : public eavlOperation
{
  protected:
    eavlCellSet *cells;      // cell set supplying the destination topology
    eavlTopology topology;   // which topology (e.g. cells, edges) to map over
    I            inputs;     // input array tuple
    O            outputs;    // output array tuple; outputs.first defines length
    F            functor;    // per-element operation
  public:
    eavlDestinationTopologyMapOp(eavlCellSet *c, eavlTopology t,
                                 I i, O o, F f)
        : cells(c), topology(t), inputs(i), outputs(o), functor(f)
    {
    }
    // Dispatch on the concrete cell set type and run on the host.
    // NOTE(review): if the cell set is neither explicit nor all-structured,
    // this silently does nothing — confirm that is the intended behavior.
    virtual void GoCPU()
    {
        eavlCellSetExplicit *elExp = dynamic_cast<eavlCellSetExplicit*>(cells);
        eavlCellSetAllStructured *elStr = dynamic_cast<eavlCellSetAllStructured*>(cells);
        int n = outputs.first.length();
        if (elExp)
        {
            eavlExplicitDestination &conn = elExp->GetDestination(topology);
            eavlOpDispatch<eavlDestinationTopologyMapOp_CPU<eavlExplicitDestination> >(n, conn, inputs, outputs, functor);
        }
        else if (elStr)
        {
            eavlRegularDestination conn = eavlRegularDestination(elStr->GetRegularStructure(),topology);
            eavlOpDispatch<eavlDestinationTopologyMapOp_CPU<eavlRegularDestination> >(n, conn, inputs, outputs, functor);
        }
    }
    // Same dispatch for the device path; explicit connectivity arrays are
    // migrated to the device before the launch and back to the host after.
    virtual void GoGPU()
    {
#ifdef HAVE_CUDA
        eavlCellSetExplicit *elExp = dynamic_cast<eavlCellSetExplicit*>(cells);
        eavlCellSetAllStructured *elStr = dynamic_cast<eavlCellSetAllStructured*>(cells);
        int n = outputs.first.length();
        if (elExp)
        {
            eavlExplicitDestination &conn = elExp->GetDestination(topology);
            conn.shapetype.NeedOnDevice();
            conn.destination.NeedOnDevice();
            conn.mapCellToIndex.NeedOnDevice();
            eavlOpDispatch<eavlDestinationTopologyMapOp_GPU<eavlExplicitDestination> >(n, conn, inputs, outputs, functor);
            conn.shapetype.NeedOnHost();
            conn.destination.NeedOnHost();
            conn.mapCellToIndex.NeedOnHost();
        }
        else if (elStr)
        {
            eavlRegularDestination conn = eavlRegularDestination(elStr->GetRegularStructure(),topology);
            eavlOpDispatch<eavlDestinationTopologyMapOp_GPU<eavlRegularDestination> >(n, conn, inputs, outputs, functor);
        }
#else
        THROW(eavlException,"Executing GPU code without compiling under CUDA compiler.");
#endif
    }
};
// helper function for type deduction
// Factory helper so callers get template-argument deduction from the
// constructor arguments; caller owns the returned pointer.
template <class I, class O, class F>
eavlDestinationTopologyMapOp<I,O,F> *new_eavlDestinationTopologyMapOp(eavlCellSet *c, eavlTopology t,
                                                                      I i, O o, F f)
{
    return new eavlDestinationTopologyMapOp<I,O,F>(c,t,i,o,f);
}
#endif
|
broadcast_reduce-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015-2017 by Contributors
* \file broadcast_reduce-inl.h
* \brief CPU-specific Function definition of broadcast and reduce operators
*/
#ifndef MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_
#define MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_
#include <mxnet/operator_util.h>
#include <algorithm>
#include <vector>
#include <string>
#include <utility>
#include "../mshadow_op.h"
#include "../mxnet_op.h"
#include "../operator_common.h"
namespace mxnet {
namespace op {
namespace mxnet_op {
// Element-wise binary op with broadcasting: each Map call processes `length`
// consecutive output elements starting at `base`, walking lhs/rhs with
// (possibly zero) strides so broadcast dimensions repeat their values.
template<int ndim, typename OP>
struct binary_broadcast_kernel {
  /*! \brief Map function for binary_broadcast_kernel (same input/output element types) */
  template<typename IType, typename DType>
  MSHADOW_XINLINE static void Map(index_t base, index_t length, OpReqType req,
                                  const Shape <ndim> &lstride, const Shape <ndim> &rstride,
                                  const Shape <ndim> &oshape, IType *lhs, IType *rhs,
                                  DType *out) {
    Shape <ndim> coord = unravel(base, oshape);
    auto lidx = static_cast<index_t>(dot(coord, lstride));
    auto ridx = static_cast<index_t>(dot(coord, rstride));
    KERNEL_ASSIGN(out[base], req, OP::Map(lhs[lidx], rhs[ridx]));
    // starts from 1 to avoid extra inc at end of loop
    for (index_t i = 1; i < length; ++i) {
      inc(&coord, oshape, &lidx, lstride, &ridx, rstride);
      // When tuning, don't actually run the op, since it's not going to be tuned against
      // the actual op we'll eventually be using
      KERNEL_ASSIGN(out[base + i], req, OP::Map(lhs[lidx], rhs[ridx]));
    }
  }
  /*! \brief Map function for binary_broadcast_kernel (distinct lhs/rhs/output types) */
  template<typename LType, typename RType, typename OType>
  MSHADOW_XINLINE static void Map(index_t base, index_t length, OpReqType req,
                                  const Shape <ndim> &lstride, const Shape <ndim> &rstride,
                                  const Shape <ndim> &oshape, LType *lhs, RType *rhs,
                                  OType *out) {
    Shape <ndim> coord = unravel(base, oshape);
    auto lidx = static_cast<index_t>(dot(coord, lstride));
    auto ridx = static_cast<index_t>(dot(coord, rstride));
    KERNEL_ASSIGN(out[base], req, OP::Map(lhs[lidx], rhs[ridx]));
    // starts from 1 to avoid extra inc at end of loop
    for (index_t i = 1; i < length; ++i) {
      inc(&coord, oshape, &lidx, lstride, &ridx, rstride);
      // When tuning, don't actually run the op, since it's not going to be tuned against
      // the actual op we'll eventually be using
      KERNEL_ASSIGN(out[base + i], req, OP::Map(lhs[lidx], rhs[ridx]));
    }
  }
  /*! \brief Map function for binary_broadcast_kernel (scalar lhs value) */
  template<typename IType, typename DType>
  MSHADOW_XINLINE static void Map(index_t base, index_t length, OpReqType req,
                                  const Shape <ndim> &lstride, const Shape <ndim> &rstride,
                                  const Shape <ndim> &oshape, IType lhs, IType *rhs,
                                  DType *out) {
    Shape <ndim> coord = unravel(base, oshape);
    auto lidx = static_cast<index_t>(dot(coord, lstride));
    auto ridx = static_cast<index_t>(dot(coord, rstride));
    KERNEL_ASSIGN(out[base], req, OP::Map(lhs, rhs[ridx]));
    // starts from 1 to avoid extra inc at end of loop
    for (index_t i = 1; i < length; ++i) {
      inc(&coord, oshape, &lidx, lstride, &ridx, rstride);
      // When tuning, don't actually run the op, since it's not going to be tuned against
      // the actual op we'll eventually be using
      KERNEL_ASSIGN(out[base + i], req, OP::Map(lhs, rhs[ridx]));
    }
  }
  /*! \brief Map function for binary_broadcast_kernel */
  /* used for mixed type binary ops: lhs array of a different type than rhs/out */
  template<typename IType, typename DType,
           typename std::enable_if<!std::is_same<IType, DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t base, index_t length, OpReqType req,
                                  const Shape <ndim> &lstride, const Shape <ndim> &rstride,
                                  const Shape <ndim> &oshape, IType *lhs, DType *rhs,
                                  DType *out) {
    Shape <ndim> coord = unravel(base, oshape);
    auto lidx = static_cast<index_t>(dot(coord, lstride));
    auto ridx = static_cast<index_t>(dot(coord, rstride));
    KERNEL_ASSIGN(out[base], req, OP::Map(lhs[lidx], rhs[ridx]));
    // starts from 1 to avoid extra inc at end of loop
    for (index_t i = 1; i < length; ++i) {
      inc(&coord, oshape, &lidx, lstride, &ridx, rstride);
      // When tuning, don't actually run the op, since it's not going to be tuned against
      // the actual op we'll eventually be using
      KERNEL_ASSIGN(out[base + i], req, OP::Map(lhs[lidx], rhs[ridx]));
    }
  }
  /*! \brief Map function for binary_broadcast_kernel */
  /* used for mixed type binary ops: scalar lhs of a different (non-pointer) type */
  template<typename IType, typename DType,
           typename std::enable_if<!std::is_same<IType, DType>::value &&
                                   !std::is_pointer<IType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t base, index_t length, OpReqType req,
                                  const Shape <ndim> &lstride, const Shape <ndim> &rstride,
                                  const Shape <ndim> &oshape, IType lhs, DType *rhs,
                                  DType *out) {
    Shape <ndim> coord = unravel(base, oshape);
    auto lidx = static_cast<index_t>(dot(coord, lstride));
    auto ridx = static_cast<index_t>(dot(coord, rstride));
    KERNEL_ASSIGN(out[base], req, OP::Map(lhs, rhs[ridx]));
    // starts from 1 to avoid extra inc at end of loop
    for (index_t i = 1; i < length; ++i) {
      inc(&coord, oshape, &lidx, lstride, &ridx, rstride);
      // When tuning, don't actually run the op, since it's not going to be tuned against
      // the actual op we'll eventually be using
      KERNEL_ASSIGN(out[base + i], req, OP::Map(lhs, rhs[ridx]));
    }
  }
};
// Broadcast a dense vector (or scalar) against a CSR matrix, producing a CSR
// result with the same sparsity pattern. col_vec selects whether the dense
// operand indexes by row (column vector) or by column index (row vector).
template<int req, typename OP, bool col_vec>
struct csr_dns_csr_broadcast_kernel {
  /*!
   * \brief Map function for broadcast between csr and 1D vector
   * \param row global thread id/assigned row id
   * \param csr_data ptr to data buffer of csr matrix
   * \param csr_indices ptr to indices buffer of csr matrix
   * \param csr_indptr ptr to indptr buffer of csr matrix
   * \param dns ptr to data buffer of the dense vector
   * \param out ptr to the data buffer of the result csr matrix
   */
  template<typename DType, typename CType, typename RType>
  MSHADOW_XINLINE static void Map(index_t row, const DType *csr_data, const CType *csr_indices,
                                  const RType *csr_indptr, const DType *dns, DType *out) {
    const nnvm::dim_t curr_row_i = csr_indptr[row];
    const nnvm::dim_t next_row_i = csr_indptr[row + 1];
    for (nnvm::dim_t iter = curr_row_i; iter < next_row_i; iter++) {
      // col_vec: dense value picked by row; otherwise by the entry's column.
      KERNEL_ASSIGN(out[iter], req, OP::Map(csr_data[iter],
                    (col_vec)? dns[row] : dns[csr_indices[iter]]));
    }
  }
  /*!
   * \brief Map function for broadcast between csr and a scalar
   * \param i global thread id
   * \param csr_data ptr to data buffer of csr matrix
   * \param scalar_ptr ptr to data buffer of the scalar tensor, only the 0-th element is used
   * \param out ptr to the data buffer of output csr matrix
   * \param nnz number of non-zero elements in input csr matrix
   */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, const DType *csr_data, const DType* scalar_ptr,
                                  DType *out, const nnvm::dim_t nnz) {
    const DType scale = scalar_ptr[0];
    if (i < nnz) {
      KERNEL_ASSIGN(out[i], req, OP::Map(csr_data[i], scale));
    }
  }
};
// Apply OP between a CSR matrix's entries and the corresponding elements of a
// dense output buffer in place; `reverse` swaps the operand order passed to OP.
template<int req, typename OP, bool reverse = false>
struct csr_dns_map_kernel {
  template <typename DType, typename CType, typename RType>
  MSHADOW_XINLINE static void Map(index_t row, const DType *csr_data, const CType *csr_indices,
                                  const RType *csr_indptr, DType *out, const nnvm::dim_t num_rows,
                                  const nnvm::dim_t num_cols) {
    if (row < num_rows) {
      const nnvm::dim_t curr_row_i = csr_indptr[row];
      const nnvm::dim_t next_row_i = csr_indptr[row + 1];
      for (nnvm::dim_t iter = curr_row_i; iter < next_row_i; iter++) {
        // Flat index of this CSR entry in the row-major dense output.
        const nnvm::dim_t target = row * num_cols + csr_indices[iter];
        KERNEL_ASSIGN(out[target], req,
                      reverse ? OP::Map(out[target], csr_data[iter]) :
                                OP::Map(csr_data[iter], out[target]));
      }
    }
  }
};
} // namespace mxnet_op
namespace broadcast {
using namespace mshadow;
const int MAX_DIM = 5;
// Unravel flat index `idx` over `shape` and simultaneously accumulate the dot
// product of the resulting coordinate with two stride vectors, producing the
// corresponding flat offsets *j and *k in one pass.
template<int ndim>
MSHADOW_XINLINE void unravel_dot(const index_t idx, const Shape<ndim>& shape,
  const Shape<ndim>& stridej, const Shape<ndim>& stridek, index_t* j, index_t* k) {
  *j = 0;
  *k = 0;
  #pragma unroll
  for (index_t i = ndim-1, idx_t = idx; i >=0; --i) {
    // coord = idx_t % shape[i], computed via the quotient to reuse it below.
    const auto tmp = idx_t / shape[i];
    const auto coord = idx_t - tmp*shape[i];
    *j += coord*stridej[i];
    *k += coord*stridek[i];
    idx_t = tmp;
  }
}
// Compute the reduction geometry between a reduced ("small") shape and the
// original ("big") shape: fills *dims with the sizes of the differing axes
// (packed to the front) and *stride with their strides in `big`.
// \return the number of axes being reduced over.
template<int ndim>
MSHADOW_XINLINE int diff(const Shape<ndim>& small,
                         const Shape<ndim>& big,
                         Shape<ndim>* dims,
                         Shape<ndim>* stride) {
  int mdim = 0;
  #pragma unroll
  for (int i = 0; i < ndim; ++i) {
    mdim += small[i] != big[i];
    (*dims)[i] = (*stride)[i] = 1;
  }
  index_t s = 1;
  #pragma unroll
  // Walk from the innermost axis, recording stride/size of each reduced axis.
  for (int i = ndim - 1, j = mdim; i >= 0; --i) {
    if (small[i] != big[i]) {
      --j;
      (*stride)[j] = s;
      (*dims)[j] = big[i];
    }
    s *= big[i];
  }
  return mdim;
}
// Write src into *dst, either accumulating (addto) or overwriting.
template<typename DType>
MSHADOW_XINLINE void assign(DType* dst, const bool addto, const DType src) {
  *dst = addto ? (*dst + src) : src;
}
// Compute one broadcast output element: map the output coordinate back into
// lhs/rhs index space (broadcast axes collapse via ravel) and assign/accumulate.
template<int ndim, typename DType, typename OP>
MSHADOW_XINLINE void binary_broadcast_assign(const index_t idx, const bool addto,
                                             const DType* __restrict lhs,
                                             const DType* __restrict rhs, DType* out,
                                             const Shape<ndim>& lshape, const Shape<ndim>& rshape,
                                             const Shape<ndim>& oshape) {
  const Shape<ndim> coord = mxnet_op::unravel(idx, oshape);
  const index_t j = mxnet_op::ravel(coord, lshape);
  const index_t k = mxnet_op::ravel(coord, rshape);
  assign(&out[idx], addto, OP::Map(lhs[j], rhs[k]));
}
// Sequentially reduce M elements of `big` into small[idx]: idx locates the
// output element, rshape/rstride enumerate the reduced axes. AType is the
// accumulation type; IndexOP optionally tags values with their index (argmin/max).
template<typename Reducer, int ndim, typename AType, typename DType, typename OType,
         typename OP, typename IndexOP = mxnet::op::mshadow_op::set_index_no_op<AType, index_t>>
MSHADOW_XINLINE void seq_reduce_assign(const index_t idx, const size_t M, const bool addto,
                                       const DType* __restrict big, OType *small,
                                       const Shape<ndim>& bshape, const Shape<ndim>& sshape,
                                       const Shape<ndim>& rshape, const Shape<ndim>& rstride) {
  Shape<ndim> coord = mxnet_op::unravel(idx, sshape);
  index_t j = mxnet_op::ravel(coord, bshape);  // base offset into big
  AType val, residual;
  Reducer::SetInitValue(val, residual);
  for (size_t k = 0; k < M; ++k) {
    coord = mxnet_op::unravel(k, rshape);
    AType temp = OP::Map(big[j + mxnet_op::dot(coord, rstride)]);
    // argmin/max, set IndexedNum.idx
    if (IndexOP::do_op)
      IndexOP::Op(&temp, k);
    Reducer::Reduce(val, temp, residual);
  }
  Reducer::Finalize(val, residual);
  assign(&small[idx], addto, OType(val));
}
namespace {
// Returns the stride with which the fastest dimension is moving.
// Used to detect memory access scatter.
// Returns the stride with which the fastest dimension is moving.
// Used to detect memory access scatter: 1 when the innermost non-unit axis of
// `big` is preserved in `small`, otherwise that axis' stride in big_stride.
inline int fastest_stride(const TShape &small, const TShape &big,
                          const TShape &big_stride) {
  const int ndim = small.ndim();
  for (int i = ndim-1; i >= 0; --i) {
    if (big[i] != 1) {
      return (small[i] == big[i]) ? 1 : big_stride[i];
    }
  }
  // All axes are unit-sized: contiguous by definition.
  return 1;
}
} // namespace
// CPU entry point for broadcast binary ops: derive strides for both operands
// and launch binary_broadcast_kernel over the full output.
template<int ndim, typename DType, typename OP>
void BinaryBroadcastComputeImpl(Stream<cpu> *s, const OpReqType req,
                                const TBlob& lhs, const TBlob& rhs, const TBlob& out) {
  mshadow::Shape<ndim> oshape = out.shape_.get<ndim>();
  mshadow::Shape<ndim> lstride = mxnet_op::calc_stride(lhs.shape_.get<ndim>());
  mshadow::Shape<ndim> rstride = mxnet_op::calc_stride(rhs.shape_.get<ndim>());
  mxnet_op::Kernel<mxnet_op::binary_broadcast_kernel<ndim, OP>, cpu>::
  template LaunchEx(s, out.shape_.Size(), req, lstride, rstride, oshape,
                    lhs.dptr<DType>(), rhs.dptr<DType>(), out.dptr<DType>());
}
// Reduce `big` into N output elements of `small`, M input elements each,
// parallelized over the N outputs with OpenMP.
template<typename Reducer, int ndim, typename AType, typename DType, typename OType, typename OP,
         typename IndexOP = mxnet::op::mshadow_op::set_index_no_op<AType, index_t>>
void seq_reduce_compute(const size_t N, const size_t M, const bool addto,
                        const DType *big, OType *small, const Shape<ndim> bshape,
                        const Shape<ndim> sshape, const Shape<ndim> rshape,
                        const Shape<ndim> rstride) {
  #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
  for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) {
    seq_reduce_assign<Reducer, ndim, AType, DType, OType, OP, IndexOP>(idx, M, addto, big, small,
        bshape, sshape, rshape, rstride);
  }
}
// Variant of seq_reduce_compute that uses a precomputed table of input
// offsets (ws_dptr, one per reduced element) instead of unraveling
// coordinates inside the inner loop.
template <typename Reducer, int ndim, typename DType, typename OP>
void seq_reduce_compute_extra_mem(const size_t N, const size_t M, const bool addto,
                                  const DType* big, DType* small,
                                  const Shape<ndim> bshape,
                                  const Shape<ndim> sshape,
                                  const Shape<ndim> rshape,
                                  const Shape<ndim> rstride,
                                  const index_t* ws_dptr) {
  #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
  for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) {
    Shape<ndim> coord = mxnet_op::unravel(idx, sshape);
    index_t j = mxnet_op::ravel(coord, bshape);  // base offset into big
    DType val, residual;
    Reducer::SetInitValue(val, residual);
    for (size_t k = 0; k < M; ++k) {
      Reducer::Reduce(val, OP::Map(big[j + ws_dptr[k]]), residual);
    }
    assign(&small[idx], addto, val);
  }
}
// CPU reduction entry point. With safe_acc the accumulation is done in a
// wider type (MXNET_ACC_TYPE_SWITCH) to reduce rounding/overflow error;
// otherwise everything stays in DType. The workspace is unused on CPU.
template <typename Reducer, int ndim, typename DType, typename OP, bool safe_acc = false>
void Reduce(Stream<cpu>* s, const TBlob& small, const OpReqType req,
            const Tensor<cpu, 1, char>& workspace, const TBlob& big) {
  if (req == kNullOp) return;
  Shape<ndim> rshape, rstride;
  diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride);
  size_t N = small.shape_.Size(), M = rshape.Size();
  if (!safe_acc) {
    seq_reduce_compute<Reducer, ndim, DType, DType, DType, OP>(
      N, M, req == kAddTo, big.dptr<DType>(), small.dptr<DType>(),
      big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride);
  } else {
    MXNET_ACC_TYPE_SWITCH(mshadow::DataType<DType>::kFlag, DataType, AType, {
      typedef typename std::conditional<safe_acc, AType, DataType>::type AccType;
      MSHADOW_TYPE_SWITCH_WITH_BOOL(small.type_flag_, OType, {
        typedef typename std::conditional<safe_acc, OType, DataType>::type OutType;
        seq_reduce_compute<Reducer, ndim, AccType, DataType, OutType, OP>(
          N, M, req == kAddTo, big.dptr<DataType>(), small.dptr<OutType>(),
          big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride);
      });
    });
  }
}
// Reduction producing a boolean output (e.g. any/all): accumulates in bool
// and writes to a bool-typed small tensor.
template <typename Reducer, int ndim, typename DType, typename OP>
void ReduceBool(Stream<cpu>* s, const TBlob& small, const OpReqType req,
                const Tensor<cpu, 1, char>& workspace, const TBlob& big) {
  if (req == kNullOp) return;
  Shape<ndim> rshape, rstride;
  diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride);
  size_t N = small.shape_.Size(), M = rshape.Size();
  seq_reduce_compute<Reducer, ndim, bool, DType, bool, OP>(
    N, M, req == kAddTo, big.dptr<DType>(), small.dptr<bool>(),
    big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride);
}
// Reduction variant that trades memory for speed: precomputes the M input
// offsets once into the caller-provided workspace, then reuses them for all
// N output elements.
template <typename Reducer, int ndim, typename DType, typename OP>
void ReduceWithExtraMem(Stream<cpu>* s, const TBlob& small, const OpReqType req,
                        const Tensor<cpu, 1, char>& workspace, const TBlob& big) {
  using namespace mxnet_op;
  if (req == kNullOp) return;
  Shape<ndim> rshape, rstride;
  diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride);
  index_t* ws_dptr = reinterpret_cast<index_t*>(workspace.dptr_);
  size_t N = small.shape_.Size(), M = rshape.Size();
  // Fill the offset table in parallel before the actual reduction.
  #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
  for (index_t k = 0; k < static_cast<index_t>(M); k++) {
    Shape<ndim> coord = mxnet_op::unravel(k, rshape);
    ws_dptr[k] = mxnet_op::dot(coord, rstride);
  }
  seq_reduce_compute_extra_mem<Reducer, ndim, DType, OP>(
    N, M, req == kAddTo, big.dptr<DType>(), small.dptr<DType>(), big.shape_.get<ndim>(),
    small.shape_.get<ndim>(), rshape, rstride, ws_dptr);
}
// CPU reductions need no scratch workspace (cf. the GPU overloads).
inline size_t ReduceWorkspaceSize(Stream<cpu> *s, const mxnet::TShape& small, const OpReqType req,
                                  const mxnet::TShape& big, const int type_size) {
  return 0;
}
// Two-operand variant: likewise no workspace needed on CPU.
inline size_t ReduceWorkspaceSize(Stream<cpu> *s, const mxnet::TShape& small, const OpReqType req,
                                  const mxnet::TShape& big, const mxnet::TShape& lhs,
                                  const mxnet::TShape& rhs, const int type_size) {
  return 0;
}
#if MXNET_USE_CUDA
namespace {
constexpr int warpSize = 32;
constexpr int unroll_reduce = 2;
// Returns a/b integer division rounded up
// Returns a/b integer division rounded up (assumes non-negative a, positive b).
template<typename Type>
Type ceil_idiv(const Type a, const Type b) {
  return (a + b - 1)/b;
}
// Estimate the number of memory loads a kernel issues for an X*Y access
// pattern given the three operand strides, modeling warp-level coalescing:
// a full warp needs min(warpSize, stride) loads per operand.
// NOTE(review): heuristic cost model used only for launch-config selection —
// not an exact count.
uint64_t calc_num_load(const int X, const int Y, const int* strides) {
  // Number of full warps
  uint64_t num_full_warp = X / warpSize;
  // Length of the partial warp i.e. number of threads that are performing loads
  uint64_t len_part_warp = X % warpSize;
  uint64_t num_load_full = (std::min(warpSize, strides[0]) +
                            std::min(warpSize, strides[1]) +
                            std::min(warpSize, strides[2]))*num_full_warp;
  uint64_t num_load_part =
    (std::min(len_part_warp, ceil_idiv<uint64_t>(len_part_warp*strides[0], warpSize)) +
     std::min(len_part_warp, ceil_idiv<uint64_t>(len_part_warp*strides[1], warpSize)) +
     std::min(len_part_warp, ceil_idiv<uint64_t>(len_part_warp*strides[2], warpSize)))*
    (len_part_warp != 0);
  uint64_t num_load = (num_load_full + num_load_part)*(uint64_t)Y;
  return num_load;
}
// Dynamic-ndim overload of diff(): same contract as the Shape<ndim> version —
// packs sizes/strides of the reduced axes to the front of *dims / *stride and
// returns the count of reduced axes.
inline int diff(const TShape& small, const TShape& big,
                TShape* dims, TShape* stride) {
  int ndim = small.ndim();
  int mdim = 0;
  #pragma unroll
  for (int i = 0; i < ndim; ++i) {
    mdim += small[i] != big[i];
    (*dims)[i] = (*stride)[i] = 1;
  }
  index_t s = 1;
  #pragma unroll
  for (int i = ndim - 1, j = mdim; i >= 0; --i) {
    if (small[i] != big[i]) {
      --j;
      (*stride)[j] = s;
      (*dims)[j] = big[i];
    }
    s *= big[i];
  }
  return mdim;
}
constexpr int nthread_reduce = 512;
constexpr index_t kBaseGridNum = 1024;
} // namespace
// Configuration for ReduceImpl()
struct ReduceImplConfig {
index_t N;
index_t M;
index_t Mnext;
struct {
dim3 blockDim;
dim3 gridDim;
int shMemSize;
bool do_transpose;
} kernel_1;
struct {
int blockSize;
int gridSize;
} kernel_2;
size_t workspace_size;
TShape rshape, rstride;
TShape lhs_shape, lhs_stride;
TShape rhs_shape, rhs_stride;
inline ReduceImplConfig(const ::mxnet::TShape& small, const ::mxnet::TShape& big,
const ::mxnet::TShape* lhs,
const ::mxnet::TShape* rhs,
const size_t type_size) :
rshape(small.ndim(), 1), rstride(small.ndim(), 1),
lhs_shape(small.ndim(), 1), lhs_stride(small.ndim(), 1),
rhs_shape(small.ndim(), 1), rhs_stride(small.ndim(), 1) {
constexpr int maxLoopPerTB = 64;
int ndim = small.ndim();
diff(small, big, &rshape, &rstride);
N = small.Size();
M = rshape[0];
for (int i = 1; i < ndim; ++i) {
M *= rshape[i];
}
bool multiOp = false;
if (lhs != nullptr) {
CHECK_NOTNULL(rhs);
diff(small, *lhs, &lhs_shape, &lhs_stride);
diff(small, *rhs, &rhs_shape, &rhs_stride);
multiOp = true;
}
workspace_size = 0;
kernel_1.shMemSize = 0;
kernel_1.do_transpose = false;
if (M == 1) {
kernel_1.blockDim.x = nthread_reduce;
kernel_1.gridDim.x = std::min(kBaseGridNum,
static_cast<index_t>((N + kernel_1.blockDim.x - 1)/kernel_1.blockDim.x));
} else {
int reduce_strides[3];
reduce_strides[0] = fastest_stride(small, big, big);
reduce_strides[1] = (multiOp) ? fastest_stride(small, *lhs, *lhs) : 1;
reduce_strides[2] = (multiOp) ? fastest_stride(small, *rhs, *rhs) : 1;
int reduce_strides_transp[3];
reduce_strides_transp[0] = fastest_stride(small, rshape, rstride);
reduce_strides_transp[1] = (multiOp) ?
fastest_stride(small, lhs_shape, lhs_stride) : 1;
reduce_strides_transp[2] = (multiOp) ?
fastest_stride(small, rhs_shape, rhs_stride) : 1;
uint64_t num_load = calc_num_load(N, M, reduce_strides);
uint64_t num_load_transp = calc_num_load(M, N, reduce_strides_transp);
Mnext = 1;
kernel_1.do_transpose = (num_load > num_load_transp);
kernel_1.blockDim.x = 0;
kernel_1.blockDim.y = 0;
if (kernel_1.do_transpose) {
// Fastest thread ID goes through M
// Loop over N has step size kernel_1.blockDim.y
if (N < 8) {
kernel_1.blockDim.y = 1;
} else if (N < 256) {
kernel_1.blockDim.y = 4;
} else {
if (M < 8) {
kernel_1.blockDim.x = 1;
} else if (M < 256) {
kernel_1.blockDim.x = 4;
} else {
kernel_1.blockDim.x = warpSize;
}
}
} else {
// Fastest thread ID goes through N
// Loop over M has step size kernel_1.blockDim.y
if (M < 8) {
kernel_1.blockDim.y = 1;
} else if (M < 256) {
kernel_1.blockDim.y = 4;
} else {
if (N < 8) {
kernel_1.blockDim.x = 1;
} else if (N < 256) {
kernel_1.blockDim.x = 4;
} else {
kernel_1.blockDim.x = warpSize;
}
}
}
if (kernel_1.blockDim.x == 0 && kernel_1.blockDim.y == 0) {
LOG(FATAL) << "Unable to set blockDim";
} else if (kernel_1.blockDim.x == 0) {
kernel_1.blockDim.x = nthread_reduce / kernel_1.blockDim.y;
} else if (kernel_1.blockDim.y == 0) {
kernel_1.blockDim.y = nthread_reduce / kernel_1.blockDim.x;
}
if (kernel_1.do_transpose) {
// Fastest thread ID goes through M
kernel_1.gridDim.x = std::min((unsigned int)kBaseGridNum,
ceil_idiv<unsigned int>(N, kernel_1.blockDim.y));
kernel_1.gridDim.y = std::min(kBaseGridNum, Mnext);
int by = kernel_1.blockDim.y;
if (kernel_1.blockDim.y % warpSize == 0) {
// Fix shared memory bank conflict
by++;
}
kernel_1.shMemSize = (kernel_1.blockDim.x > 1) ?
kernel_1.blockDim.x*by*type_size * 2 : 0;
// Maximum number of times we want TB to loop in M
// Max size of M-block each TB can handle
int maxMblock = kernel_1.blockDim.x*maxLoopPerTB;
Mnext = (M + maxMblock - 1) / maxMblock;
} else {
// Fastest thread ID goes through N
kernel_1.gridDim.x = std::min((unsigned int)kBaseGridNum,
ceil_idiv<unsigned int>(N, kernel_1.blockDim.x));
kernel_1.gridDim.y = std::min(kBaseGridNum, Mnext);
kernel_1.shMemSize = (kernel_1.blockDim.y > 1) ?
kernel_1.blockDim.x*kernel_1.blockDim.y*type_size * 2 : 0;
// Maximum number of times we want TB to loop in M
// Max size of M-block each TB can handle
int maxMblock = kernel_1.blockDim.y*maxLoopPerTB;
Mnext = (M + maxMblock - 1) / maxMblock;
}
if (Mnext > 1) {
// small_dptr[] is N*Mnext*type_size bytes
workspace_size += N*Mnext*sizeof(double);
// Set gridDim.y to Mnext
kernel_1.gridDim.y = std::min(kBaseGridNum, Mnext);
}
if (Mnext > 1) {
kernel_2.blockSize = nthread_reduce;
kernel_2.gridSize = std::min(kBaseGridNum,
static_cast<index_t>((N + kernel_2.blockSize - 1)/kernel_2.blockSize));
}
}
}
};
// GPU overload: returns the scratch-space size the GPU reduction kernels need
// to reduce `big` into `small` (non-zero only when ReduceImplConfig splits M
// into multiple chunks).  A null op needs no workspace.
inline size_t ReduceWorkspaceSize(Stream<gpu> *s, const ::mxnet::TShape& small, const OpReqType req,
const ::mxnet::TShape& big, const int type_size) {
if (req == kNullOp) return 0;
ReduceImplConfig config(small, big, nullptr, nullptr, type_size);
return config.workspace_size;
}
// GPU overload for the fused three-input reduction (big, lhs, rhs): same as
// above but configures the multi-operand kernels.
inline size_t ReduceWorkspaceSize(Stream<gpu> *s, const ::mxnet::TShape& small, const OpReqType req,
const ::mxnet::TShape& big, const ::mxnet::TShape& lhs,
const ::mxnet::TShape& rhs, const int type_size) {
if (req == kNullOp) return 0;
ReduceImplConfig config(small, big, &lhs, &rhs, type_size);
return config.workspace_size;
}
#ifdef __CUDACC__
#include "broadcast_reduce-inl.cuh"
#endif
#endif // MXNET_USE_CUDA
// Computes a single output element of the fused reduction
//   small[idx] = Reduce_k OP1(big[...], OP2(lhs[...], rhs[...]))
// The flat output index `idx` is unraveled to a multi-index, mapped into each
// input to get a base offset, and the M reduced positions are then walked via
// the (shape, stride) pairs produced by diff().  `residual` carries the
// reducer's auxiliary state (e.g. compensation for Kahan-style summation).
template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2>
MSHADOW_XINLINE void seq_reduce_assign(const index_t idx, const size_t M, const bool addto,
const DType* __restrict big, const DType* __restrict lhs,
const DType* __restrict rhs, DType *small,
const Shape<ndim>& big_shape, const Shape<ndim>& lhs_shape0,
const Shape<ndim>& rhs_shape0,
const Shape<ndim>& small_shape, const Shape<ndim>& rshape,
const Shape<ndim>& lhs_shape, const Shape<ndim>& rhs_shape,
const Shape<ndim>& rstride, const Shape<ndim>& lhs_stride,
const Shape<ndim>& rhs_stride) {
Shape<ndim> coord = mxnet_op::unravel(idx, small_shape);
// Base offsets of this output element inside each input tensor.
const index_t idx_big0 = mxnet_op::ravel(coord, big_shape);
const index_t idx_lhs0 = mxnet_op::ravel(coord, lhs_shape0);
const index_t idx_rhs0 = mxnet_op::ravel(coord, rhs_shape0);
DType val, residual;
Reducer::SetInitValue(val, residual);
for (size_t k = 0; k < M; ++k) {
// Map reduction step k to a concrete offset in each input.
Shape<ndim> coord_big = mxnet_op::unravel(k, rshape);
index_t idx_big = idx_big0 + mxnet_op::dot(coord_big, rstride);
Shape<ndim> coord_lhs = mxnet_op::unravel(k, lhs_shape);
index_t idx_lhs = idx_lhs0 + mxnet_op::dot(coord_lhs, lhs_stride);
Shape<ndim> coord_rhs = mxnet_op::unravel(k, rhs_shape);
index_t idx_rhs = idx_rhs0 + mxnet_op::dot(coord_rhs, rhs_stride);
// Fold OP1(big, OP2(lhs, rhs)) for this position into the accumulator.
Reducer::Reduce(val, OP1::Map(big[idx_big], OP2::Map(lhs[idx_lhs], rhs[idx_rhs])), residual);
}
Reducer::Finalize(val, residual);
// Write, or accumulate when addto is set.
assign(&small[idx], addto, val);
}
template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2>
void seq_reduce_compute(const size_t N, const size_t M, const bool addto,
const DType *big, const DType *lhs, const DType *rhs, DType *small,
const Shape<ndim> big_shape, const Shape<ndim> small_shape,
const Shape<ndim> rshape, const Shape<ndim> rstride,
const Shape<ndim> lhs_shape, const Shape<ndim> lhs_stride,
const Shape<ndim> rhs_shape, const Shape<ndim> rhs_stride,
const Shape<ndim>& lhs_shape0, const Shape<ndim>& rhs_shape0) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) {
seq_reduce_assign<Reducer, ndim, DType, OP1, OP2>(idx, M, addto, big, lhs, rhs, small,
big_shape, lhs_shape0, rhs_shape0, small_shape, rshape, lhs_shape, rhs_shape, rstride,
lhs_stride, rhs_stride);
}
}
// CPU entry point for the fused reduction: reduces `big`, combined
// elementwise with OP2(lhs, rhs) through OP1, into `small`.
// req == kAddTo accumulates into `small` instead of overwriting; kNullOp is
// a no-op.  `workspace` is unused on the CPU path (kept for signature parity
// with the GPU version).
template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2>
void Reduce(Stream<cpu> *s, const TBlob& small, const OpReqType req,
const Tensor<cpu, 1, char>& workspace, const TBlob& big, const TBlob& lhs,
const TBlob& rhs) {
if (req == kNullOp) return;
// Shapes/strides of the axes reduced away, for each input relative to the
// output shape.
Shape<ndim> rshape, rstride;
diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride);
size_t N = small.shape_.Size();
size_t M = rshape.Size();
Shape<ndim> lhs_shape, lhs_stride;
diff(small.shape_.get<ndim>(), lhs.shape_.get<ndim>(), &lhs_shape, &lhs_stride);
Shape<ndim> rhs_shape, rhs_stride;
diff(small.shape_.get<ndim>(), rhs.shape_.get<ndim>(), &rhs_shape, &rhs_stride);
seq_reduce_compute<Reducer, ndim, DType, OP1, OP2>(
N, M, req == kAddTo,
big.dptr<DType>(), lhs.dptr<DType>(), rhs.dptr<DType>(), small.dptr<DType>(),
big.shape_.get<ndim>(), small.shape_.get<ndim>(),
rshape, rstride,
lhs_shape, lhs_stride,
rhs_shape, rhs_stride,
lhs.shape_.get<ndim>(), rhs.shape_.get<ndim>());
}
#if MXNET_USE_CUDA
void RTCReduce(const OpContext& ctx,
const TBlob& small,
const OpReqType req,
const Tensor<gpu, 1, char>& workspace,
const TBlob& big,
const std::string& reducer,
int ndim,
const std::string& OP);
void RTCReduce(const OpContext& ctx,
const TBlob& small,
const OpReqType req,
const Tensor<gpu, 1, char>& workspace,
const TBlob& big,
const TBlob &lhs,
const TBlob &rhs,
const std::string& reducer,
int ndim,
const std::string& OP1,
const std::string& OP2);
#endif
} // namespace broadcast
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_
|
pi_loop.c | /**
* This program will numerically compute the integral of
* 4/(1+x*x)
* from 0 to 1. The value of this integral is pi -- which
* is great since it gives us an easy way to check the answer.
*
* The program was parallelized using OpenMP by adding just
* four lines
*
* (1) A line to include omp.h -- the include file that
* contains OpenMP's function prototypes and constants.
*
* (2) A pragma that tells OpenMP to create a team of threads
*
* (3) A pragma to cause one of the threads to print the
* number of threads being used by the program.
*
* (4) A pragma to split up loop iterations among the team
* of threads. This pragma includes 2 clauses to (1) create a
* private variable and (2) to cause the threads to compute their
* sums locally and then combine their local sums into a
* single global value.
*
* History: Written by Tim Mattson, 11/99.
**/
#include <stdio.h>
#include <omp.h>
static long num_steps = 100000000;
double step;
/*
 * Times the parallel pi integration with 1..4 threads.
 *
 * BUG FIX: in the original code `x` was a single shared variable written by
 * every thread inside the omp-for loop -- a data race that corrupts the
 * result.  `x` is now local to each iteration.  The integration loop also
 * gets its own `long` counter instead of reusing the outer timing counter
 * `i` (correct under OpenMP's predetermined-private rule, but fragile and
 * confusing to read).
 */
int main()
{
    int i;
    double pi, sum = 0.0;
    double start_time, run_time;

    step = 1.0 / (double)num_steps;

    /* Repeat the run with 1, 2, 3 and 4 threads to show scaling. */
    for (i = 1; i <= 4; i++)
    {
        sum = 0.0;
        omp_set_num_threads(i);
        start_time = omp_get_wtime();
#pragma omp parallel
        {
#pragma omp single
            printf(" num_threads = %d", omp_get_num_threads());

            /* Each thread accumulates a private partial sum; the reduction
             * clause combines them into the global `sum` at the end. */
#pragma omp for reduction(+ \
                          : sum)
            for (long j = 1; j <= num_steps; j++)
            {
                double x = (j - 0.5) * step; /* midpoint of interval j */
                sum = sum + 4.0 / (1.0 + x * x);
            }
        }
        pi = step * sum;
        run_time = omp_get_wtime() - start_time;
        printf("\n pi is %f in %f seconds and %d threads\n", pi, run_time, i);
    }
    return 0;
}
|
trsm_x_bsr_n_hi_col.c | #include "alphasparse/opt.h"
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include <memory.h>
/*
 * Triangular solve with multiple right-hand sides for a BSR (block sparse
 * row) matrix: solves A * Y = alpha * X for Y, where A is upper triangular
 * with a non-unit diagonal and column-major blocks.  X and Y are dense
 * column-major multi-vectors with `columns` columns and leading dimensions
 * ldx / ldy.  Backward substitution runs per column, over block rows from
 * bottom to top.
 */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_BSR *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy)
{
const ALPHA_INT num_thread = alpha_get_thread_num();
const ALPHA_INT bs = A->block_size;
/* One diagonal entry per scalar row, extracted up front so the solve loop
 * does not have to search the diagonal block each time. */
ALPHA_Number* diag=(ALPHA_Number*) alpha_malloc(A->rows*bs*sizeof(ALPHA_Number));
const ALPHA_INT m = A->rows*bs;
const ALPHA_INT n = A->cols*bs;
memset(diag, '\0', m * sizeof(ALPHA_Number));
const ALPHA_INT bs2 = bs * bs;
const ALPHA_INT b_rows = m / bs;
const ALPHA_INT b_cols = n / bs; /* currently unused */
const alphasparse_layout_t block_layout = A->block_layout;
/* This kernel is written for column-major blocks only. */
if(block_layout != ALPHA_SPARSE_LAYOUT_COLUMN_MAJOR)
{
printf("layout not consistent!!!\n");
exit(-1);
}
/* Pass 1: gather the scalar diagonal of every diagonal block.
 * In a column-major bs x bs block, element (r,r) sits at offset r*(bs+1). */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
for(ALPHA_INT br = 0 ; br < b_rows; br++){
for(ALPHA_INT ai = A->rows_start[br]; ai < A->rows_end[br]; ai++){
ALPHA_INT bc = A->col_indx[ai];
if(bc == br){
for(ALPHA_INT b_row = 0 ; b_row < bs ; b_row++){
diag[index2(br,b_row,bs)] = A->values[ai * bs2 + b_row *(bs + 1)];
}
}
}
}
/* Pass 2: independent backward substitution per right-hand-side column;
 * `temp` accumulates the already-solved contributions for one block row. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
for(ALPHA_INT out_y_col = 0; out_y_col < columns; out_y_col++)
{
ALPHA_Number* temp = (ALPHA_Number*) alpha_malloc(bs*sizeof(ALPHA_Number));
const ALPHA_INT y0_offset = out_y_col * ldy;
const ALPHA_INT x0_offset = out_y_col * ldx;
for (ALPHA_INT br = b_rows - 1; br >= 0; br--)
{
for(ALPHA_INT i = 0 ; i < bs ; i++){
alpha_setzero(temp[i]);
}
ALPHA_INT diagBlock = -1;
/* Accumulate contributions of blocks strictly right of the diagonal
 * (their y values are already known in a backward solve). */
for (ALPHA_INT ai = A->rows_start[br]; ai < A->rows_end[br]; ai++)
{
ALPHA_INT bc = A->col_indx[ai];
if(bc > br)
//col-major
for(ALPHA_INT col = 0; col < bs; col++)
{
//all entities belongs to upper triangle
ALPHA_INT y_offset = y0_offset + bc * bs + col;
ALPHA_INT a0_offset = ai * bs2 + col * bs;
for(ALPHA_INT row = 0 ; row < bs ; row++)
{
ALPHA_INT ele_offset = a0_offset + row;
alpha_madde(temp[row], A->values[ ele_offset ] ,y[y_offset]);
}
}
//diagonal must be none-zero block
if( bc==br ){
diagBlock = ai;
}
}
if(diagBlock == -1)
{
printf("lhs matrix invalid for trsm!!!\n");
exit(-1);
}
/* Solve the bs x bs upper-triangular diagonal block in place, from the
 * last intra-block column to the first:
 *   y = (alpha*x - temp) / diag, then propagate y upward within the
 * block via the strictly-upper entries. */
//col-major
//right-bottom most
for(ALPHA_INT col = bs - 1; col >= 0; col--)
{
//upper triangle of block
ALPHA_Number t;
alpha_setzero(t);
alpha_mul(t,alpha,x[x0_offset + br * bs + col]);
alpha_sub(t,t,temp[col]);
alpha_div(y[y0_offset + br * bs + col],t,diag[col + br * bs]);
for(ALPHA_INT row = col - 1; row >= 0; row--){
alpha_madde(temp[row], A->values[ diagBlock * bs2 + col * bs + row],y[y0_offset + br * bs + col ]);
}
}
}
alpha_free(temp);
}
alpha_free(diag);
return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
DataTypeConversions.h | //
// Created by raver119 on 21.11.17.
//
#ifndef LIBND4J_DATATYPECONVERSIONS_H
#define LIBND4J_DATATYPECONVERSIONS_H
#include <pointercast.h>
#include <helpers/logger.h>
#include <op_boilerplate.h>
#include <array/DataType.h>
#include <types/float16.h>
#include <helpers/BitwiseUtils.h>
namespace nd4j {
template <typename T>
// Converts a raw buffer holding FLOAT, DOUBLE or HALF values (with a declared
// byte order) into an array of T, swapping bytes when the source endianness
// differs from the host's.
class DataTypeConversions {
public:
// buffer   : destination array of `length` T elements (caller-allocated)
// src      : source bytes, interpreted according to `dataType`
// dataType : element type stored in src (FLOAT / DOUBLE / HALF)
// order    : endianness src was written with
// length   : number of elements to convert
static FORCEINLINE void convertType(T* buffer, void* src, DataType dataType, ByteOrder order, Nd4jIndex length) {
bool isBe = BitwiseUtils::isBE();
// Bytes can be used as-is only when the host endianness matches the
// declared byte order of the source buffer.
bool canKeep = (isBe && order == ByteOrder::BE) || (!isBe && order == ByteOrder::LE);
switch (dataType) {
case DataType_FLOAT: {
auto tmp = (float *) src;
// NOTE(review): in the mismatched-endianness path the value is cast
// to T *before* the byte swap; for T != float this likely differs
// from swapping the source bytes first -- confirm intent.
#pragma omp parallel for simd schedule(guided)
for (Nd4jIndex e = 0; e < length; e++) {
buffer[e] = canKeep ? (T) tmp[e] : BitwiseUtils::swap_bytes<T>((T) tmp[e]);
}
}
break;
case DataType_DOUBLE: {
auto tmp = (double *) src;
#pragma omp parallel for simd schedule(guided)
for (Nd4jIndex e = 0; e < length; e++)
buffer[e] = canKeep ? (T) tmp[e] : BitwiseUtils::swap_bytes<T>((T) tmp[e]);
}
break;
case DataType_HALF: {
auto tmp = (float16 *) src;
#pragma omp parallel for simd schedule(guided)
for (Nd4jIndex e = 0; e < length; e++)
buffer[e] = canKeep ? (T) tmp[e] : BitwiseUtils::swap_bytes<T>((T) tmp[e]);
}
break;
default: {
nd4j_printf("Unsupported DataType requested: [%i]\n", (int) dataType);
// NOTE(review): throws a string literal (const char*), so callers
// must catch const char* rather than std::exception.
throw "Unsupported DataType";
}
}
}
};
}
#endif //LIBND4J_DATATYPECONVERSIONS_H
|
3mm.c | /**
* 3mm.c: This file was adapted from PolyBench/GPU 1.0 test suite
* to run on GPU with OpenMP 4.0 pragmas and OpenCL driver.
*
* http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*
* Contacts: Marcio M Pereira <mpereira@ic.unicamp.br>
* Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br>
* Luís Felipe Mattos <ra107822@students.ic.unicamp.br>
*/
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "BenchmarksUtil.h"
// define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
/* Problem size. */
#ifdef RUN_TEST
#define SIZE 1100
#elif RUN_BENCHMARK
#define SIZE 9600
#else
#define SIZE 1000
#endif
#define NI SIZE
#define NJ SIZE
#define NK SIZE
#define NL SIZE
#define NM SIZE
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
/* Fill the four input matrices with the benchmark's deterministic patterns
 * (identical formulas to the original PolyBench kernel). */
void init_array(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C, DATA_TYPE *D) {
  /* A is NI x NK */
  for (int r = 0; r < NI; r++)
    for (int c = 0; c < NK; c++)
      A[r * NK + c] = ((DATA_TYPE)r * c) / NI;

  /* B is NK x NJ */
  for (int r = 0; r < NK; r++)
    for (int c = 0; c < NJ; c++)
      B[r * NJ + c] = ((DATA_TYPE)r * (c + 1)) / NJ;

  /* C is NJ x NM */
  for (int r = 0; r < NJ; r++)
    for (int c = 0; c < NM; c++)
      C[r * NM + c] = ((DATA_TYPE)r * (c + 3)) / NL;

  /* D is NM x NL */
  for (int r = 0; r < NM; r++)
    for (int c = 0; c < NL; c++)
      D[r * NL + c] = ((DATA_TYPE)r * (c + 2)) / NK;
}
/* Count the entries of the CPU result G and the GPU result that differ by
 * more than PERCENT_DIFF_ERROR_THRESHOLD percent, report the count, and
 * return it (0 means the outputs match). */
int compareResults(DATA_TYPE *G, DATA_TYPE *G_outputFromGpu) {
  int mismatches = 0;

  for (int r = 0; r < NI; r++) {
    for (int c = 0; c < NL; c++) {
      DATA_TYPE ref = G[r * NL + c];
      DATA_TYPE gpu = G_outputFromGpu[r * NL + c];
      if (percentDiff(ref, gpu) > PERCENT_DIFF_ERROR_THRESHOLD)
        mismatches++;
    }
  }

  printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f "
         "Percent: %d\n",
         PERCENT_DIFF_ERROR_THRESHOLD, mismatches);
  return mismatches;
}
/* Sequential reference implementation of the three chained products:
 * E = A*B, F = C*D, G = E*F.  Results are written into E, F and G. */
void mm3_cpu(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C, DATA_TYPE *D,
             DATA_TYPE *E, DATA_TYPE *F, DATA_TYPE *G) {
  /* E := A*B  (NI x NJ) */
  for (int r = 0; r < NI; r++) {
    for (int c = 0; c < NJ; c++) {
      DATA_TYPE acc = 0;
      for (int t = 0; t < NK; ++t)
        acc += A[r * NK + t] * B[t * NJ + c];
      E[r * NJ + c] = acc;
    }
  }

  /* F := C*D  (NJ x NL) */
  for (int r = 0; r < NJ; r++) {
    for (int c = 0; c < NL; c++) {
      DATA_TYPE acc = 0;
      for (int t = 0; t < NM; ++t)
        acc += C[r * NM + t] * D[t * NL + c];
      F[r * NL + c] = acc;
    }
  }

  /* G := E*F  (NI x NL) */
  for (int r = 0; r < NI; r++) {
    for (int c = 0; c < NL; c++) {
      DATA_TYPE acc = 0;
      for (int t = 0; t < NJ; ++t)
        acc += E[r * NJ + t] * F[t * NL + c];
      G[r * NL + c] = acc;
    }
  }
}
/* Offloaded version of the three chained matrix products (E = A*B, F = C*D,
 * G = E*F).  The whole computation runs inside one `omp target` region:
 * A-D are copied to the device, E and F round-trip (they are produced and
 * then consumed on the device), and only G is copied back.
 * NOTE(review): the three parallel loops run sequentially relative to each
 * other inside the target region, which is required since F depends on
 * nothing from E but G reads both. */
void mm3_OMP(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C, DATA_TYPE *D,
DATA_TYPE *E, DATA_TYPE *F, DATA_TYPE *G) {
/* E := A*B */
#pragma omp target map( \
to : A[ : NI *NK], B[ : NK *NJ], C[ : NJ *NM], D[ : NM *NL]) map( \
tofrom : E[ : NI *NJ], F[ : NJ *NL]) \
map(from : G[ : NI *NL]) \
device(DEVICE_ID)
{
#pragma omp parallel for
for (int i = 0; i < NI; i++) {
for (int j = 0; j < NJ; j++) {
E[i * NJ + j] = 0;
for (int k = 0; k < NK; ++k) {
E[i * NJ + j] += A[i * NK + k] * B[k * NJ + j];
}
}
}
/* F := C*D */
#pragma omp parallel for
for (int i = 0; i < NJ; i++) {
for (int j = 0; j < NL; j++) {
F[i * NL + j] = 0;
for (int k = 0; k < NM; ++k) {
F[i * NL + j] += C[i * NM + k] * D[k * NL + j];
}
}
}
/* G := E*F */
#pragma omp parallel for
for (int i = 0; i < NI; i++) {
for (int j = 0; j < NL; j++) {
G[i * NL + j] = 0;
for (int k = 0; k < NJ; ++k) {
G[i * NL + j] += E[i * NJ + k] * F[k * NL + j];
}
}
}
}
}
/* Driver: allocates the matrices, runs the offloaded 3mm kernel (and, under
 * RUN_TEST, the CPU reference for validation), reports timings, and releases
 * all memory.  Returns the number of mismatching elements (0 on success).
 *
 * BUG FIX: the original leaked E_outputFromGpu and F_outputFromGpu -- only
 * G_outputFromGpu was freed. */
int main(int argc, char **argv) {
  double t_start, t_end;
  int fail = 0;

  DATA_TYPE *A;
  DATA_TYPE *B;
  DATA_TYPE *C;
  DATA_TYPE *D;
  DATA_TYPE *E;
  DATA_TYPE *F;
  DATA_TYPE *G;
  DATA_TYPE *E_outputFromGpu;
  DATA_TYPE *F_outputFromGpu;
  DATA_TYPE *G_outputFromGpu;

  A = (DATA_TYPE *)malloc(NI * NK * sizeof(DATA_TYPE));
  B = (DATA_TYPE *)malloc(NK * NJ * sizeof(DATA_TYPE));
  C = (DATA_TYPE *)malloc(NJ * NM * sizeof(DATA_TYPE));
  D = (DATA_TYPE *)malloc(NM * NL * sizeof(DATA_TYPE));
  E = (DATA_TYPE *)malloc(NI * NJ * sizeof(DATA_TYPE));
  F = (DATA_TYPE *)malloc(NJ * NL * sizeof(DATA_TYPE));
  G = (DATA_TYPE *)malloc(NI * NL * sizeof(DATA_TYPE));
  /* GPU outputs are zero-initialized because E and F are mapped tofrom. */
  E_outputFromGpu = (DATA_TYPE *)calloc(NI * NJ, sizeof(DATA_TYPE));
  F_outputFromGpu = (DATA_TYPE *)calloc(NJ * NL, sizeof(DATA_TYPE));
  G_outputFromGpu = (DATA_TYPE *)calloc(NI * NL, sizeof(DATA_TYPE));

  fprintf(
      stdout,
      "<< Linear Algebra: 3 Matrix Multiplications (E=A.B; F=C.D; G=E.F) >>\n");

  init_array(A, B, C, D);

  t_start = rtclock();
  mm3_OMP(A, B, C, D, E_outputFromGpu, F_outputFromGpu, G_outputFromGpu);
  t_end = rtclock();
  fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);

#ifdef RUN_TEST
  t_start = rtclock();
  mm3_cpu(A, B, C, D, E, F, G);
  t_end = rtclock();
  fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
  fail = compareResults(G, G_outputFromGpu);
#endif

  free(A);
  free(B);
  free(C);
  free(D);
  free(E);
  free(F);
  free(G);
  free(E_outputFromGpu);
  free(F_outputFromGpu);
  free(G_outputFromGpu);
  return fail;
}
|
assign_h_by_distance_process.h | /*
==============================================================================
KratosPFEMApplication
A library based on:
Kratos
A General Purpose Software for Multi-Physics Finite Element Analysis
Version 1.0 (Released on march 05, 2007).
Copyright 2007
Pooyan Dadvand, Riccardo Rossi
pooyan@cimne.upc.edu
rrossi@cimne.upc.edu
- CIMNE (International Center for Numerical Methods in Engineering),
Gran Capita' s/n, 08034 Barcelona, Spain
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following condition:
Distribution of this code for any commercial purpose is permissible
ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNERS.
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
==============================================================================
*/
//
// Project Name: Kratos
// Last Modified by: $Author: anonymous $
// Date: $Date: 2008-11-19 15:38:01 $
// Revision: $Revision: 1.1 $
//
//
#if !defined(KRATOS_ASSIGN_H_BY_DISTANCE_PROCESS_INCLUDED)
#define KRATOS_ASSIGN_H_BY_DISTANCE_PROCESS_INCLUDED
#include <string>
#include <iostream>
#include <algorithm>
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/node.h"
#include "utilities/geometry_utilities.h"
#include "geometries/triangle_2d_3.h"
#include "utilities/openmp_utils.h"
//#include "kratos/applications/MeshingApplication/meshing_application.h"
namespace Kratos
{
// Process that assigns the nodal mesh size NODAL_H from the signed DISTANCE
// field:
//   0 < DISTANCE <= ref_dist : NODAL_H ramps linearly from min_H at
//                              distance 0 up to sec_min_H at ref_dist
//   DISTANCE <= 0            : NODAL_H = air_H (presumably the "air" side of
//                              the interface -- confirm with callers)
//   DISTANCE > ref_dist      : NODAL_H left untouched
class AssignHByDistanceProcess
: public Process
{
public:
AssignHByDistanceProcess(ModelPart& model_part, double min_H, double sec_min_H, double ref_dist, double air_H )
:Process(), mr_model_part(model_part), mr_min_H(min_H), mr_sec_min_H(sec_min_H), mr_ref_dist(ref_dist), mr_air_H(air_H)
{
}
/// Destructor.
virtual ~AssignHByDistanceProcess()
{
}
///@}
///@name Operators
///@{
// Function-call operator, forwards to Execute() so the process can be used
// as a functor.
void operator()()
{
Execute();
}
virtual void Execute()
{
// Slope of the linear H ramp over [0, ref_dist].
double slope = (mr_sec_min_H - mr_min_H)/mr_ref_dist;
// Split the node container into one contiguous partition per thread.
int NumThreads = OpenMPUtils::GetNumThreads();
OpenMPUtils::PartitionVector NodePartition;
OpenMPUtils::DivideInPartitions(mr_model_part.Nodes().size(),NumThreads,NodePartition);
#pragma omp parallel
{
int k = OpenMPUtils::ThisThread();
ModelPart::NodeIterator NodesBegin = mr_model_part.NodesBegin() + NodePartition[k];
ModelPart::NodeIterator NodesEnd = mr_model_part.NodesBegin() + NodePartition[k+1];
for (ModelPart::NodeIterator nd = NodesBegin; nd != NodesEnd; nd++)
{
const double current_dist = nd->FastGetSolutionStepValue(DISTANCE);
if(current_dist <= mr_ref_dist && current_dist>0.0)
nd->FastGetSolutionStepValue(NODAL_H) = mr_min_H + slope*current_dist;
if(current_dist <= 0.0)
nd->FastGetSolutionStepValue(NODAL_H) = mr_air_H;
}
// NOTE(review): this statement is inside the parallel region, so the
// banner is printed once per thread; move it outside the region if a
// single line is intended.  (The banner text also names a different
// process -- looks copy-pasted.)
KRATOS_WATCH("++++++++++++++++++++END OF SaveElementByFlagProcess PROCESS ^^^^^^^^^^^^^^^^^^^^^^");
}
}
private:
ModelPart& mr_model_part;
double mr_min_H; // NODAL_H at DISTANCE == 0
double mr_sec_min_H; // NODAL_H at DISTANCE == ref_dist
double mr_ref_dist; // distance over which H ramps linearly
double mr_air_H; // NODAL_H assigned wherever DISTANCE <= 0
};
}//namespace kratos
#endif
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for two struct timeval values.
 *
 * The subtraction first normalises *y so that the microsecond field of the
 * difference lands in [0, 1e6); note that *y is modified in the process
 * (same contract as the classic GNU libc manual example this comes from).
 *
 * Returns 1 when the difference is negative (x earlier than y), else 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y->tv_usec until x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec)
    {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Carry surplus microseconds in the other direction. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* After normalisation tv_usec is certainly non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/* Benchmark driver for the order-8 3D 25-point wave stencil.
 *
 * Fixes over the original:
 *  - Nx/Ny/Nz/Nt now have defaults; previously they were read uninitialized
 *    (undefined behavior) when fewer than 3/4 command-line args were given.
 *  - roc2 was malloc'd twice, leaking the first allocation.
 *  - The init loops started at 1 and never touched A[1], so the stencil read
 *    uninitialized memory (indices 0..3 and the whole second time plane).
 *    Both planes are now fully initialized.
 *  - A (the top-level pointer array) and tile_size are now freed.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  /* Grid dimensions (interior size + 2*4 ghost layers per axis) and time
   * steps; defaults apply when not given on the command line. */
  int Nx = 40, Ny = 40, Nz = 40, Nt = 5;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* Two time planes A[0]/A[1] plus the coefficient field roc2, each an
   * Nz x Ny x Nx pointer-array structure. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  double ***roc2 = (double ***) malloc(sizeof(double**)*Nz);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    roc2[i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
      roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 4;
  tile_size[3] = 256;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;

  /* Deterministic pseudo-random initialization of every cell (including
   * ghost layers) of both time planes and the coefficient field. */
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = 0.0;
        roc2[i][j][k] = 2.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  /* Order-8 spatial stencil coefficients. */
  const double coef0 = -0.28472;
  const double coef1 =  0.16000;
  const double coef2 = -0.02000;
  const double coef3 =  0.00254;
  const double coef4 = -0.00018;

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz-4; i++) {
        for (j = 4; j < Ny-4; j++) {
          for (k = 4; k < Nx-4; k++) {
            A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
                coef0* A[t%2][i  ][j  ][k  ] +
                coef1*(A[t%2][i-1][j  ][k  ] + A[t%2][i+1][j  ][k  ] +
                       A[t%2][i  ][j-1][k  ] + A[t%2][i  ][j+1][k  ] +
                       A[t%2][i  ][j  ][k-1] + A[t%2][i  ][j  ][k+1]) +
                coef2*(A[t%2][i-2][j  ][k  ] + A[t%2][i+2][j  ][k  ] +
                       A[t%2][i  ][j-2][k  ] + A[t%2][i  ][j+2][k  ] +
                       A[t%2][i  ][j  ][k-2] + A[t%2][i  ][j  ][k+2]) +
                coef3*(A[t%2][i-3][j  ][k  ] + A[t%2][i+3][j  ][k  ] +
                       A[t%2][i  ][j-3][k  ] + A[t%2][i  ][j+3][k  ] +
                       A[t%2][i  ][j  ][k-3] + A[t%2][i  ][j  ][k+3]) +
                coef4*(A[t%2][i-4][j  ][k  ] + A[t%2][i+4][j  ][k  ] +
                       A[t%2][i  ][j-4][k  ] + A[t%2][i  ][j+4][k  ] +
                       A[t%2][i  ][j  ][k-4] + A[t%2][i  ][j  ][k+4]) );
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  /* Release all heap memory. */
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(roc2);
  free(A);
  free(tile_size);
  return 0;
}
|
DRB054-inneronly2-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Example with loop-carried data dependence at the outer level loop.
The inner level loop can be parallelized.
*/
/* BUG FIX: printf is called below but <stdio.h> was never included anywhere
 * in this file; calling a function with no prototype is an implicit
 * declaration, which was removed in C99 and is an error in modern C. */
#include <stdio.h>

/* DataRaceBench example: the second loop nest carries a dependence on the
 * outer i loop (row i reads row i-1), so only the inner j loop is
 * parallelized -- this is the intended race-free pattern.  The OpenMP
 * pragmas are part of the benchmark and are left exactly as-is. */
int main()
{
int i,j;
int n=100, m=100;
double b[n][m];
/* Initialization: each (i,j) element is written exactly once. */
#pragma omp parallel for private(i ,j )
for(i=0;i<n; i++)
#pragma omp parallel for private(j )
for(j=0;j<n; j++)
b[i][j]=(double)(i*j);
/* Wavefront update: outer loop must stay serial, inner loop is parallel. */
for (i=1;i<n;i++)
#pragma omp parallel for private(j )
for (j=1;j<m;j++)
b[i][j]=b[i-1][j-1];
for(i=0;i<n; i++)
for(j=0;j<n; j++)
printf("%lf\n", b[i][j]);
return 0;
}
|
GB_unop__identity_fc64_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fc64_fp32)
// op(A') function: GB (_unop_tran__identity_fc64_fp32)
// C type: GxB_FC64_t
// A type: float
// cast: GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0)
// unaryop: cij = aij
#define GB_ATYPE \
float
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Auto-generated kernel: casts each float entry of A to GxB_FC64_t (double
// complex with zero imaginary part) and stores it in Cx.  The identity op
// itself is a no-op; all the work is the typecast.  Returns GrB_NO_VALUE
// when this kernel is compiled out via GB_DISABLE.
GrB_Info GB (_unop_apply__identity_fc64_fp32)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // dense/sparse case: every slot 0..anz-1 holds a live entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            // widen to double and promote to complex with 0 imaginary part
            GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip slots whose bitmap bit is clear (no entry present)
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Auto-generated wrapper: the actual transpose loop lives in the shared
// template GB_unop_transpose.c, which expands using the GB_* macros defined
// above (GB_CAST_OP et al.).  Returns GrB_NO_VALUE when compiled out.
GrB_Info GB (_unop_tran__identity_fc64_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // per-thread workspaces
    const int64_t *restrict A_slice,    // how A is partitioned across threads
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
J2OrbitalSoA.h | //////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
// Amrita Mathuriya, amrita.mathuriya@intel.com, Intel Corp.
// Ye Luo, yeluo@anl.gov, Argonne National Laboratory
//
// File created by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
//////////////////////////////////////////////////////////////////////////////////////
// -*- C++ -*-
#ifndef QMCPLUSPLUS_TWOBODYJASTROW_OPTIMIZED_SOA_H
#define QMCPLUSPLUS_TWOBODYJASTROW_OPTIMIZED_SOA_H
#include <map>
#include <numeric>
#include "Configuration.h"
#if !defined(QMC_BUILD_SANDBOX_ONLY)
#include "QMCWaveFunctions/WaveFunctionComponent.h"
#include "QMCWaveFunctions/Jastrow/DiffTwoBodyJastrowOrbital.h"
#endif
#include "Particle/DistanceTableData.h"
#include "LongRange/StructFact.h"
#include <simd/allocator.hpp>
#include <simd/algorithm.hpp>
namespace qmcplusplus
{
// helper class to activate KEcorr during optimizing Jastrow
//
// Computes the Chiesa kinetic-energy finite-size correction from the
// two-body Jastrow pair functors F (indexed F[ig*num_groups_+jg]).
// Fixes vs. original: the reservation for num_elec_in_groups_ was
// hard-coded to 3 instead of the actual group count, and G0mag was left
// uninitialized when the structure factor (SK) is absent.
template<typename RT, class FT>
class J2KECorrection
{
  size_t num_groups_;
  std::vector<size_t> num_elec_in_groups_;
  RT num_elecs_;
  RT vol;    // simulation-cell volume
  RT G0mag;  // magnitude of the smallest k vector; 0 when SK is disabled
  const std::vector<FT*>& F_;
  bool SK_enabled;

public:
  J2KECorrection(const ParticleSet& targetPtcl, const std::vector<FT*>& F)
      : num_groups_(targetPtcl.groups()),
        num_elecs_(targetPtcl.getTotalNum()),
        vol(targetPtcl.Lattice.Volume),
        G0mag(0),  // defined value even when SK is disabled
        F_(F),
        SK_enabled(targetPtcl.SK != nullptr)
  {
    // compute num_elec_in_groups_ (was reserve(3): wrong for >3 groups)
    num_elec_in_groups_.reserve(num_groups_);
    for (int i = 0; i < num_groups_; i++)
      num_elec_in_groups_.push_back(targetPtcl.last(i) - targetPtcl.first(i));
    if (SK_enabled)
      G0mag = std::sqrt(targetPtcl.SK->KLists.ksq[0]);
  }

  /** Chiesa KE correction; returns 0 when no structure factor is available. */
  RT computeKEcorr()
  {
    if (!SK_enabled)
      return 0;
    const int numPoints = 1000;  // radial quadrature points per functor
    RT uk = 0.0;
    RT a  = 1.0;
    for (int i = 0; i < num_groups_; i++)
    {
      int Ni = num_elec_in_groups_[i];
      for (int j = 0; j < num_groups_; j++)
      {
        int Nj = num_elec_in_groups_[j];
        if (F_[i * num_groups_ + j])
        {
          FT& ufunc = *(F_[i * num_groups_ + j]);
          RT radius = ufunc.cutoff_radius;
          RT k      = G0mag;
          RT dr     = radius / (RT)(numPoints - 1);
          // trapezoid-like sum of 4*pi*r*sin(kr)/k * u(r), weighted by
          // the pair-count fraction Nj/(Ni+Nj)
          for (int ir = 0; ir < numPoints; ir++)
          {
            RT r = dr * (RT)ir;
            RT u = ufunc.evaluate(r);
            uk += 0.5 * 4.0 * M_PI * r * std::sin(k * r) / k * u * dr * (RT)Nj / (RT)(Ni + Nj);
          }
        }
      }
    }
    // fixed-point iteration for the RPA-form parameter a
    for (int iter = 0; iter < 20; iter++)
      a = uk / (4.0 * M_PI * (1.0 / (G0mag * G0mag) - 1.0 / (G0mag * G0mag + 1.0 / a)));
    return 4.0 * M_PI * a / (4.0 * vol) * num_elecs_;
  }
};
/** @ingroup WaveFunctionComponent
* @brief Specialization for two-body Jastrow function using multiple functors
*
* Each pair-type can have distinct function \f$u(r_{ij})\f$.
* For electrons, distinct pair correlation functions are used
* for spins up-up/down-down and up-down/down-up.
*
* Based on J2OrbitalSoA.h with these considerations
* - DistanceTableData using SoA containers
* - support mixed precision: FT::real_type != OHMMS_PRECISION
 * - loops over the groups: eliminated PairID
* - support simd function
* - double the loop counts
* - Memory use is O(N).
*/
template<class FT>
class J2OrbitalSoA : public WaveFunctionComponent
{
public:
  ///alias FuncType
  using FuncType = FT;
  ///type of each component U, dU, d2U;
  using valT = typename FT::real_type;
  ///element position type
  using posT = TinyVector<valT, OHMMS_DIM>;
  ///use the same container
  using RowContainer = DistanceTableData::RowContainer;
  using gContainer_type = VectorSoaContainer<valT, OHMMS_DIM>;
  // Ye: leaving this public is bad but currently used by unit tests.
  ///Container for \f$F[ig*NumGroups+jg]\f$.
  std::vector<FT*> F;

protected:
  ///number of particles
  size_t N;
  ///number of particles + padded
  size_t N_padded;
  ///number of groups of the target particleset
  size_t NumGroups;
  ///diff value
  RealType DiffVal;
  ///Correction
  RealType KEcorr;
  ///\f$Uat[i] = sum_(j) u_{i,j}\f$
  Vector<valT> Uat;
  ///\f$dUat[i] = sum_(j) du_{i,j}\f$
  gContainer_type dUat;
  ///\f$d2Uat[i] = sum_(j) d2u_{i,j}\f$
  Vector<valT> d2Uat;
  // value of U for the particle being moved (set by ratio/ratioGrad)
  valT cur_Uat;
  // scratch arrays for the proposed (cur_*) and previous (old_*) positions
  aligned_vector<valT> cur_u, cur_du, cur_d2u;
  aligned_vector<valT> old_u, old_du, old_d2u;
  // scratch for distance compression inside the functor evaluations
  aligned_vector<valT> DistCompressed;
  aligned_vector<int> DistIndice;
  ///Unique J2 set for cleanup (owns the functors; F holds aliases)
  std::map<std::string, FT*> J2Unique;
  /// e-e table ID
  const int my_table_ID_;
  // helper for compute J2 Chiesa KE correction
  J2KECorrection<RealType, FT> j2_ke_corr_helper;

public:
  J2OrbitalSoA(ParticleSet& p, int tid);
  J2OrbitalSoA(const J2OrbitalSoA& rhs) = delete;
  ~J2OrbitalSoA();
  /* initialize storage */
  void init(ParticleSet& p);
  /** add functor for (ia,ib) pair */
  void addFunc(int ia, int ib, FT* j);
  void resetTargetParticleSet(ParticleSet& P)
  {
    if (dPsi)
      dPsi->resetTargetParticleSet(P);
  }
  /** check in an optimizable parameter
   * @param o a super set of optimizable variables
   */
  void checkInVariables(opt_variables_type& active)
  {
    myVars.clear();
    typename std::map<std::string, FT*>::iterator it(J2Unique.begin()), it_end(J2Unique.end());
    while (it != it_end)
    {
      (*it).second->checkInVariables(active);
      (*it).second->checkInVariables(myVars);
      ++it;
    }
  }
  /** check out optimizable variables
   */
  void checkOutVariables(const opt_variables_type& active)
  {
    myVars.getIndex(active);
    Optimizable = myVars.is_optimizable();
    typename std::map<std::string, FT*>::iterator it(J2Unique.begin()), it_end(J2Unique.end());
    while (it != it_end)
    {
      (*it).second->checkOutVariables(active);
      ++it;
    }
    if (dPsi)
      dPsi->checkOutVariables(active);
  }
  ///reset the value of all the unique Two-Body Jastrow functions
  void resetParameters(const opt_variables_type& active)
  {
    if (!Optimizable)
      return;
    typename std::map<std::string, FT*>::iterator it(J2Unique.begin()), it_end(J2Unique.end());
    while (it != it_end)
    {
      (*it).second->resetParameters(active);
      ++it;
    }
    if (dPsi)
      dPsi->resetParameters(active);
    // refresh local copies of the active optimizable values
    for (int i = 0; i < myVars.size(); ++i)
    {
      int ii = myVars.Index[i];
      if (ii >= 0)
        myVars[i] = active[ii];
    }
  }
  void finalizeOptimization() { KEcorr = j2_ke_corr_helper.computeKEcorr(); }
  /** print the state, e.g., optimizables */
  void reportStatus(std::ostream& os)
  {
    typename std::map<std::string, FT*>::iterator it(J2Unique.begin()), it_end(J2Unique.end());
    while (it != it_end)
    {
      (*it).second->myVars.print(os);
      ++it;
    }
  }
  WaveFunctionComponentPtr makeClone(ParticleSet& tqp) const;
  LogValueType evaluateLog(ParticleSet& P, ParticleSet::ParticleGradient_t& G, ParticleSet::ParticleLaplacian_t& L);
  void evaluateHessian(ParticleSet& P, HessVector_t& grad_grad_psi);
  /** recompute internal data assuming distance table is fully ready */
  void recompute(ParticleSet& P);
  PsiValueType ratio(ParticleSet& P, int iat);
  // ratios for all virtual-particle positions of the reference particle
  void evaluateRatios(VirtualParticleSet& VP, std::vector<ValueType>& ratios)
  {
    for (int k = 0; k < ratios.size(); ++k)
      ratios[k] =
          std::exp(Uat[VP.refPtcl] - computeU(VP.refPS, VP.refPtcl, VP.getDistTable(my_table_ID_).Distances[k]));
  }
  void evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios);
  GradType evalGrad(ParticleSet& P, int iat);
  PsiValueType ratioGrad(ParticleSet& P, int iat, GradType& grad_iat);
  void acceptMove(ParticleSet& P, int iat);
  inline void restore(int iat) {}
  /** compute G and L after the sweep
   */
  void evaluateGL(ParticleSet& P,
                  ParticleSet::ParticleGradient_t& G,
                  ParticleSet::ParticleLaplacian_t& L,
                  bool fromscratch = false);
  // first call records Uat/dUat/d2Uat into the walker buffer and hands
  // ownership of their storage to the buffer; later calls just skip ahead
  inline void registerData(ParticleSet& P, WFBufferType& buf)
  {
    if (Bytes_in_WFBuffer == 0)
    {
      Bytes_in_WFBuffer = buf.current();
      buf.add(Uat.begin(), Uat.end());
      buf.add(dUat.data(), dUat.end());
      buf.add(d2Uat.begin(), d2Uat.end());
      Bytes_in_WFBuffer = buf.current() - Bytes_in_WFBuffer;
      // free local space
      Uat.free();
      dUat.free();
      d2Uat.free();
    }
    else
    {
      buf.forward(Bytes_in_WFBuffer);
    }
  }
  // re-attach Uat/dUat/d2Uat as views over the walker-buffer storage
  inline void copyFromBuffer(ParticleSet& P, WFBufferType& buf)
  {
    Uat.attachReference(buf.lendReference<valT>(N), N);
    dUat.attachReference(N, N_padded, buf.lendReference<valT>(N_padded * OHMMS_DIM));
    d2Uat.attachReference(buf.lendReference<valT>(N), N);
  }
  LogValueType updateBuffer(ParticleSet& P, WFBufferType& buf, bool fromscratch = false)
  {
    evaluateGL(P, P.G, P.L, false);
    buf.forward(Bytes_in_WFBuffer);
    return LogValue;
  }
  /*@{ internal compute engines*/
  // sum of u(r_ij) over all particles j for particle iat at distances dist
  inline valT computeU(const ParticleSet& P, int iat, const RealType* restrict dist)
  {
    valT curUat(0);
    const int igt = P.GroupID[iat] * NumGroups;
    for (int jg = 0; jg < NumGroups; ++jg)
    {
      const FuncType& f2(*F[igt + jg]);
      int iStart = P.first(jg);
      int iEnd = P.last(jg);
      curUat += f2.evaluateV(iat, iStart, iEnd, dist, DistCompressed.data());
    }
    return curUat;
  }
  inline void computeU3(const ParticleSet& P,
                        int iat,
                        const RealType* restrict dist,
                        RealType* restrict u,
                        RealType* restrict du,
                        RealType* restrict d2u,
                        bool triangle = false);
  /** compute gradient
   */
  inline posT accumulateG(const valT* restrict du, const RowContainer& displ) const
  {
    posT grad;
    for (int idim = 0; idim < OHMMS_DIM; ++idim)
    {
      const valT* restrict dX = displ.data(idim);
      valT s = valT();
#pragma omp simd reduction(+ : s) aligned(du, dX)
      for (int jat = 0; jat < N; ++jat)
        s += du[jat] * dX[jat];
      grad[idim] = s;
    }
    return grad;
  }
  /**@} */
  RealType ChiesaKEcorrection() { return KEcorr = j2_ke_corr_helper.computeKEcorr(); }
  RealType KECorrection() { return KEcorr; }
};
// Construct the two-body Jastrow: registers the e-e distance table (SoA
// layout) and wires the KE-correction helper to the (still empty) functor
// table F; storage is sized by init().
template<typename FT>
J2OrbitalSoA<FT>::J2OrbitalSoA(ParticleSet& p, int tid) : my_table_ID_(p.addTable(p, DT_SOA)), j2_ke_corr_helper(p, F)
{
  init(p);
  KEcorr = 0.0;
  ClassName = "J2OrbitalSoA";
}
// Destructor: J2Unique owns the unique functor objects (F only aliases
// them), so they are released exactly once here.
template<typename FT>
J2OrbitalSoA<FT>::~J2OrbitalSoA()
{
  for (auto& name_and_func : J2Unique)
    delete name_and_func.second;
}
// Size all per-particle working arrays to N and the pair-functor table to
// NumGroups^2 (entries are filled later by addFunc).
template<typename FT>
void J2OrbitalSoA<FT>::init(ParticleSet& p)
{
  N = p.getTotalNum();
  N_padded = getAlignedSize<valT>(N);
  NumGroups = p.groups();
  Uat.resize(N);
  dUat.resize(N);
  d2Uat.resize(N);
  cur_u.resize(N);
  cur_du.resize(N);
  cur_d2u.resize(N);
  old_u.resize(N);
  old_du.resize(N);
  old_d2u.resize(N);
  F.resize(NumGroups * NumGroups, nullptr);
  DistCompressed.resize(N);
  DistIndice.resize(N);
}
// Register functor j for the (ia,ib) group pair.  The table F is symmetric;
// ownership is recorded in J2Unique keyed by the pair-id string.
template<typename FT>
void J2OrbitalSoA<FT>::addFunc(int ia, int ib, FT* j)
{
  if (ia == ib)
  {
    if (ia == 0) //first time, assign everything
    {
      // fill every not-yet-assigned slot with this same-spin functor
      int ij = 0;
      for (int ig = 0; ig < NumGroups; ++ig)
        for (int jg = 0; jg < NumGroups; ++jg, ++ij)
          if (F[ij] == nullptr)
            F[ij] = j;
    }
    else
      F[ia * NumGroups + ib] = j;
  }
  else
  {
    if (N == 2)
    {
      // a very special case, 1 up + 1 down
      // uu/dd was prevented by the builder
      for (int ig = 0; ig < NumGroups; ++ig)
        for (int jg = 0; jg < NumGroups; ++jg)
          F[ig * NumGroups + jg] = j;
    }
    else
    {
      // generic case: assign both symmetric slots
      F[ia * NumGroups + ib] = j;
      F[ib * NumGroups + ia] = j;
    }
  }
  std::stringstream aname;
  aname << ia << ib;
  J2Unique[aname.str()] = j;
}
// Deep-copy this component for particle set tqp.  Each unique functor is
// cloned once (fcmap prevents duplicating shared functors) and re-registered
// through addFunc so the clone owns its own J2Unique set.
template<typename FT>
WaveFunctionComponentPtr J2OrbitalSoA<FT>::makeClone(ParticleSet& tqp) const
{
  J2OrbitalSoA<FT>* j2copy = new J2OrbitalSoA<FT>(tqp, -1);
  if (dPsi)
    j2copy->dPsi = dPsi->makeClone(tqp);
  std::map<const FT*, FT*> fcmap;
  // only the upper triangle needs visiting; addFunc symmetrizes
  for (int ig = 0; ig < NumGroups; ++ig)
    for (int jg = ig; jg < NumGroups; ++jg)
    {
      int ij = ig * NumGroups + jg;
      if (F[ij] == 0)
        continue;
      typename std::map<const FT*, FT*>::iterator fit = fcmap.find(F[ij]);
      if (fit == fcmap.end())
      {
        FT* fc = new FT(*F[ij]);
        j2copy->addFunc(ig, jg, fc);
        //if (dPsi) (j2copy->dPsi)->addFunc(aname.str(),ig,jg,fc);
        fcmap[F[ij]] = fc;
      }
    }
  j2copy->Optimizable = Optimizable;
  return j2copy;
}
/** internal function to compute \f$\sum_j u(r_j), du/dr, d2u/dr2\f$
 * @param P particleset
 * @param iat particle index
 * @param dist starting distance
 * @param u starting value
 * @param du starting first deriv
 * @param d2u starting second deriv
 * @param triangle when true, only particles j < iat are evaluated
 *        (lower-triangle sweep used by recompute)
 */
template<typename FT>
inline void J2OrbitalSoA<FT>::computeU3(const ParticleSet& P,
                                        int iat,
                                        const RealType* restrict dist,
                                        RealType* restrict u,
                                        RealType* restrict du,
                                        RealType* restrict d2u,
                                        bool triangle)
{
  const int jelmax = triangle ? iat : N;
  constexpr valT czero(0);
  // clear the output slots that will be accumulated into
  std::fill_n(u, jelmax, czero);
  std::fill_n(du, jelmax, czero);
  std::fill_n(d2u, jelmax, czero);
  const int igt = P.GroupID[iat] * NumGroups;
  for (int jg = 0; jg < NumGroups; ++jg)
  {
    const FuncType& f2(*F[igt + jg]);
    int iStart = P.first(jg);
    int iEnd = std::min(jelmax, P.last(jg));
    f2.evaluateVGL(iat, iStart, iEnd, dist, u, du, d2u, DistCompressed.data(), DistIndice.data());
  }
  //u[iat]=czero;
  //du[iat]=czero;
  //d2u[iat]=czero;
}
// Wavefunction ratio psi(new)/psi(old) for the proposed move of particle
// iat, using the temporary distances in the distance table.  Stores the
// proposed U value in cur_Uat for a possible acceptMove.
template<typename FT>
typename J2OrbitalSoA<FT>::PsiValueType J2OrbitalSoA<FT>::ratio(ParticleSet& P, int iat)
{
  //only ratio, ready to compute it again
  UpdateMode = ORB_PBYP_RATIO;
  cur_Uat = computeU(P, iat, P.getDistTable(my_table_ID_).Temp_r.data());
  return std::exp(static_cast<PsiValueType>(Uat[iat] - cur_Uat));
}
// Ratios of psi for moving each particle i to the single position whose
// distances are in Temp_r.  The per-group sum sumU is computed once and the
// i==j self term is removed per particle.
template<typename FT>
inline void J2OrbitalSoA<FT>::evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios)
{
  const auto& d_table = P.getDistTable(my_table_ID_);
  const auto* restrict dist = d_table.Temp_r.data();
  for (int ig = 0; ig < NumGroups; ++ig)
  {
    const int igt = ig * NumGroups;
    valT sumU(0);
    for (int jg = 0; jg < NumGroups; ++jg)
    {
      const FuncType& f2(*F[igt + jg]);
      int iStart = P.first(jg);
      int iEnd = P.last(jg);
      sumU += f2.evaluateV(-1, iStart, iEnd, dist, DistCompressed.data());
    }
    for (int i = P.first(ig); i < P.last(ig); ++i)
    {
      // remove self-interaction
      const valT Uself = F[igt + ig]->evaluate(dist[i]);
      ratios[i] = std::exp(Uat[i] + Uself - sumU);
    }
  }
}
// Gradient of log(psi) w.r.t. particle iat, read from the cached dUat table.
template<typename FT>
typename J2OrbitalSoA<FT>::GradType J2OrbitalSoA<FT>::evalGrad(ParticleSet& P, int iat)
{
  return GradType(dUat[iat]);
}
// Ratio and gradient for the proposed move of particle iat: fills the cur_*
// scratch arrays (value and derivatives at the new position), accumulates
// the gradient contribution, and returns exp(Uat[iat]-cur_Uat).
template<typename FT>
typename J2OrbitalSoA<FT>::PsiValueType J2OrbitalSoA<FT>::ratioGrad(ParticleSet& P, int iat, GradType& grad_iat)
{
  UpdateMode = ORB_PBYP_PARTIAL;
  computeU3(P, iat, P.getDistTable(my_table_ID_).Temp_r.data(), cur_u.data(), cur_du.data(), cur_d2u.data());
  cur_Uat = simd::accumulate_n(cur_u.data(), N, valT());
  DiffVal = Uat[iat] - cur_Uat;
  grad_iat += accumulateG(cur_du.data(), P.getDistTable(my_table_ID_).Temp_dr);
  return std::exp(static_cast<PsiValueType>(DiffVal));
}
// Accept the proposed move of particle iat: update the cached per-particle
// sums Uat, dUat, d2Uat incrementally from the old and new pair
// contributions, rather than recomputing everything from scratch.
template<typename FT>
void J2OrbitalSoA<FT>::acceptMove(ParticleSet& P, int iat)
{
  // get the old u, du, d2u
  const auto& d_table = P.getDistTable(my_table_ID_);
  computeU3(P, iat, d_table.Distances[iat], old_u.data(), old_du.data(), old_d2u.data());
  if (UpdateMode == ORB_PBYP_RATIO)
  { //ratio-only during the move; need to compute derivatives
    const auto* restrict dist = d_table.Temp_r.data();
    computeU3(P, iat, dist, cur_u.data(), cur_du.data(), cur_d2u.data());
  }
  valT cur_d2Uat(0);
  const auto& new_dr = d_table.Temp_dr;
  const auto& old_dr = d_table.Displacements[iat];
  // (D-1)/r factor of the radial Laplacian in D dimensions
  constexpr valT lapfac = OHMMS_DIM - RealType(1);
  // update values and Laplacians of every other particle jat
#pragma omp simd reduction(+ : cur_d2Uat)
  for (int jat = 0; jat < N; jat++)
  {
    const valT du = cur_u[jat] - old_u[jat];
    const valT newl = cur_d2u[jat] + lapfac * cur_du[jat];
    const valT dl = old_d2u[jat] + lapfac * old_du[jat] - newl;
    Uat[jat] += du;
    d2Uat[jat] += dl;
    cur_d2Uat -= newl;
  }
  // update gradients: remove the old pair term, add the new one, and
  // accumulate the moved particle's own gradient per dimension
  posT cur_dUat;
  for (int idim = 0; idim < OHMMS_DIM; ++idim)
  {
    const valT* restrict new_dX = new_dr.data(idim);
    const valT* restrict old_dX = old_dr.data(idim);
    const valT* restrict cur_du_pt = cur_du.data();
    const valT* restrict old_du_pt = old_du.data();
    valT* restrict save_g = dUat.data(idim);
    valT cur_g = cur_dUat[idim];
#pragma omp simd reduction(+ : cur_g) aligned(old_dX, new_dX, save_g, cur_du_pt, old_du_pt)
    for (int jat = 0; jat < N; jat++)
    {
      const valT newg = cur_du_pt[jat] * new_dX[jat];
      const valT dg = newg - old_du_pt[jat] * old_dX[jat];
      save_g[jat] -= dg;
      cur_g += newg;
    }
    cur_dUat[idim] = cur_g;
  }
  // finally replace the moved particle's own cached entries
  LogValue += Uat[iat] - cur_Uat;
  Uat[iat] = cur_Uat;
  dUat(iat) = cur_dUat;
  d2Uat[iat] = cur_d2Uat;
}
// Rebuild Uat, dUat, d2Uat from scratch using a lower-triangle sweep
// (computeU3 with triangle=true): each pair (iat,jat<iat) is evaluated once
// and its contribution scattered to both particles.
template<typename FT>
void J2OrbitalSoA<FT>::recompute(ParticleSet& P)
{
  const auto& d_table = P.getDistTable(my_table_ID_);
  for (int ig = 0; ig < NumGroups; ++ig)
  {
    for (int iat = P.first(ig), last = P.last(ig); iat < last; ++iat)
    {
      computeU3(P, iat, d_table.Distances[iat], cur_u.data(), cur_du.data(), cur_d2u.data(), true);
      Uat[iat] = simd::accumulate_n(cur_u.data(), iat, valT());
      posT grad;
      valT lap(0);
      const valT* restrict u = cur_u.data();
      const valT* restrict du = cur_du.data();
      const valT* restrict d2u = cur_d2u.data();
      const RowContainer& displ = d_table.Displacements[iat];
      // (D-1)/r factor of the radial Laplacian in D dimensions
      constexpr valT lapfac = OHMMS_DIM - RealType(1);
#pragma omp simd reduction(+ : lap) aligned(du, d2u)
      for (int jat = 0; jat < iat; ++jat)
        lap += d2u[jat] + lapfac * du[jat];
      for (int idim = 0; idim < OHMMS_DIM; ++idim)
      {
        const valT* restrict dX = displ.data(idim);
        valT s = valT();
#pragma omp simd reduction(+ : s) aligned(du, dX)
        for (int jat = 0; jat < iat; ++jat)
          s += du[jat] * dX[jat];
        grad[idim] = s;
      }
      dUat(iat) = grad;
      d2Uat[iat] = -lap;
      // add the contribution from the upper triangle
#pragma omp simd aligned(u, du, d2u)
      for (int jat = 0; jat < iat; jat++)
      {
        Uat[jat] += u[jat];
        d2Uat[jat] -= d2u[jat] + lapfac * du[jat];
      }
      for (int idim = 0; idim < OHMMS_DIM; ++idim)
      {
        valT* restrict save_g = dUat.data(idim);
        const valT* restrict dX = displ.data(idim);
#pragma omp simd aligned(save_g, du, dX)
        for (int jat = 0; jat < iat; jat++)
          save_g[jat] -= du[jat] * dX[jat];
      }
    }
  }
}
// log(psi) plus accumulation of gradient G and Laplacian L, computed from
// scratch (delegates to evaluateGL with fromscratch=true).
template<typename FT>
typename J2OrbitalSoA<FT>::LogValueType J2OrbitalSoA<FT>::evaluateLog(ParticleSet& P,
                                                                      ParticleSet::ParticleGradient_t& G,
                                                                      ParticleSet::ParticleLaplacian_t& L)
{
  evaluateGL(P, G, L, true);
  return LogValue;
}
// Accumulate gradients and Laplacians into G and L from the cached tables,
// optionally rebuilding those tables first.  LogValue = -0.5*sum_i Uat[i]
// (each pair is counted twice across the particle sum).
template<typename FT>
void J2OrbitalSoA<FT>::evaluateGL(ParticleSet& P,
                                  ParticleSet::ParticleGradient_t& G,
                                  ParticleSet::ParticleLaplacian_t& L,
                                  bool fromscratch)
{
  if (fromscratch)
    recompute(P);
  LogValue = valT(0);
  for (int iat = 0; iat < N; ++iat)
  {
    LogValue += Uat[iat];
    G[iat] += dUat[iat];
    L[iat] += d2Uat[iat];
  }
  LogValue = -LogValue * 0.5;
}
// Accumulate the Hessian of log(psi) over all pairs (i,j<i); also rebuilds
// LogValue as -sum u(r_ij).  Each pair's Hessian block is subtracted from
// both particles' diagonal entries.
template<typename FT>
void J2OrbitalSoA<FT>::evaluateHessian(ParticleSet& P, HessVector_t& grad_grad_psi)
{
  LogValue = 0.0;
  const DistanceTableData& d_ee(P.getDistTable(my_table_ID_));
  valT dudr, d2udr2;
  Tensor<valT, DIM> ident;
  grad_grad_psi = 0.0;
  ident.diagonal(1.0);
  for (int i = 1; i < N; ++i)
  {
    const valT* dist = d_ee.Distances[i];
    const RowContainer& displ = d_ee.Displacements[i];
    auto ig = P.GroupID[i];
    const int igt = ig * NumGroups;
    for (int j = 0; j < i; ++j)
    {
      auto r = dist[j];
      auto rinv = 1.0 / r;
      auto dr = displ[j];
      auto jg = P.GroupID[j];
      auto uij = F[igt + jg]->evaluate(r, dudr, d2udr2);
      LogValue -= uij;
      // radial + angular parts: (u'' - u'/r) rr^T/r^2 + (u'/r) I
      auto hess = rinv * rinv * outerProduct(dr, dr) * (d2udr2 - dudr * rinv) + ident * dudr * rinv;
      grad_grad_psi[i] -= hess;
      grad_grad_psi[j] -= hess;
    }
  }
}
} // namespace qmcplusplus
#endif
|
diamond_cmap.h | // This is the implementation using the connectivity map (c-map)
std::cout << "Running the c-map implementation\n";
// Count 4-vertex "diamond" patterns: for each edge (v0,v1) with v1<v0,
// collect the common neighbors y0y1 via the connectivity map, then count
// ordered pairs (v2,v3<v2) within that set.  The cmap is a per-thread dense
// marker array reset after each v0.
#pragma omp parallel for schedule(dynamic,1) reduction(+:counter)
for (vidType v0 = 0; v0 < g.V(); v0++) {
#if 1
  auto tid = omp_get_thread_num();
  auto &cmap = cmaps[tid];
  // mark v0's neighborhood in the per-thread connectivity map
  for (auto u : g.N(v0)) cmap[u] = 1;
  for (auto v1 : g.N(v0)) {
    if (v1 >= v0) break;  // neighbor lists sorted; only v1 < v0
    //uint64_t n = 0;
    VertexSet y0y1;
    for (auto u : g.N(v1)) {
#if 0
      // instrumented variant: sample the cmap-lookup latency in cycles
      auto c1 = read_cycle();
      auto ccode = cmap[u];
      auto c2 = read_cycle();
      if (nqueries[tid] < NUM_SAMPLES) {
        auto tick = c2 - c1;
        //std::cout << tick << "\n";
        if (tick < 500) {
          nticks[tid] += tick;
          nqueries[tid] ++;
        }
      }
      if (ccode == 1) y0y1.add(u);
#else
      if (cmap[u] == 1) y0y1.add(u);  // u is a common neighbor of v0,v1
#endif
    }
    // every unordered pair in y0y1 closes a diamond
    for (auto v2 : y0y1) {
      for (auto v3 : y0y1) {
        if (v3 >= v2) break;
        counter ++;
      }
    }
    //counter += n * (n-1) / 2;
  }
  // reset the marker array for the next v0
  for (auto u : g.N(v0)) cmap[u] = 0;
#else
  // set-intersection baseline (no connectivity map)
  for (auto v1 : g.N(v0)) {
    if (v1 >= v0) break;
    uint64_t n = intersect(g, v0, v1);
    counter += n * (n-1) / 2;
  }
#endif
}
|
RCCE_lib.h | //
// Copyright 2010 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef RCCE_LIB_H
#define RCCE_LIB_H
#include "RCCE.h"
#if defined(_OPENMP) && !defined(__hermit__)
#include <omp.h>
#endif
#include <string.h>
//#define AIR
#undef USE_FLAG_EXPERIMENTAL
#undef USE_RCCE_COMM
#undef USE_FAT_BARRIER
#undef USE_PIPELINE_FLAGS
#undef USE_PROBE_FLAGS
#undef USE_TAGGED_FLAGS
#undef USE_TAGGED_FOR_SHORT
#undef USE_REVERTED_FLAGS
#undef USE_REMOTE_PUT_LOCAL_GET
#undef USE_PROBE_FLAGS_SHORTCUT
#define USE_SYNCH_FOR_ZERO_BYTE
// override certain settings for SCC-MPICH:
//#include "scc-mpich-defs.h"
// adjust settings automatically?
#undef AUTO_ADJUST_SETTINGS
////////////////////////////////////////////////////////////////////////////////////////////////
#ifdef AUTO_ADJUST_SETTINGS
#ifdef SINGLEBITFLAGS
#ifdef USE_TAGGED_FLAGS
#warning TAGGED FLAGS CANNOT BE USED WITH SINGLEBITFLAGS! (#undef USE_TAGGED_FLAGS)
#undef USE_TAGGED_FLAGS
#undef USE_TAGGED_FOR_SHORT
#undef USE_PROBE_FLAGS_SHORTCUT
#endif
#ifdef USE_FAT_BARRIER
#warning FAT BARRIER CANNOT BE USED WITH SINGLEBITFLAGS! (#undef USE_FAT_BARRIER)
#undef USE_FAT_BARRIER
#endif
#endif
#ifdef USE_PROBE_FLAGS_SHORTCUT
#ifndef USE_PROBE_FLAGS
#warning THE PROBE FLAGS SHORTCUT REQUIRES PROBE FLAGS! (#define USE_PROBE_FLAGS)
#define USE_PROBE_FLAGS
#endif
#ifndef USE_TAGGED_FOR_SHORT
#warning THE PROBE FLAGS SHORTCUT REQUIRES TAGGED FLAGS! (#define USE_TAGGED_FLAGS)
#define USE_TAGGED_FLAGS
#endif
#endif
#ifdef USE_TAGGED_FOR_SHORT
#ifndef USE_TAGGED_FLAGS
#warning TAGGED SHORT MESSAGES REQUIRE TAGGED FLAGS! (#define USE_TAGGED_FLAGS)
#define USE_TAGGED_FLAGS
#endif
#endif
#ifdef USE_REMOTE_PUT_LOCAL_GET
#ifndef USE_PROBE_FLAGS
#warning PROBING FOR MESSAGES IN REMOTE-PUT/LOCAL-GET NEEDS ADDITIONAL PROBE FLAGS! (#define USE_PROBE_FLAGS)
#define USE_PROBE_FLAGS
#endif
#endif
#ifdef SCC_COUPLED_SYSTEMS
#ifndef USE_REVERTED_FLAGS
#ifdef USE_TAGGED_FLAGS
#warning COUPLED SYSTEMS REQUIRE REVERTED FLAGS WHEN USING TAGGED FLAGS! (#define USE_REVERTED_FLAGS)
#define USE_REVERTED_FLAGS
#endif
#endif
#ifndef USE_REMOTE_PUT_LOCAL_GET
#warning COUPLED SYSTEMS SHOULD USE REMOTE-PUT/LOCAL-GET! (#define USE_REMOTE_PUT_LOCAL_GET)
#define USE_REMOTE_PUT_LOCAL_GET
#endif
#else
#ifdef USE_PROBE_FLAGS
#warning NON-COUPLED SYSTEMS SHOULD NOT USE ADDITIONAL PROBE FLAGS! (#undef USE_PROBE_FLAGS)
#undef USE_PROBE_FLAGS
#endif
#endif
#ifdef USE_PROBE_FLAGS
#ifdef USE_FAT_BARRIER
#warning PROBABLY TOO LITTLE MPB SPACE FOR USING FAT BARRIER WITH PROBE FLAGS ENABLED! (#undef USE_FAT_BARRIER)
#undef USE_FAT_BARRIER
#endif
#endif
////////////////////////////////////////////////////////////////////////////////////////////////
#else // !AUTO_ADJUST_SETTINGS
#ifdef SINGLEBITFLAGS
#ifdef USE_TAGGED_FLAGS
#error TAGGED FLAGS CANNOT BE USED WITH SINGLEBITFLAGS! (#undef USE_TAGGED_FLAGS)
#endif
#undef USE_TAGGED_FLAGS
#undef USE_TAGGED_FOR_SHORT
#undef USE_PROBE_FLAGS_SHORTCUT
#ifdef USE_FAT_BARRIER
#error FAT BARRIER CANNOT BE USED WITH SINGLEBITFLAGS! (#undef USE_FAT_BARRIER)
#endif
#endif
#ifdef USE_PROBE_FLAGS_SHORTCUT
#ifndef USE_PROBE_FLAGS
#error THE PROBE FLAGS SHORTCUT REQUIRES PROBE FLAGS! (#define USE_PROBE_FLAGS)
#endif
#ifndef USE_TAGGED_FOR_SHORT
#error THE PROBE FLAGS SHORTCUT REQUIRES TAGGED FLAGS! (#define USE_TAGGED_FLAGS)
#endif
#endif
#ifdef USE_TAGGED_FOR_SHORT
#ifndef USE_TAGGED_FLAGS
#error TAGGED SHORT MESSAGES REQUIRE TAGGED FLAGS! (#define USE_TAGGED_FLAGS)
#endif
#endif
#ifdef USE_REMOTE_PUT_LOCAL_GET
#ifndef USE_PROBE_FLAGS
#warning PROBING FOR MESSAGES IN REMOTE-PUT/LOCAL-GET NEEDS ADDITIONAL PROBE FLAGS! (#define USE_PROBE_FLAGS)
#endif
#endif
#ifdef SCC_COUPLED_SYSTEMS
#ifdef USE_TAGGED_FLAGS
#ifndef USE_REVERTED_FLAGS
#error COUPLED SYSTEMS REQUIRE REVERTED FLAGS WHEN USING TAGGED FLAGS! (#define USE_REVERTED_FLAGS)
#endif
#endif
#ifndef USE_REMOTE_PUT_LOCAL_GET
#warning COUPLED SYSTEMS SHOULD USE REMOTE-PUT/LOCAL-GET! (#define USE_REMOTE_PUT_LOCAL_GET)
#endif
#else
#ifdef USE_PROBE_FLAGS
#warning NON-COUPLED SYSTEMS SHOULD NOT USE ADDITIONAL PROBE FLAGS! (#undef USE_PROBE_FLAGS)
#endif
#endif
#ifdef USE_PROBE_FLAGS
#ifdef USE_FAT_BARRIER
#warning PROBABLY TOO LITTLE MPB SPACE FOR USING FAT BARRIER WITH PROBE FLAGS ENABLED! (#undef USE_FAT_BARRIER)
#endif
#endif
#endif // !AUTO_ADJUST_SETTINGS
////////////////////////////////////////////////////////////////////////////////////////////////
/* PAD32byte is used to compute a cacheline padded length of n (input) bytes */
#define PAD32byte(n) ((n)%32==0 ? (n) : (n) + 32 - (n)%32)
//#define BITSPERCHAR 8
#define BOTH_IN_COMM_BUFFER 12
#define SOURCE_IN_PRIVATE_MEMORY 34
#define TARGET_IN_PRIVATE_MEMORY 56
#ifdef SINGLEBITFLAGS
#define RCCE_FLAGS_PER_BYTE 8
#else
#define RCCE_FLAGS_PER_BYTE 1
#endif
#define RCCE_FLAGS_PER_LINE (RCCE_LINE_SIZE*RCCE_FLAGS_PER_BYTE)
#define RCCE_SUM_INT (RCCE_SUM+(RCCE_NUM_OPS)*(RCCE_INT))
#define RCCE_SUM_LONG (RCCE_SUM+(RCCE_NUM_OPS)*(RCCE_LONG))
#define RCCE_SUM_FLOAT (RCCE_SUM+(RCCE_NUM_OPS)*(RCCE_FLOAT))
#define RCCE_SUM_DOUBLE (RCCE_SUM+(RCCE_NUM_OPS)*(RCCE_DOUBLE))
#define RCCE_MAX_INT (RCCE_MAX+(RCCE_NUM_OPS)*(RCCE_INT))
#define RCCE_MAX_LONG (RCCE_MAX+(RCCE_NUM_OPS)*(RCCE_LONG))
#define RCCE_MAX_FLOAT (RCCE_MAX+(RCCE_NUM_OPS)*(RCCE_FLOAT))
#define RCCE_MAX_DOUBLE (RCCE_MAX+(RCCE_NUM_OPS)*(RCCE_DOUBLE))
#define RCCE_MIN_INT (RCCE_MIN+(RCCE_NUM_OPS)*(RCCE_INT))
#define RCCE_MIN_LONG (RCCE_MIN+(RCCE_NUM_OPS)*(RCCE_LONG))
#define RCCE_MIN_FLOAT (RCCE_MIN+(RCCE_NUM_OPS)*(RCCE_FLOAT))
#define RCCE_MIN_DOUBLE (RCCE_MIN+(RCCE_NUM_OPS)*(RCCE_DOUBLE))
#define RCCE_PROD_INT (RCCE_PROD+(RCCE_NUM_OPS)*(RCCE_INT))
#define RCCE_PROD_LONG (RCCE_PROD+(RCCE_NUM_OPS)*(RCCE_LONG))
#define RCCE_PROD_FLOAT (RCCE_PROD+(RCCE_NUM_OPS)*(RCCE_FLOAT))
#define RCCE_PROD_DOUBLE (RCCE_PROD+(RCCE_NUM_OPS)*(RCCE_DOUBLE))
#define RCCE_COMM_INITIALIZED 45328976
#define RCCE_COMM_NOT_INITIALIZED -45328976
// auxiliary MPB pointer type
typedef volatile unsigned int* t_vintp;
// Also need dereferenced types
typedef volatile unsigned char t_vchar;
typedef volatile unsigned int t_vint;
/* Node of the circular linked list used by the RCCE MPB/shared-memory
   allocators: each node describes one block of communication buffer space. */
typedef struct rcce_block {
  t_vcharp space;           // pointer to space for data in block
  size_t free_size;         // actual free space in block (0 or whole block)
  size_t size;              // size of an allocated block
  struct rcce_block *next;  // pointer to next block in circular linked list
} RCCE_BLOCK;

#if defined(SINGLEBITFLAGS) || defined(USE_BYTE_FLAGS)
/* One cache line's worth of synchronization flags plus its MPB address;
   lines are chained so flags can be allocated/freed individually. */
typedef struct rcce_flag_line {
  char flag[RCCE_FLAGS_PER_LINE];  // per-flag occupancy/state within the line
  t_vcharp line_address;           // MPB address of this flag line
  int members;                     // number of flags in use on this line
  struct rcce_flag_line *next;
} RCCE_FLAG_LINE;
#endif

/* Allocator handle: only the tail of the circular block list is kept. */
typedef struct {
  RCCE_BLOCK *tail;  // "last" block in linked list of blocks
} RCCE_BLOCK_S;

#ifdef AIR
#define FPGA_BASE 0xf9000000
#define BACKOFF_MIN 8
#define BACKOFF_MAX 256
/* Atomic increment register (FPGA-backed) used for barriers/locks.
   NOTE(review): the tag _RCCE_AIR uses a reserved identifier (leading
   underscore + uppercase) — left unchanged for ABI compatibility. */
typedef volatile struct _RCCE_AIR {
  int * counter;  // memory-mapped atomic counter
  int * init;     // memory-mapped initialization register
} RCCE_AIR;
#endif
#ifndef GORY
extern RCCE_FLAG RCCE_sent_flag[RCCE_MAXNP];
extern RCCE_FLAG RCCE_ready_flag[RCCE_MAXNP];
#ifdef USE_PIPELINE_FLAGS
extern RCCE_FLAG RCCE_sent_flag_pipe[RCCE_MAXNP];
extern RCCE_FLAG RCCE_ready_flag_pipe[RCCE_MAXNP];
#endif
#ifdef USE_PROBE_FLAGS
extern RCCE_FLAG RCCE_probe_flag[RCCE_MAXNP];
#endif
extern t_vcharp RCCE_buff_ptr;
extern size_t RCCE_chunk;
extern t_vcharp RCCE_flags_start;
#ifndef USE_REMOTE_PUT_LOCAL_GET
extern RCCE_SEND_REQUEST* RCCE_send_queue;
extern RCCE_RECV_REQUEST* RCCE_recv_queue[RCCE_MAXNP];
#else
extern RCCE_SEND_REQUEST* RCCE_send_queue[RCCE_MAXNP];
extern RCCE_RECV_REQUEST* RCCE_recv_queue;
#endif
#endif
//#ifdef USE_FLAG_EXPERIMENTAL
extern t_vcharp RCCE_flag_buffer[RCCE_MAXNP];
//#endif
#ifndef __hermit__
extern t_vcharp RCCE_fool_write_combine_buffer;
#endif
extern t_vcharp RCCE_comm_buffer[RCCE_MAXNP];
extern int RCCE_NP;
extern int RCCE_BUFF_SIZE;
#ifndef COPPERRIDGE
extern omp_lock_t RCCE_corelock[RCCE_MAXNP];
extern t_vchar RC_comm_buffer[RCCE_MAXNP*RCCE_BUFF_SIZE_MAX];
extern t_vchar RC_shm_buffer[RCCE_SHM_SIZE_MAX];
#endif
extern int RC_MY_COREID;
extern int RC_COREID[RCCE_MAXNP];
extern double RC_REFCLOCKGHZ;
extern int RCCE_IAM;
extern int RCCE_debug_synch;
extern int RCCE_debug_comm;
extern int RCCE_debug_debug;
extern int RCCE_debug_RPC;
#ifdef SINGLEBITFLAGS
extern RCCE_FLAG_LINE RCCE_flags;
extern int WORDSIZE;
extern int LEFTMOSTBIT;
RCCE_FLAG_STATUS RCCE_bit_value(t_vcharp, int);
RCCE_FLAG_STATUS RCCE_flip_bit_value(t_vcharp, int);
int RCCE_write_bit_value(t_vcharp, int, RCCE_FLAG_STATUS);
#endif
extern int RCCE_comm_init_val;
void RCCE_malloc_init(t_vcharp, size_t);
void RCCE_shmalloc_init(t_vcharp, size_t);
int RCCE_qsort(char *, size_t, size_t, int (*)(const void*, const void*));
int id_compare(const void *, const void *);
#if 0
int RCCE_probe(RCCE_FLAG);
#endif
int RCCE_error_return(int, int);
#ifdef __hermit__
#define RC_cache_invalidate() {}
#else
void RC_cache_invalidate(void);
#endif
int RCCE_acquire_treelock(RCCE_COMM*);
int RCCE_release_treelock(RCCE_COMM*);
int RCCE_TNS_barrier(RCCE_COMM*);
int RCCE_acquire_lock(int);
int RCCE_try_lock(int);
int RCCE_backoff_lock(int);
int RCCE_release_lock(int);
int RCCE_global_color(int, void *);
t_vcharp RC_COMM_BUFFER_START(int);
//#ifdef USE_FLAG_EXPERIMENTAL
t_vcharp RC_FLAG_BUFFER_START(int);
//#endif
#ifndef GORY
t_vcharp RCCE_malloc(size_t);
t_vcharp RCCE_malloc_request(size_t, size_t *);
t_vcharp RCCE_palloc(size_t, int);
void RCCE_free(t_vcharp);
int RCCE_put(t_vcharp, t_vcharp, int, int);
int RCCE_get(t_vcharp, t_vcharp, int, int);
int RCCE_wait_until(RCCE_FLAG, RCCE_FLAG_STATUS);
int RCCE_test_flag(RCCE_FLAG, RCCE_FLAG_STATUS, int *);
int RCCE_flag_alloc(RCCE_FLAG *);
int RCCE_flag_free(RCCE_FLAG *);
int RCCE_flag_write(RCCE_FLAG *, RCCE_FLAG_STATUS, int);
int RCCE_flag_read(RCCE_FLAG, RCCE_FLAG_STATUS *, int);
#ifdef USE_FLAG_EXPERIMENTAL
int RCCE_put_flag(t_vcharp, t_vcharp, int, int);
int RCCE_get_flag(t_vcharp, t_vcharp, int, int);
#endif
#ifdef USE_TAGGED_FLAGS
int RCCE_flag_write_tagged(RCCE_FLAG *, RCCE_FLAG_STATUS, int, void*, int);
int RCCE_flag_read_tagged(RCCE_FLAG, RCCE_FLAG_STATUS *, int, void*, int);
int RCCE_wait_tagged(RCCE_FLAG, RCCE_FLAG_STATUS, void *, int);
int RCCE_test_tagged(RCCE_FLAG, RCCE_FLAG_STATUS, int *, void *, int);
#endif
#endif
#if defined(_OPENMP) && !defined(__hermit__)
#pragma omp threadprivate (RC_COREID, RC_MY_COREID, RC_REFCLOCKGHZ)
#pragma omp threadprivate (RCCE_comm_buffer)
#pragma omp threadprivate (RCCE_BUFF_SIZE)
#pragma omp threadprivate (RCCE_IAM, RCCE_NP)
#pragma omp threadprivate (RCCE_debug_synch, RCCE_debug_comm, RCCE_debug_debug)
#ifdef SINGLEBITFLAGS
#pragma omp threadprivate (RCCE_flags, WORDSIZE, LEFTMOSTBIT)
#endif
#ifndef GORY
#pragma omp threadprivate (RCCE_send_queue, RCCE_recv_queue)
#pragma omp threadprivate (RCCE_sent_flag, RCCE_ready_flag)
#ifdef USE_PROBE_FLAGS
#pragma omp threadprivate (RCCE_probe_flag)
#endif
#ifdef USE_PIPELINE_FLAGS
#pragma omp threadprivate (RCCE_sent_flag_pipe, RCCE_ready_flag_pipe)
#endif
#pragma omp threadprivate (RCCE_buff_ptr, RCCE_chunk)
#pragma omp threadprivate (RCCE_flags_start)
#endif
#endif
#ifdef SHMADD
unsigned int getCOREID();
unsigned int readTILEID();
unsigned int readLUT(unsigned int);
void writeLUT(unsigned int, unsigned int);
#endif
#endif
|
level.c | // RUN: %compile-run-and-check
#include <omp.h>
#include <stdio.h>
const int MaxThreads = 1024;
const int NumThreads = 64;
// Device-offload test for the OpenMP nesting-introspection API:
// omp_get_level / omp_get_active_level / omp_get_ancestor_thread_num /
// omp_get_team_size, queried at levels -1, 0, 1, 2 and 3 inside a target
// region, with per-thread checksums validated on the host afterwards.
// The "CHECK" comments are FileCheck patterns matched against the printf
// output and must not be altered.
int main(int argc, char *argv[]) {
int level = -1, activeLevel = -1;
// The expected value is -1, initialize to different value.
int ancestorTNumNeg = 1, teamSizeNeg = 1;
int ancestorTNum0 = -1, teamSize0 = -1;
// The expected value is -1, initialize to different value.
int ancestorTNum1 = 1, teamSize1 = 1;
// One checksum slot per possible device thread; only the first NumThreads
// entries should ever be written, the rest must stay 0.
int check1[MaxThreads];
int check2[MaxThreads];
int check3[MaxThreads];
int check4[MaxThreads];
for (int i = 0; i < MaxThreads; i++) {
check1[i] = check2[i] = check3[i] = check4[i] = 0;
}
#pragma omp target map(level, activeLevel, ancestorTNumNeg, teamSizeNeg) \
map(ancestorTNum0, teamSize0, ancestorTNum1, teamSize1) \
map(check1[:], check2[:], check3[:], check4[:])
{
// Queries at nesting level 0 (sequential part of the target region).
level = omp_get_level();
activeLevel = omp_get_active_level();
// Expected to return -1.
ancestorTNumNeg = omp_get_ancestor_thread_num(-1);
teamSizeNeg = omp_get_team_size(-1);
// Expected to return 0 and 1.
ancestorTNum0 = omp_get_ancestor_thread_num(0);
teamSize0 = omp_get_team_size(0);
// Expected to return -1 because the requested level is larger than
// the nest level.
ancestorTNum1 = omp_get_ancestor_thread_num(1);
teamSize1 = omp_get_team_size(1);
// Expecting active parallel region.
#pragma omp parallel num_threads(NumThreads)
{
int id = omp_get_thread_num();
// Multiply return value of omp_get_level by 5 to avoid that this test
// passes if both API calls return wrong values.
check1[id] += omp_get_level() * 5 + omp_get_active_level();
// Expected to return 0 and 1.
check2[id] += omp_get_ancestor_thread_num(0) + 5 * omp_get_team_size(0);
// Expected to return the current thread num.
check2[id] += (omp_get_ancestor_thread_num(1) - id);
// Expected to return the current number of threads.
check2[id] += 3 * omp_get_team_size(1);
// Expected to return -1, see above.
check2[id] += omp_get_ancestor_thread_num(2) + omp_get_team_size(2);
// Expecting serialized parallel region.
#pragma omp parallel
{
#pragma omp atomic
check3[id] += omp_get_level() * 5 + omp_get_active_level();
// Expected to return 0 and 1.
int check4Inc = omp_get_ancestor_thread_num(0) + 5 * omp_get_team_size(0);
// Expected to return the parent thread num.
check4Inc += (omp_get_ancestor_thread_num(1) - id);
// Expected to return the number of threads in the active parallel region.
check4Inc += 3 * omp_get_team_size(1);
// Expected to return 0 and 1.
check4Inc += omp_get_ancestor_thread_num(2) + 3 * omp_get_team_size(2);
// Expected to return -1, see above.
check4Inc += omp_get_ancestor_thread_num(3) + omp_get_team_size(3);
#pragma omp atomic
check4[id] += check4Inc;
}
}
}
// CHECK: target: level = 0, activeLevel = 0
printf("target: level = %d, activeLevel = %d\n", level, activeLevel);
// CHECK: level = -1: ancestorTNum = -1, teamSize = -1
printf("level = -1: ancestorTNum = %d, teamSize = %d\n", ancestorTNumNeg, teamSizeNeg);
// CHECK: level = 0: ancestorTNum = 0, teamSize = 1
printf("level = 0: ancestorTNum = %d, teamSize = %d\n", ancestorTNum0, teamSize0);
// CHECK: level = 1: ancestorTNum = -1, teamSize = -1
printf("level = 1: ancestorTNum = %d, teamSize = %d\n", ancestorTNum1, teamSize1);
// CHECK-NOT: invalid
// Host-side validation of the device-computed checksums; any mismatch prints
// a line containing "invalid", which the CHECK-NOT above rejects.
for (int i = 0; i < MaxThreads; i++) {
// Check active parallel region:
// omp_get_level() = 1, omp_get_active_level() = 1
const int Expected1 = 6;
if (i < NumThreads) {
if (check1[i] != Expected1) {
printf("invalid: check1[%d] should be %d, is %d\n", i, Expected1, check1[i]);
}
} else if (check1[i] != 0) {
printf("invalid: check1[%d] should be 0, is %d\n", i, check1[i]);
}
// 5 * 1 + 3 * 64 - 1 - 1 (see above)
const int Expected2 = 195;
if (i < NumThreads) {
if (check2[i] != Expected2) {
printf("invalid: check2[%d] should be %d, is %d\n", i, Expected2, check2[i]);
}
} else if (check2[i] != 0) {
printf("invalid: check2[%d] should be 0, is %d\n", i, check2[i]);
}
// Check serialized parallel region:
// omp_get_level() = 2, omp_get_active_level() = 1
const int Expected3 = 11;
if (i < NumThreads) {
if (check3[i] != Expected3) {
printf("invalid: check3[%d] should be %d, is %d\n", i, Expected3, check3[i]);
}
} else if (check3[i] != 0) {
printf("invalid: check3[%d] should be 0, is %d\n", i, check3[i]);
}
// 5 * 1 + 3 * 64 + 3 * 1 - 1 - 1 (see above)
const int Expected4 = 198;
if (i < NumThreads) {
if (check4[i] != Expected4) {
printf("invalid: check4[%d] should be %d, is %d\n", i, Expected4, check4[i]);
}
} else if (check4[i] != 0) {
printf("invalid: check4[%d] should be 0, is %d\n", i, check4[i]);
}
}
// Check for parallel level in non-SPMD kernels.
level = 0;
#pragma omp target teams distribute num_teams(1) thread_limit(32) reduction(+:level)
for (int i=0; i<5032; i+=32) {
int ub = (i+32 > 5032) ? 5032 : i+32;
#pragma omp parallel for schedule(dynamic)
for (int j=i ; j < ub; j++) ;
// After the inner parallel region closed we are back at level 0, so the
// reduction must stay 0.
level += omp_get_level();
}
// CHECK: Integral level = 0.
printf("Integral level = %d.\n", level);
return 0;
}
|
TAD.h | //
// @author Adam Gibson
//
#ifndef LIBND4J_TAD_H
#define LIBND4J_TAD_H
#include <helpers/shape.h>
#include <pointercast.h>
namespace shape {
/**
* Dimension collapse is an algorithm
* for collapsing singular dimensions.
* This algorithm will adjust the dimensions
* wrt the original.
*
* The algorithm has 3 components:
* trailing ones
* middle ones
* beginning ones
*
* dimensions that are specified to reduce along
* that are singular should be truncated
*
* dimensions that are specified that are singular
* at the beginning should be removed with middle dimensions
* decremented.
*
* For any time there is a no op, a collapse will
* set the first dimension to be -1.
*
*
*/
class TAD {
public:
// Index of the tad this instance is anchored at (used by tadOffset and
// shapeInfoOnlyShapeAndStride when slicing out a single tensor).
int tadIndex = 0;
int dimensionLength;
// Dimensions the tads run along. May be the caller's array or a fresh copy;
// createdNewDimension records which (see ~TAD()).
int *dimension = nullptr;
// Shape-info descriptor of the source array (not owned unless it diverges
// from originalShapeInfo).
int *shapeInfo = nullptr;
// Shape-info describing one tad; built lazily by createTadOnlyShapeInfo().
int *tadOnlyShapeInfo = nullptr;
int numTads = 0;
int tadRank = 0;
// Shape/stride views of one tad; usually aliases into tadOnlyShapeInfo.
int *tadShape = nullptr;
int *tadStride = nullptr;
// Element offset of each tad within the source buffer; built by createOffsets().
Nd4jIndex *tadOffsets = nullptr;
int tadOffsetForBlock = 0;
int rank = 0;
int numOnes = 0;
//pointers to original
int originalDimensionLength;
int *originalDimension = nullptr;
int *originalShapeInfo = nullptr;
bool squeezed = false;
bool newSqueezeDimensions = false;
int numOnesInMiddle = 0;
// True when the tad covers the whole array, so tadOffset(i) == i.
bool wholeThing = false;
//need to track whether we create a new dimension array or not, we could have just moved the pointer forward
//due to leading ones
bool createdNewDimension = false;
// special case for CUDA, we're passing in __shared__ memory pointers to be used instead of new/malloc
void *ptrManager = nullptr;
int *ptrOutput = nullptr;
#ifdef __CUDACC__
__host__ __device__
#endif
INLINEDEF TAD() {}
#ifdef __CUDACC__
__host__ __device__
#endif
TAD(int tadIndex,int *shapeInfo,int *dimension,int dimensionLength);
#ifdef __CUDACC__
__host__ __device__
#endif
TAD(int *shapeInfo,int *dimension,int dimensionLength);
#ifdef __CUDACC__
__host__ __device__
#endif
INLINEDEF void setExternalBuffers(void *ptrManager);
#ifdef __CUDACC__
__host__ __device__
#endif
INLINEDEF void setOutputBuffer(int *ptrOutput);
#ifdef __CUDACC__
__host__ __device__
#endif
/**
* This method is for GPU mostly, it allows to initialize TAD instance with precalculated tadOnlyShapeInfo
*/
INLINEDEF void initWithExternalTAD(int *existingTAD, int *originalShape, int *dimension, int dimensionLength);
#ifdef __CUDACC__
__host__ __device__
#endif
INLINEDEF void init(int *shapeInfo,int *dimension,int dimensionLength);
template <typename T>
#ifdef __CUDACC__
__host__ __device__
#endif
INLINEDEF void printTADsND(T *x);
#ifdef __CUDACC__
__host__ __device__
#endif
INLINEDEF void permuteShapeBufferInPlace(int *shapeBuffer,int *rearrange,int *out);
#ifdef __CUDACC__
__host__ __device__
#endif
INLINEDEF int *permuteShapeBuffer(int *shapeBuffer,int *rearrange);
#ifdef __CUDACC__
__host__ __device__
#endif
INLINEDEF void createTadOnlyShapeInfo();
#ifdef __CUDACC__
__host__ __device__
#endif
INLINEDEF int lengthPerSlice(int *shapeBuffer);
#ifdef __CUDACC__
__host__ __device__
#endif
INLINEDEF int * tad2Sub(int index);
#ifdef __CUDACC__
__host__ __device__
#endif
INLINEDEF ~TAD();
#ifdef __CUDACC__
__host__ __device__
#endif
INLINEDEF int* permuteDims();
/**
* Compute the tad offset given a dimension.
*
* The general pattern for computing a tad offset is as follows:
* Every $STRIDE that was removed (the first dimension)
* do a jump by the major stride of the parent array
* (stride[0] of the parent array)
*
* For example given a c ordered 2,2,3,2 with stride 12,6,2,1
* A tad of dimension 1 will jump 12 every 6 tads.
*
* You then end up with offsets of:
* 0
* 1
* 2
* 3
* 4
* 5
* 12
* 13
* 14
* 15
* 16
* 17
*
* notice there are 12 tads here. This same incremental jump will happen
* every time.
* Note here that by default the
* stride of element wise stride is used for the hops.
*
* Sometimes a jump doesn't happen. If there are less tads
* than the stride of the dimension you removed, the
* element wise stride will always be used.
*
* For example in a dimension of 0,1, you end up with offsets of:
* 0,1,2,3,4,5
*
* Given that the inner most stride of the dimensions that was removed (1)
* had a stride of 6, we never need to do a major stride jump.
*
*/
#ifdef __CUDACC__
__host__ __device__
#endif
INLINEDEF Nd4jIndex tadOffset(int index);
#ifdef __CUDACC__
__host__ __device__
#endif
INLINEDEF int *tensorShape();
#ifdef __CUDACC__
__host__ __device__
#endif
INLINEDEF int * tad2Sub(int index, void *ptrManager);
#ifdef __CUDACC__
__host__ __device__
#endif
INLINEDEF void createOffsets();
#ifdef __CUDACC__
__host__ __device__
#endif
INLINEDEF int *shapeInfoOnlyShapeAndStride();
/**
* Length of a tad given
* the shape information
*/
#ifdef __CUDACC__
__host__ __device__
#endif
INLINEDEF int tadLength(int *shapeInfo, int *dimension, int dimensionLength);
/**
* Computes the number
* of tensors along
* a given dimension
*/
#ifdef __CUDACC__
__host__ __device__
#endif
INLINEDEF int tensorsAlongDimension(int *shapeInfo, int *dimension, int dimensionLength);
#ifdef __CUDACC__
__host__ __device__
// Cache the offset of the tad this CUDA block works on.
INLINEDEF void createOffsetForBlock(int blockIdx) {
this->tadOffsetForBlock = this->tadOffset(blockIdx);
}
#endif
#ifdef __CUDACC__
__host__ __device__
#endif
INLINEDEF void collapse();
};
////
#ifdef __CUDACC__
__host__ __device__
#endif
// Construct a TAD anchored at a specific tad index within the source array;
// all remaining setup is delegated to init().
INLINEDEF TAD::TAD(int tadIndex, int *shapeInfo, int *dimension, int dimensionLength) {
    this->tadIndex = tadIndex;
    init(shapeInfo, dimension, dimensionLength);
}
#ifdef __CUDACC__
__host__ __device__
#endif
// Construct a TAD anchored at tad index 0 (the in-class default);
// all remaining setup is delegated to init().
INLINEDEF TAD::TAD(int *shapeInfo, int *dimension, int dimensionLength) {
    init(shapeInfo, dimension, dimensionLength);
}
// Install an external workspace (CUDA shared-memory manager) so that scratch
// buffers are drawn from it instead of new/malloc.
INLINEDEF void TAD::setExternalBuffers(void *ptrManager) {
    this->ptrManager = ptrManager;
}
// Install an externally owned output buffer for routines that would otherwise
// allocate their result.
INLINEDEF void TAD::setOutputBuffer(int *ptrOutput) {
    this->ptrOutput = ptrOutput;
}
// Initialize from a precalculated tad shape-info (GPU path): no buffers are
// allocated here; tadShape/tadStride alias into existingTAD, and
// shapeInfo/dimension keep pointing at the caller's arrays.
INLINEDEF void TAD::initWithExternalTAD(int *existingTAD, int *originalShape, int *dimension, int dimensionLength) {
this->tadOnlyShapeInfo = existingTAD;
this->rank = shape::rank(originalShape);
this->originalShapeInfo = originalShape;
this->originalDimension = dimension;
this->originalDimensionLength = dimensionLength;
this->shapeInfo = originalShape;
this->dimension = dimension;
this->dimensionLength = dimensionLength;
// NOTE(review): tadShape/tadStride point INTO existingTAD here; nothing in
// this path allocates, so createTadOnlyShapeInfo()'s delete[] of tadShape
// would be invalid after this — verify the two paths are never mixed.
this->tadShape = shape::shapeOf(existingTAD);
this->tadStride = shape::stride(existingTAD);
int ews = shape::elementWiseStride(originalShape);
// numTads derived from lengths rather than tensorsAlongDimension (see the
// retained alternative in the trailing comment).
this->numTads = shape::length(originalShape) / shape::length(existingTAD); // this->tensorsAlongDimension(this->shapeInfo, this->dimension, this->dimensionLength);//shape::length(originalShape) / shape::length(existingTAD);
// "wholeThing": a single tad, or tads that trivially cover the array with
// unit element-wise stride, so tadOffset(i) can just return i.
this->wholeThing = this->numTads == 1 || ((this->dimensionLength == this->rank || this->numTads == shape::length(this->shapeInfo)) && ews == 1);
}
// Initialize this TAD from a source shape-info and the dimensions the tads
// run along. Keeps references to the caller's arrays (no copies) and
// precomputes rank, numTads and the "wholeThing" fast path, which lets
// tadOffset(i) return i directly.
INLINEDEF void TAD::init(int *shapeInfo,int *dimension,int dimensionLength) {
this->originalShapeInfo = shapeInfo;
this->originalDimension = dimension;
this->originalDimensionLength = dimensionLength;
//start off as original references
this->shapeInfo = shapeInfo;
this->dimensionLength = dimensionLength;
this->dimension = dimension;
this->rank = shape::rank(shapeInfo);
// dimensionLength == 0 means "the whole array is one tad".
this->numTads = dimensionLength == 0 ? 1 : this->tensorsAlongDimension(this->shapeInfo, this->dimension, this->dimensionLength);
int ews = shape::elementWiseStride(shapeInfo);
if(!shape::isVector(shapeInfo))
wholeThing = this->numTads == 1 || ((this->dimensionLength == this->rank || this->numTads == shape::length(shapeInfo)) && ews == 1);
else if(shape::isScalar(shapeInfo))
wholeThing = true;
//vector case
else {
// BUG FIX: the previous condition was `dimension == 0 && ...`, which
// compared the POINTER to null and then dereferenced it in the same
// expression — undefined behavior when dimension is null, dead code
// otherwise. The commented-out original above it shows the intended
// test: a single reduce dimension of extent 1 on a vector means the
// tad is the whole vector.
if(dimension != nullptr && dimensionLength == 1 && shape::shapeOf(shapeInfo)[dimension[0]] == 1) {
wholeThing = true;
}
}
}
// Debug helper: print every tad of buffer x, one tad per line, using the
// nd4j raw-iteration macros to walk arbitrary strides.
// NOTE(review): the "%f" format assumes T promotes to double through
// varargs (float/double); integral T would print garbage — confirm callers.
template <typename T>
INLINEDEF void TAD::printTADsND(T *x) {
if(wholeThing) {
// Single tad covering the whole array: linear walk is sufficient.
for(int i = 0; i < shape::length(tadOnlyShapeInfo); i++) {
printf(" %f ",x[i]);
}
printf("\n");
}
else {
// General case: position at each tad's offset and iterate its elements
// via the prepared raw-array iterator. Requires tadOffsets and
// tadOnlyShapeInfo to have been built beforehand.
for (int i = 0; i < numTads; i++) {
int offset = tadOffsets[i];
int shapeIter[MAX_RANK];
int coord[MAX_RANK];
int dim;
int rankIter = shape::rank(tadOnlyShapeInfo);
int xStridesIter[MAX_RANK];
T *xPointer = x + offset;
if (PrepareOneRawArrayIter<T>(rankIter,
shape::shapeOf(tadOnlyShapeInfo),
xPointer,
shape::stride(tadOnlyShapeInfo),
&rankIter,
shapeIter,
&xPointer,
xStridesIter) >= 0) {
ND4J_RAW_ITER_START(dim, shape::rank(tadOnlyShapeInfo), coord, shapeIter); {
/* Process the innermost dimension */
printf(" %f ",xPointer[0]);
}
ND4J_RAW_ITER_ONE_NEXT(dim,
rankIter,
coord,
shapeIter,
xPointer,
xStridesIter);
printf("\n");
}
else {
printf("Unable to prepare array\n");
}
}
}
}
// Copy shapeBuffer into out, then permute out's entries according to
// rearrange. out must hold shape::shapeInfoLength(this->rank) ints; out may
// alias shapeBuffer only in the trivial sense used by callers (same buffer).
INLINEDEF void TAD::permuteShapeBufferInPlace(int *shapeBuffer, int *rearrange, int *out) {
    const int infoLen = shape::shapeInfoLength(this->rank);
    memcpy(out, shapeBuffer, infoLen * sizeof(int));
    doPermuteShapeBuffer(this->rank, out, rearrange);
}
// Return a freshly allocated, permuted copy of shapeBuffer.
// Ownership transfers to the caller (delete[] when done).
INLINEDEF int* TAD::permuteShapeBuffer(int *shapeBuffer, int *rearrange) {
    int *permutedCopy = shape::copyOf(shape::shapeInfoLength(this->rank), shapeBuffer);
    doPermuteShapeBuffer(rank, permutedCopy, rearrange);
    return permutedCopy;
}
// Build the shape-info describing one tad and re-point tadShape/tadStride
// into it (they become views, not owned buffers).
INLINEDEF void TAD::createTadOnlyShapeInfo() {
this->tadOnlyShapeInfo = this->shapeInfoOnlyShapeAndStride();
// NOTE(review): this delete[] assumes tadShape currently owns a heap array
// (as allocated by tensorShape()). If it aliases into an external TAD set
// by initWithExternalTAD(), or into a previous tadOnlyShapeInfo, this is an
// invalid delete — confirm call ordering. Also the previous
// tadOnlyShapeInfo, if any, is not freed here.
if (this->tadShape != nullptr)
delete[] this->tadShape;
this->tadShape = shape::shapeOf(this->tadOnlyShapeInfo);
this->tadStride = shape::stride(this->tadOnlyShapeInfo);
/* if(tadIndex > 0) {
this->createOffsets();
this->tadOnlyShapeInfo[shape::shapeInfoLength(shape::rank(this->tadOnlyShapeInfo)) - 3] = this->tadOffsets[tadIndex];
}*/
}
// Number of elements in one slice of shapeBuffer: the product of every
// extent except the leading (slice) dimension.
INLINEDEF int TAD::lengthPerSlice(int *shapeBuffer) {
    int sliceDim = 0;
    const int bufRank = shape::rank(shapeBuffer);
    int *trimmed = shape::removeIndex(shape::shapeOf(shapeBuffer), &sliceDim, bufRank, 1);
    const int sliceLen = shape::prod(trimmed, bufRank - 1);
    delete[] trimmed;
    return sliceLen;
}
// Map a tad index to full-rank coordinates in the source array: coordinates
// along the tad dimensions are 0, coordinates along the remaining ("left
// over") dimensions enumerate the index-th tad. Caller owns the returned
// rank-length array (unless ptrManager supplied the buffers on CUDA).
INLINEDEF int * TAD::tad2Sub(int index) {
int *shape = shape::shapeOf(shapeInfo);
int rank = shape::rank(shapeInfo);
int leftOverIndexLen = rank - originalDimensionLength;
#ifdef __CUDACC__
// On device, scratch buffers come from the shared-memory manager when
// available; otherwise fall back to heap allocation.
int *ret;
int *tadShape;
int *leftOverIndexes;
int *sub;
if (ptrManager != nullptr) {
UnifiedSharedMemory *manager = (UnifiedSharedMemory *) ptrManager;
ret = manager->getTempRankBuffer1();
tadShape = manager->getTempRankBuffer2();
leftOverIndexes = manager->getTempRankBuffer3();
sub = manager->getTempRankBuffer4();
} else {
ret = new int[rank];
tadShape = new int[leftOverIndexLen];
leftOverIndexes = new int[leftOverIndexLen];
sub = new int[rank];
}
#else
int *ret = new int[rank];
//shape of the tad
int *tadShape = new int[leftOverIndexLen];
int *leftOverIndexes = new int[leftOverIndexLen];
int *sub = new int[rank];
#endif
//indexes not specified in the tad indexes
//every coordinate starts as zero
memset(ret,0,sizeof(int) * rank);
//find the length of the elements we
//are iterating over
int len = 1;
//left over index cursor for initializing elements
int leftOverIndex = 0;
for(int i = 0; i < rank; i++) {
//look for dimensions NOT found in dimension length (basically compute shape - dimension (set difference)
bool found = false;
for(int j = 0; j < originalDimensionLength; j++) {
//skip over specified dimensions when computing left over length
if(i == originalDimension[j]) {
found = true;
break;
}
}
//add to the indexes that aren't specified as part of the tad dimension
//indexes
if(!found) {
//accumulate the list of indexes left over used for initializing the return value
leftOverIndexes[leftOverIndex] = i;
//accumulate the tad shape
tadShape[leftOverIndex] = shape[i];
//accumulate the length (product) of the indexes that will be iterated over
len *= shape[i];
leftOverIndex++;
}
}
//sub for indices
/* int *sub = new int[leftOverIndexLen];
shape::ind2subOrder(tadShape,index,len,sub);
*/
// Unravel the tad index into coordinates over the left-over shape (C order),
// then scatter them back into the full-rank coordinate vector.
shape::ind2subC(leftOverIndexLen,tadShape,index,len, sub);
for(int i = 0; i < leftOverIndexLen; i++) {
ret[leftOverIndexes[i]] = sub[i];
}
// Scratch buffers are only owned (and freed) when they were heap-allocated.
if (ptrManager == nullptr) {
delete[] tadShape;
delete[] leftOverIndexes;
delete[] sub;
}
return ret;
}
// Free only the buffers this instance owns: arrays are released when they
// diverged from the caller-supplied originals (see createdNewDimension).
// NOTE(review): tadShape allocated by tensorShape() is never freed here —
// possible leak; verify ownership before changing.
INLINEDEF TAD::~TAD() {
//we may have just moved the pointer forward, we may not need to delete the pointer here
if(originalDimension != this->dimension && createdNewDimension) {
delete[] this->dimension;
}
if(this->originalShapeInfo != this->shapeInfo) {
delete[] this->shapeInfo;
}
if(this->tadOffsets != nullptr) {
delete[] this->tadOffsets;
}
// tadOnlyShapeInfo can alias shapeInfo in the pass-through cases, so the
// extra identity check prevents a double free.
if(this->tadOnlyShapeInfo != nullptr && this->tadOnlyShapeInfo != shapeInfo) {
delete[] this->tadOnlyShapeInfo;
}
}
// Build a permutation of the source rank that moves the non-tad dimensions
// to the front (in order) and appends the tad dimensions reversed. Caller
// owns the returned rank-length array.
INLINEDEF int* TAD::permuteDims() {
//permute dimensions for tad
int dimIdx = 0;
//loop backwards assuming dimension is sorted
int *permuteDims = new int[shape::rank(shapeInfo)];
for(int i = 0; i < shape::rank(shapeInfo); i++) {
// Keep i only if it is NOT one of the tad dimensions.
bool found = false;
for(int j = 0; j < originalDimensionLength; j++) {
if(i == originalDimension[j]) {
found = true;
break;
}
}
//not found, append it to the end for permute
if(!found)
permuteDims[dimIdx++] = i;
}
// Tad dimensions go last, in reverse declaration order.
for(int i = originalDimensionLength - 1; i >= 0; i--) {
permuteDims[dimIdx++] = originalDimension[i];
}
/*
for (int i = 0; i < originalDimensionLength; i++) {
permuteDims[i] = originalDimension[i];
}
*/
//permute dimensions for tad
return permuteDims;
}
// Element offset of the index-th tad within the source buffer. Lazily builds
// tadOnlyShapeInfo; returns index directly on the wholeThing fast path.
// Returns -1 when the computed offset is negative (multi-dimension path only).
INLINEDEF Nd4jIndex TAD::tadOffset(int index) {
if(tadOnlyShapeInfo == nullptr) {
this->createTadOnlyShapeInfo();
}
if(wholeThing)
return index;
if(dimensionLength > 1) {
// Coordinates of this tad in the source array, then a stride dot-product.
int *tad2Sub = this->tad2Sub(index,ptrManager);
Nd4jIndex ret = shape::getOffset(0,shape::shapeOf(shapeInfo),shape::stride(shapeInfo),tad2Sub,shape::rank(shapeInfo));
if(ret < 0) {
if (ptrManager == nullptr)
delete[] tad2Sub;
return -1;
}
if (ptrManager == nullptr)
delete[] tad2Sub;
return ret;
}
else {
// Single-dimension case: same computation, but negative offsets are
// passed through unchanged (intentional difference from the branch above).
int *tad2Sub = this->tad2Sub(index,ptrManager);
Nd4jIndex ret = shape::getOffset(0,shape::shapeOf(shapeInfo),shape::stride(shapeInfo),tad2Sub,shape::rank(shapeInfo));
if (ptrManager == nullptr)
delete[] tad2Sub;
return ret;
}
}
// Lazily compute and cache the shape of one tensor-along-dimension: the
// extents of the source shape restricted to the tad dimensions. The cached
// array is heap-allocated by shape::keep and reused on later calls.
INLINEDEF int* TAD::tensorShape() {
    if (this->tadShape == nullptr) {
        int *kept = shape::keep(shape::shapeOf(shapeInfo), dimension,
                                dimensionLength, shape::rank(shapeInfo));
        this->tadShape = kept;
        this->tadRank = dimensionLength;
    }
    return this->tadShape;
}
// Workspace-aware variant of tad2Sub(int): identical mapping of a tad index
// to full-rank coordinates, but scratch buffers may come from the supplied
// ptrManager (CUDA shared memory) instead of the heap. When ptrManager is
// null the caller owns the returned array.
INLINEDEF int * TAD::tad2Sub(int index, void *ptrManager) {
int *shape = shape::shapeOf(shapeInfo);
int rank = shape::rank(shapeInfo);
int leftOverIndexLen = rank - originalDimensionLength;
int *tadShape;
int *leftOverIndexes;
int *sub;
int *ret;
#ifdef __CUDACC__
if (ptrManager != nullptr) {
UnifiedSharedMemory *manager = (UnifiedSharedMemory *) ptrManager;
ret = manager->getTempRankBuffer1();
tadShape = manager->getTempRankBuffer2();
leftOverIndexes = manager->getTempRankBuffer3();
sub = manager->getTempRankBuffer4();
} else {
ret = new int[rank];
//shape of the tad
leftOverIndexes = new int[leftOverIndexLen];
sub = new int[rank];
tadShape = new int[leftOverIndexLen];
}
#else
ret = new int[rank];
//shape of the tad
leftOverIndexes = new int[leftOverIndexLen];
sub = new int[rank];
tadShape = new int[leftOverIndexLen];
#endif
//indexes not specified in the tad indexes
//every coordinate starts as zero
memset(ret,0,sizeof(int) * rank);
//find the length of the elements we
//are iterating over
int len = 1;
//left over index cursor for initializing elements
int leftOverIndex = 0;
for(int i = 0; i < rank; i++) {
//look for dimensions NOT found in dimension length (basically compute shape - dimension (set difference)
bool found = false;
for(int j = 0; j < originalDimensionLength; j++) {
//skip over specified dimensions when computing left over length
if(i == originalDimension[j]) {
found = true;
break;
}
}
//add to the indexes that aren't specified as part of the tad dimension
//indexes
if(!found) {
//accumulate the list of indexes left over used for initializing the return value
leftOverIndexes[leftOverIndex] = i;
//accumulate the tad shape
tadShape[leftOverIndex] = shape[i];
//accumulate the length (product) of the indexes that will be iterated over
leftOverIndex++;
len *= shape[i];
}
}
//sub for indices
/* int *sub = new int[leftOverIndexLen];
shape::ind2subOrder(tadShape,index,len,sub);
*/
// Unravel the tad index over the left-over shape (C order) and scatter the
// coordinates into the full-rank result.
shape::ind2subC(leftOverIndexLen,tadShape,index,len, sub);
for(int i = 0; i < leftOverIndexLen; i++) {
ret[leftOverIndexes[i]] = sub[i];
}
// Only heap-allocated scratch is freed; manager-owned buffers are reused.
if (ptrManager == nullptr) {
delete[] leftOverIndexes;
delete[] tadShape;
delete[] sub;
}
return ret;
}
// Precompute the buffer offset of every tad, in parallel.
// NOTE(review): tadOffset() lazily calls createTadOnlyShapeInfo() when
// tadOnlyShapeInfo is null — under this parallel loop that lazy init could
// race; confirm callers build the tad shape-info before invoking this.
INLINEDEF void TAD::createOffsets() {
this->tadOffsets = new Nd4jIndex[this->numTads];
#pragma omp parallel for schedule(guided) proc_bind(close) default(shared)
for(int i = 0; i < this->numTads; i++) {
this->tadOffsets[i] = this->tadOffset(i);
}
}
// Build a freshly allocated shape-info describing one tad: handles the
// scalar/vector/whole-array fast paths first, then the general case by
// permuting the non-tad dimensions to the front and repeatedly slicing the
// permuted buffer down to a single tad. Caller owns the returned buffer.
INLINEDEF int* TAD::shapeInfoOnlyShapeAndStride() {
// Fast path: reducing along MAX_DIMENSION over the whole array -> scalar.
if(wholeThing && (dimensionLength == 1 && dimension[0] == MAX_DIMENSION) )
return shape::createScalarShapeInfo();
//ensure tad shapes get setup right for vectors
if(dimensionLength > 1 && shape::isVector(shapeInfo))
return shape::copyOf(shape::shapeInfoLength(shape::rank(shapeInfo)),shapeInfo);
// case when tad coincides with whole array
if( this->numTads == 1 && ((shape::rank(originalShapeInfo) == originalDimensionLength) || originalDimensionLength == 0)) {
// we might have special case here: skipped dimensions might be just full of ones
int *ret = shape::copyOf(shape::shapeInfoLength(shape::rank(shapeInfo)), shapeInfo);
if (shape::isDimPermuted(dimension, dimensionLength)) // check whether we need permutation
shape::doPermuteShapeBuffer(ret, dimension);
return ret;
}
int *theShape = shape::shapeOf(shapeInfo);
int rank = shape::rank(shapeInfo);
// Single-dimension vector specializations: column vector along dim 0 gets
// transposed, row vector along dim 1 is passed through, unit extents
// collapse to a scalar tagged with this tad's index.
if(dimensionLength == 1) {
if(dimension[0] == 0 && shape::isVector(shapeInfo) && theShape[1] == 1) {
int permuted[2] = {1,0};
int *permutedRet2 = shape::permuteShapeBuffer(shapeInfo,permuted);
return permutedRet2;
} else if(dimension[0] == 1 && shape::isVector(shapeInfo) && theShape[0] == 1) {
return shape::copyOf(shape::shapeInfoLength(shape::rank(shapeInfo)),shapeInfo);
}
else if(shape::shapeOf(shapeInfo)[dimension[0]] == 1) {
int *scalarInfo = shape::createScalarShapeInfo();
scalarInfo[shape::shapeInfoLength(shape::rank(scalarInfo)) - 3] = this->tadIndex;
return scalarInfo;
}
}
// General case: permute [non-tad dims..., reversed tad dims] and take the
// slice containing this tad.
int *tensorShape = this->tensorShape();
int *reverseDimensions = shape::reverseCopy(dimension,dimensionLength);
int *rankRange = shape::range(0,rank);
int *remove = shape::removeIndex(rankRange,dimension,rank,dimensionLength);
//concat is wrong here with the length
int *newPermuteDims = shape::concat(remove,rank - dimensionLength,reverseDimensions,dimensionLength);
int *permuted = shape::permuteShapeBuffer(shapeInfo,newPermuteDims);
int sliceIndex = shape::sliceOffsetForTensor(shape::rank(permuted),
this->tadIndex,
shape::shapeOf(shapeInfo),
tensorShape,
dimensionLength,
dimension,
dimensionLength);
// ret2 is a new buffer derived from permuted; it is resliced/permuted below
// and finally returned after the scratch buffers are freed.
int *ret2 = shape::sliceOfShapeBuffer(sliceIndex,permuted);
int tensorLength = shape::prod(tensorShape,tadRank);
int compLength = shape::isVector(ret2) ? shape::length(ret2) : shape::prod(tensorShape,tadRank);
// const bool isLikeVector = shape::isLikeVector(ret2);
// if(dimensionLength == tadRank && compLength == shape::length(ret2) && !isLikeVector) {
if(dimensionLength == tadRank && compLength == shape::length(ret2)) {
// Slice already matches the tad length; at most reverse-permute it.
if(dimensionLength == 1 && shape::isVector(ret2) && shape::shapeOf(ret2)[0] == 1) {
//go to the bottom and return ret2 after proper freeing of pointers
//basic idea; we *don't* permute row vectors
}
else if(dimensionLength > 1) {
//permute *then* return ret2
int *finalPermuteDims = new int[shape::rank(ret2)];
int forward = 0;
for(int i = shape::rank(ret2) - 1; i >= 0; i--) {
finalPermuteDims[forward++] = i;
}
shape::permuteShapeBufferInPlace(ret2,finalPermuteDims,ret2);
delete[] finalPermuteDims;
}
}
else {
// Slice is still larger than one tad: keep slicing until the lengths match.
int length = tensorLength;
int lengthPerSlice = this->lengthPerSlice(ret2);
int offset = tadIndex * tensorLength /lengthPerSlice;
if(sliceIndex == 0 && length == lengthPerSlice) {
int *newRet2 = shape::sliceOfShapeBuffer(offset,ret2);
delete[] ret2;
ret2 = newRet2;
int *finalPermuteDims = new int[shape::rank(ret2)];
int forward = 0;
for(int i = shape::rank(ret2) - 1; i >= 0; i--) {
finalPermuteDims[forward++] = i;
}
// bool isRowVector2 = shape::isRowVector(ret2) && !isLikeVector;
bool isRowVector2 = shape::isRowVector(ret2);
if(isRowVector2 == false) {
shape::permuteShapeBufferInPlace(ret2, finalPermuteDims, ret2);
}
delete[] finalPermuteDims;
}
else if(length == lengthPerSlice) {
// Wrap the offset back into the number of available slices.
offset -= shape::slices(ret2) * (offset / shape::slices(ret2));
int *newRet2 = shape::sliceOfShapeBuffer(offset,ret2);
delete[] ret2;
ret2 = newRet2;
if(dimensionLength == 1 && shape::isVector(ret2) && shape::shapeOf(ret2)[0] == 1) {
//go to the bottom and return ret2 after proper freeing of pointers
//basic idea; we *don't* permute row vectors
}
else {
int *finalPermuteDims = new int[shape::rank(ret2)];
int forward = 0;
for(int i = shape::rank(ret2) - 1; i >= 0; i--) {
finalPermuteDims[forward++] = i;
}
int *newRet = shape::permuteShapeBuffer(ret2,finalPermuteDims);
delete[] ret2;
delete[] finalPermuteDims;
ret2 = newRet;
}
}
else {
//execute final part, note that this is mainly so delete[] gets called
//at the bottom of the method
while(shape::length(ret2) > length) {
int lengthPerSlice2 = this->lengthPerSlice(ret2);
sliceIndex = sliceOffsetForTensor(sliceIndex,shape::length(ret2),lengthPerSlice2);
sliceIndex -= shape::slices(ret2) * (sliceIndex / shape::slices(ret2));
int *newRet2 = shape::sliceOfShapeBuffer(sliceIndex,ret2);
delete[] ret2;
ret2 = newRet2;
}
//don't permute on a row vector
if(dimensionLength == 1 && shape::isVector(ret2) && shape::shapeOf(ret2)[0] == 1) {
//go to the bottom and return ret2 after proper freeing of pointers
//basic idea; we *don't* permute row vectors
}
else if(dimensionLength > 1){
//permute *then* return ret
int *finalPermuteDims = new int[shape::rank(ret2)];
int forward = 0;
for(int i = shape::rank(ret2) - 1; i >= 0; i--) {
finalPermuteDims[forward++] = i;
}
int *newPermute = shape::permuteShapeBuffer(ret2,finalPermuteDims);
delete[] ret2;
delete[] finalPermuteDims;
ret2 = newPermute;
}
}
}
// Free all intermediate buffers; only ret2 survives.
delete[] permuted;
delete[] newPermuteDims;
delete[] rankRange;
delete[] remove;
delete[] reverseDimensions;
return ret2;
}
// Length (element count) of one tensor-along-dimension: the product of the
// source extents over the requested dimensions. Out-of-range dimension
// entries are skipped, and duplicate entries multiply once per occurrence —
// matching the original nested-scan semantics exactly.
INLINEDEF int TAD::tadLength(int *shapeInfo, int *dimension, int dimensionLength) {
if(dimensionLength == 1) {
return shape::shapeOf(shapeInfo)[dimension[0]];
}
else {
int *shapeOf = shape::shapeOf(shapeInfo);
int rank = shape::rank(shapeInfo);
int ret = 1;
// The previous implementation scanned every index i in [0, rank) for
// each dimension entry (O(rank * dimensionLength)). Each in-range entry
// matches exactly one i, so a single bounds-checked pass over
// `dimension` computes the identical product in O(dimensionLength).
for(int j = 0; j < dimensionLength; j++) {
if(dimension[j] >= 0 && dimension[j] < rank)
ret *= shapeOf[dimension[j]];
}
return ret;
}
}
// Number of tensors along the given dimensions: total element count divided
// by the length of a single tad.
INLINEDEF int TAD::tensorsAlongDimension(int *shapeInfo, int *dimension, int dimensionLength) {
    const int perTad = this->tadLength(shapeInfo, dimension, dimensionLength);
    return shape::length(shapeInfo) / perTad;
}
// Dimension-collapse algorithm (see the class-level comment): drops tad
// dimensions whose extent is 1 — trailing ones are truncated, leading ones
// shift the dimension pointer forward, and middle ones are removed with the
// remaining dimensions decremented. A no-op collapse is signalled by setting
// dimension[0] = -1.
INLINEDEF void TAD::collapse() {
int *shape = shape::shapeOf(shapeInfo);
//handle negative dimensions/backwards indexing
// NOTE(review): this fixup writes through the CURRENT dimension pointer,
// but the fresh copy below is taken from originalDimension — if the two
// differ, the negative-index fixups are lost. Confirm intended aliasing.
for(int i = 0; i < dimensionLength; i++) {
if((dimension)[i] < 0)
(dimension)[i] += shape::rank(this->shapeInfo);
}
// Work on a private copy so the caller's array is left untouched.
this->dimension = new int[dimensionLength];
memcpy(this->dimension,this->originalDimension,sizeof(int) * dimensionLength);
//we can drop trailing dimensions where it's all singular for example:
// shape: 4,3,1,2
//dimension: 0,2
// the problem for 0,2 is equivalent to: 0
//the rest of the algorithm handles cases suchas
//shape: 4,1,1,2
//dimension: 0,1
//when this happens there are other dimensions (eg: at the end) that matter
int trailingOneDimensions = 0;
//trailing ones
for(int i = dimensionLength - 1; i >= 0; i--) {
if(shape[dimension[i]] != 1) {
break;
}
else if(shape[dimension[i]] == 1)
trailingOneDimensions++;
}
dimensionLength -= trailingOneDimensions;
int leadingOneDimensions = 0;
//leading ones
for(int i = 0; i < dimensionLength; i++) {
if(shape[dimension[i]] != 1) {
break;
}
else if(shape[dimension[i]] == 1)
leadingOneDimensions++;
}
//bump the dimension pointer forward for however many leadingones there are
dimension += leadingOneDimensions;
//decrease the dimension length by the amount of leading ones
dimensionLength -= leadingOneDimensions;
// If no unit extents remain after stripping the edges, we're done.
bool preConverged = true;
for(int i = 0; i < dimensionLength; i++) {
if(shape[dimension[i]] == 1) {
preConverged = false;
break;
}
}
//we took away all the singular dimensions, we can just return
if(preConverged)
return;
//no more singular dimensions specified
bool done = false;
int onesDecrement = 0;
bool changed = false;
// Iterate until no unit extents remain among the specified dimensions (or
// the scan reached the beginning of the list).
while(!done) {
//terminate early: only singular dimensions specified for reduce
if((dimensionLength) < 1) {
done = true;
//signal as a no op
dimension[0] = -1;
break;
}
//captures intermediary result from the for loop
traceNew(3);
int intermediaryResult[MAX_RANK];
for(int i = 0; i < dimensionLength; i++) {
intermediaryResult[i] = (dimension)[i];
}
bool oneEncountered = false;
bool nonOneEncountered = false;
bool hitBeginning = false;
//assume intermediate collapsing of dimensions
bool collapseMiddleDimensions = true;
//note here that dimension length MAY end up being zero
for(int i = (dimensionLength) - 1; i >= 0; i--) {
if(shape[(dimension)[i]] == 1) {
oneEncountered = true;
//trailing ones
if(!nonOneEncountered) {
//just drop trailing ones
dimensionLength--;
nonOneEncountered = false;
collapseMiddleDimensions = false;
//intermediary result just needs to have the results copied from dimension since we're just removing the tail
memcpy(intermediaryResult,dimension,sizeof(int) * dimensionLength);
changed = true;
//break the for loop and force it to go back around starting from the new index
break;
}
else {
//already decremented all dimensions
//this was a result of hitting beginning ones
//we will only need to loop once
if(i == 0) {
hitBeginning = true;
}
//will need to shift dimensions that aren't trailing ones
//back by onesDecrement
//mark the intermediary result as -1 for non inclusion
intermediaryResult[i] = -1;
onesDecrement++;
}
}
else {
intermediaryResult[i] = (dimension)[i];
nonOneEncountered = true;
}
}
if(collapseMiddleDimensions && oneEncountered) {
//collapse dimensions
int newIntermediary[MAX_RANK];
int idx = 0;
for(int i = 0; i < dimensionLength; i++) {
//of note: dimension will decrease by the number of ones encountered
if(intermediaryResult[i] >= 0) {
//dimension 0 doesn't need to be decremented
if(intermediaryResult[i] > 0)
newIntermediary[idx++] = intermediaryResult[i] - onesDecrement;
else
newIntermediary[idx++] = intermediaryResult[i];
}
}
//decrement by the number of dimensions where ones appeared
(dimensionLength) -= onesDecrement;
//update to current result
memcpy(dimension,newIntermediary,sizeof(int) * (dimensionLength));
changed = true;
}
//converged: no need to change result
else {
//update to current result
memcpy(dimension,intermediaryResult,sizeof(int) * dimensionLength);
}
//converge when there are no singular dimensions specified in the reduce
done = (!oneEncountered && nonOneEncountered) || hitBeginning;
//delete[] intermediaryResult;
}
//nothing changed but need to collapse dimension
if(!changed && this->numOnes > 0) {
for(int i = 0; i < dimensionLength ;i++) {
dimension[i] -= numOnes;
}
}
}
}
#endif //LIBND4J_TAD_H
|
ast-dump-openmp-target-teams-distribute-parallel-for-simd.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test_one(int x) { // AST-dump fixture: combined 'target teams distribute parallel for simd' over a single loop. Trailing comment only — no lines/columns shift, so the CHECK expectations below remain valid.
#pragma omp target teams distribute parallel for simd
for (int i = 0; i < x; i++)
;
}
void test_two(int x, int y) { // AST-dump fixture: two nested loops with no collapse clause; per the CHECK lines, the inner ForStmt is the directive's structured block. Trailing comment keeps all source locations unchanged.
#pragma omp target teams distribute parallel for simd
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_three(int x, int y) { // AST-dump fixture: same loop nest as test_two but with an explicit collapse(1) clause. Trailing comment keeps all source locations unchanged for FileCheck.
#pragma omp target teams distribute parallel for simd collapse(1)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_four(int x, int y) { // AST-dump fixture: collapse(2) associates both nested loops with the combined directive. Trailing comment keeps all source locations unchanged for FileCheck.
#pragma omp target teams distribute parallel for simd collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_five(int x, int y, int z) { // AST-dump fixture: three nested loops under collapse(2); the innermost loop lies beyond the collapsed nest. Trailing comment keeps all source locations unchanged for FileCheck.
#pragma omp target teams distribute parallel for simd collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
for (int i = 0; i < z; i++)
;
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:3:1, line:7:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1>
// CHECK-NEXT: | `-OMPTargetTeamsDistributeParallelForSimdDirective {{.*}} <line:4:1, col:54>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:5:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1>
// CHECK-NEXT: | `-OMPTargetTeamsDistributeParallelForSimdDirective {{.*}} <line:10:1, col:54>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | | | | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1>
// CHECK-NEXT: | `-OMPTargetTeamsDistributeParallelForSimdDirective {{.*}} <line:17:1, col:66>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:55, col:65>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:64> 'int'
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:64> 'int' 1
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | | | | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1>
// CHECK-NEXT: | `-OMPTargetTeamsDistributeParallelForSimdDirective {{.*}} <line:24:1, col:66>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:55, col:65>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:64> 'int'
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:64> 'int' 2
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | | | | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1>
// CHECK-NEXT: `-OMPTargetTeamsDistributeParallelForSimdDirective {{.*}} <line:31:1, col:66>
// CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:55, col:65>
// CHECK-NEXT: | `-ConstantExpr {{.*}} <col:64> 'int'
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:64> 'int' 2
// CHECK-NEXT: |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict'
// CHECK-NEXT: | | | | | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict'
// CHECK-NEXT: | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict'
// CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict'
// CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict'
// CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-parallel-for-simd.c:31:1) *const restrict'
// CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
|
k-means.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/time.h>
#include <assert.h>
#include <omp.h>
#include <time.h>
#define MAX_CHAR_PER_LINE 128
/*
 * read_file: parse a whitespace/comma-separated text file of images.
 * Line format: <image_ID> <feature_1>, <feature_2>, ... (first token ignored).
 * Outputs: *numImg = number of non-blank lines, *numFea = feature count of the
 * first non-blank line (assumed uniform for all lines — TODO confirm input format).
 * Returns a (*numImg x *numFea) matrix allocated as one contiguous float buffer
 * plus a row-pointer array (caller frees images[0] then images), or NULL if the
 * file cannot be opened.
 */
float** read_file(char *filename,int *numImg,int *numFea)
{
float **images;
int i, j, len;
ssize_t numBytesRead;  // NOTE(review): unused variable
FILE *infile;
char *line, *ret;
int lenghtline;        // current capacity of the line buffer (grown on demand)
if ((infile = fopen(filename, "r")) == NULL)
{
fprintf(stderr, "Error: file is not present (%s)\n", filename); // prints error message if file is not read
return NULL;
}
lenghtline = MAX_CHAR_PER_LINE; // assigning the number of data points per line
line = (char*) malloc(lenghtline); // allocates the memory for the lenghtline variable
assert(line != NULL);
(*numImg) = 0;
/* Pass 1: count lines, growing the buffer until each whole line fits. */
while (fgets(line, lenghtline, infile) != NULL) // make sure all the input is received
{
/* fgets filled the buffer exactly => line was truncated: rewind the
 * partial read, enlarge the buffer and re-read the same line. */
while (strlen(line) == lenghtline-1)
{
len = strlen(line); // gets the length of the line
fseek(infile, -len, SEEK_CUR);
lenghtline += MAX_CHAR_PER_LINE;
line = (char*) realloc(line, lenghtline);
assert(line != NULL);
ret = fgets(line, lenghtline, infile);
assert(ret != NULL);
}
if (strtok(line, " \t\n") != 0) // breaks the string according to the delimiter mentioned
(*numImg)++; // gets the number of images present in the file
}
rewind(infile); // points to the begining of the file
(*numFea) = 0; // this specifies number of points in each images
/* Pass 2: count features on the first non-blank line only. */
while (fgets(line, lenghtline, infile) != NULL)
{
if (strtok(line, " \t\n") != 0) // divide the string into tokens
{
while (strtok(NULL, " ,\t\n") != NULL) (*numFea)++;// eliminate the fist element of every line as first element is the image_ID
break;
}
}
rewind(infile);
/* Single contiguous allocation; row i aliases images[0] + i*numFea. */
len = (*numImg) * (*numFea); // product of number of images and number of in each images
images = (float**)malloc((*numImg) * sizeof(float*)); // assign the memory for each object based upon the number of images
assert(images != NULL);
images[0] = (float*) malloc(len * sizeof(float));
assert(images[0] != NULL);
for (i=1; i<(*numImg); i++) // for each images in the file
{
images[i] = images[i-1] + (*numFea);// assign the elements to each images
}
i = 0;
/* Pass 3: store the data. NOTE(review): if a line has fewer than *numFea
 * tokens, strtok returns NULL and atof dereferences it — UB on short lines. */
while (fgets(line,lenghtline, infile) != NULL)
{
if (strtok(line, " \t\n") == NULL) continue;
for (j=0; j<(*numFea); j++) // based upon number of images all the data-points will be stored
images[i][j] = atof(strtok(NULL, " ,\t\n")); //reads every data-points (converting string to float)
i++;
}
fclose(infile);
free(line);
return images;
}
/*
 * output: write clustering results to two files derived from `filename`:
 *   <filename>.cluster_centres - one line per cluster: index then centroid coords
 *   <filename>.cluster_id      - one line per image: image index and cluster id
 * Returns 1 on success, 0 if either output file cannot be opened
 * (the original version dereferenced a NULL FILE* on open failure).
 */
int output(char *filename,int numClusters, int numImg, int numFea, float **clusters,int *cluster_id)
{
    FILE *fptr;
    int i, j;
    char outputfile[1024];

    /* snprintf instead of sprintf: never overflow outputfile. */
    snprintf(outputfile, sizeof outputfile, "%s.cluster_centres", filename);
    fptr = fopen(outputfile, "w");
    if (fptr == NULL) {                       /* bug fix: was unchecked */
        fprintf(stderr, "Error: cannot open %s for writing\n", outputfile);
        return 0;
    }
    fprintf(fptr,"=============The final cluster centroid location==============\n");
    for (i=0; i<numClusters; i++) {
        fprintf(fptr, "%d ", i);
        for (j=0; j<numFea; j++)
            fprintf(fptr, "%f ", clusters[i][j]); // The final location of the cluster centroid
        fprintf(fptr, "\n");
    }
    fclose(fptr);

    snprintf(outputfile, sizeof outputfile, "%s.cluster_id", filename);
    fptr = fopen(outputfile, "w");
    if (fptr == NULL) {                       /* bug fix: was unchecked */
        fprintf(stderr, "Error: cannot open %s for writing\n", outputfile);
        return 0;
    }
    fprintf(fptr,"|IMAGE_ID | CLUSTER_ID|\n");
    for (i=0; i<numImg; i++)
        fprintf(fptr, "|%d\t |%d\t |\n", i, cluster_id[i]);// the image_ID along with the its respective cluster_id
    fclose(fptr);
    return 1;
}
/* Squared Euclidean distance between two `ele`-element float vectors
 * (no sqrt — callers only compare distances, so the square suffices). */
static float euclidean_distance(int ele,float *image,float *clust)
{
    float acc = 0.0f;
    for (int k = 0; k < ele; k++) {
        float diff = image[k] - clust[k];
        acc += diff * diff;
    }
    return acc;
}
/* Return the index of the centroid in `clusters` nearest to `object`
 * (squared-Euclidean metric; ties keep the lowest index). */
static int cluster_index(int numClusters,int numFea,float *object,float **clusters)
{
    int best = 0;
    float best_dist = euclidean_distance(numFea, object, clusters[0]);

    for (int c = 1; c < numClusters; c++) {
        float d = euclidean_distance(numFea, object, clusters[c]);
        if (d < best_dist) {
            best_dist = d;
            best = c;
        }
    }
    return best;
}
/*
 * kmeans_omp: OpenMP Lloyd's k-means over `numImg` images of `numFea` features.
 * Seeds the centroids with the first numClusters images, then iterates:
 * parallel assignment step (per-thread partial sums to avoid contention),
 * serial reduction of per-thread partials, serial centroid update.
 * Stops when the fraction of images that changed cluster drops to `threshold`
 * or after 500 iterations. Fills cluster_id[i] with each image's final cluster.
 * Returns the numClusters x numFea centroid matrix (contiguous; caller frees
 * clusters[0] then clusters).
 */
float** kmeans_omp(float **images,int numFea,int numImg,int numClusters,float threshold,int *cluster_id)
{
int i, j, k, index, loop=0;
int *newClusterSize; // number of elements in the new clusters
float change; // specifies the number of object that changed their cluster
float **clusters; // this will be the cluster centroid
float **newClusters; // this will be the cluster centroid new location after finding out mean of the elements in that cluster
double timing;  // NOTE(review): unused
int nthreads; // specifies the number of thread
int **local_newClusterSize;   // [thread][cluster] member counts, merged serially
float ***local_newClusters;   // [thread][cluster][feature] partial sums
nthreads = omp_get_max_threads();
clusters = (float**) malloc(numClusters * sizeof(float*)); // allocate the memory for the cluster centroid based upon the number of cluster
assert(clusters != NULL);
clusters[0] = (float*) malloc(numClusters * numFea * sizeof(float));// for each cluster allocate the memory based upon the number of cluster and the number of points in the images
assert(clusters[0] != NULL);
for (i=1; i<numClusters; i++)
clusters[i] = clusters[i-1] + numFea;
/* Seed: first numClusters images become the initial centroids. */
for (i=0; i<numClusters; i++)
for (j=0; j<numFea; j++)
clusters[i][j] = images[i][j]; // The first K number of the images will be selected as the centroid of the cluster
for (i=0; i<numImg; i++) cluster_id[i] = -1;// initially all the images cluster_id will be set to -1
newClusterSize = (int*) calloc(numClusters, sizeof(int));
assert(newClusterSize != NULL);
newClusters = (float**) malloc(numClusters * sizeof(float*));// allocate the memory for the new cluster based upon the number of cluster
assert(newClusters != NULL);
newClusters[0] = (float*) calloc(numClusters * numFea, sizeof(float));// allocate the memory for the each new cluster formed based upon the number of cluster and the number of points in the images
assert(newClusters[0] != NULL);
for (i=1; i<numClusters; i++)
newClusters[i] = newClusters[i-1] + numFea;
local_newClusterSize = (int**) malloc(nthreads * sizeof(int*));// allocate the memory for the local_newclustersize variable
assert(local_newClusterSize != NULL);
local_newClusterSize[0] = (int*) calloc(nthreads*numClusters,
sizeof(int));
assert(local_newClusterSize[0] != NULL);
for (i=1; i<nthreads; i++)
local_newClusterSize[i] = local_newClusterSize[i-1]+numClusters;
local_newClusters =(float***)malloc(nthreads * sizeof(float**)); // allocate the memory for the local_newcluster variable
assert(local_newClusters != NULL);
local_newClusters[0] =(float**) malloc(nthreads * numClusters *
sizeof(float*));
assert(local_newClusters[0] != NULL);
for (i=1; i<nthreads; i++)
local_newClusters[i] = local_newClusters[i-1] + numClusters;
for (i=0; i<nthreads; i++)
{
for (j=0; j<numClusters; j++)
{
local_newClusters[i][j] = (float*)calloc(numFea,
sizeof(float));
assert(local_newClusters[i][j] != NULL);
}
}
do {
change = 0.0;
/* Assignment step: each thread writes only its own tid-indexed slots, so
 * no locks are needed; `change` is combined with an OpenMP reduction. */
#pragma omp parallel \
shared(images,clusters,cluster_id,local_newClusters,local_newClusterSize)
{
int tid = omp_get_thread_num();
#pragma omp for \
private(i,j,index) \
firstprivate(numImg,numClusters,numFea) \
schedule(static) \
reduction(+:change)
for (i=0; i<numImg; i++) // parallel the for loop
{
index = cluster_index(numClusters, numFea,images[i], clusters);// call the index function to get index for each images
if (cluster_id[i] != index) change += 1.0; // increment the change value when the images changes its membership
cluster_id[i] = index; // The cluster_id[image_id] is equal to the index
local_newClusterSize[tid][index]++; // increment the local_newClusterSize if the data-point falls in that cluster
for (j=0; j<numFea; j++)
local_newClusters[tid][index][j] += images[i][j]; // add the data-point to the local_newClusters of that particular index
}
}
/* Serial reduction: fold per-thread partials into the global accumulators
 * and zero the partials for the next iteration. */
for (i=0; i<numClusters; i++)
{
for (j=0; j<nthreads; j++)
{
newClusterSize[i] += local_newClusterSize[j][i];// assign the value of the local_newClusterSize to the newClusterSize
local_newClusterSize[j][i] = 0.0;  // NOTE(review): float literal assigned to int — works, but 0 would be clearer
for (k=0; k<numFea; k++)
{
newClusters[i][k] += local_newClusters[j][i][k]; // assign the value present in the local_newClusters to the newClusters
local_newClusters[j][i][k] = 0.0;
}
}
}
/* Update step: new centroid = mean of its members.
 * NOTE(review): the `> 1` guard leaves singleton clusters' centroids
 * unchanged — presumably intentional, but verify (common form is > 0). */
for (i=0; i<numClusters; i++) {
for (j=0; j<numFea; j++) {
if (newClusterSize[i] > 1)
clusters[i][j] = newClusters[i][j] / newClusterSize[i];// calculate the mean to get the new cluster centroid location
newClusters[i][j] = 0.0;
}
newClusterSize[i] = 0;
}
change /= numImg;  // fraction of images that switched cluster this round
} while (change > threshold && loop++ < 500); // check the condition
free(local_newClusterSize[0]);
free(local_newClusterSize);
for (i=0; i<nthreads; i++)
for (j=0; j<numClusters; j++)
free(local_newClusters[i][j]);
free(local_newClusters[0]);
free(local_newClusters);
free(newClusters[0]);
free(newClusters);
free(newClusterSize);
return clusters;
}
/*
 * main: driver. Usage: <prog> <datafile>; the cluster count is read
 * interactively from stdin. Runs the parallel k-means and writes the
 * results via output().
 * NOTE(review): argv[1] is read without checking argc — running with no
 * argument is undefined; the later `filename == 0` check comes too late
 * to prevent that read on some platforms. Verify intended usage.
 */
int main(int argc, char **argv) {
int i, j, nthreads;
int output_timing;  // NOTE(review): unused
int numClusters, numFea, numImg;
int *cluster_id;
char *filename;
float **images;
float **clusters;
float threshold;
// default values
nthreads = 4;
numClusters = 0;
threshold = 0.001;
numClusters = 0;  // NOTE(review): duplicate assignment
filename = NULL;
filename =argv[1]; // get the filename
printf("Enter the number of clusters\n"); // get the number of clusters
scanf("%d",&numClusters);
if (filename == 0 || numClusters <= 1) printf("please enter in this format 'seq color.txt'\n ");// if the filename and the number of cluster print the message
printf("The filename you have entered = %s\n", filename);
printf("The number of cluster you have entered = %d\n", numClusters);
if (nthreads > 0)
omp_set_num_threads(nthreads); // allocate the thread
/* NOTE(review): clock() measures CPU time summed over all threads, not
 * wall-clock time — omp_get_wtime() would report elapsed time instead. */
clock_t begin = clock(); // start the clock
images = read_file(filename, &numImg, &numFea); // read the data-points from the text file
if (images == NULL) exit(1);
cluster_id = (int*) malloc(numImg * sizeof(int)); // cluster_id specifies in which index does the image belong
assert(cluster_id != NULL);
clusters = kmeans_omp(images, numFea, numImg, numClusters, threshold, cluster_id);// calls the parallel kmeans
free(images[0]);
free(images);
clock_t end = clock();
double time_spent = (double)(end - begin) / CLOCKS_PER_SEC; // specify the time spent in the execution
output(filename, numClusters, numImg, numFea, clusters, cluster_id);// output the final cluster centroid and the membership of the each images
free(cluster_id);
free(clusters[0]);
free(clusters);
printf("========The parallel Implementation of the K-Means=======\n");
printf("Computation timing = %10.4f sec\n", time_spent);
return(0);
} |
flowinfo_metadata.c | /*
* Copyright 2014-2016 Nippon Telegraph and Telephone Corporation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file flowinfo_metadata.c
* @brief Optimized flow database for dataplane, for metadata
*/
#include <stdlib.h>
#include "openflow.h"
#include "lagopus/flowdb.h"
#include "pktbuf.h"
#include "packet.h"
#include "lagopus/flowinfo.h"
#define OXM_FIELD_TYPE(field) ((field) >> 1)
#define METADATA_BITLEN (64)
static lagopus_result_t
add_flow_metadata_mask(struct flowinfo *, struct flow *);
static lagopus_result_t
del_flow_metadata_mask(struct flowinfo *, struct flow *);
static struct flow *
match_flow_metadata_mask(struct flowinfo *, struct lagopus_packet *,
int32_t *);
static struct flow *
find_flow_metadata_mask(struct flowinfo *, struct flow *);
static void
destroy_flowinfo_metadata_mask(struct flowinfo *);
static lagopus_result_t
add_flow_metadata(struct flowinfo *, struct flow *);
static lagopus_result_t
del_flow_metadata(struct flowinfo *, struct flow *);
static struct flow *
match_flow_metadata(struct flowinfo *, struct lagopus_packet *, int32_t *);
static struct flow *
find_flow_metadata(struct flowinfo *, struct flow *);
static void
destroy_flowinfo_metadata(struct flowinfo *);
/**
 * Extract the OXM metadata match (value and mask) from a flow's match list.
 *
 * @param[in]  match_list  Flow match list to scan.
 * @param[out] metadata    Metadata value (written only when found).
 * @param[out] mask        Metadata mask; all-ones for an exact (maskless) match.
 *
 * @retval LAGOPUS_RESULT_OK        Metadata match found.
 * @retval LAGOPUS_RESULT_NOT_FOUND No metadata match in the list.
 *
 * The first branch handles the has-mask OXM encoding (low bit of oxm_field
 * set; oxm_value holds 8 bytes of value followed by 8 bytes of mask).  It
 * must stay before the second branch, which matches ANY metadata field and
 * synthesizes a full mask for the maskless encoding.
 */
static lagopus_result_t
get_match_metadata(const struct match_list *match_list,
uint64_t *metadata,
uint64_t *mask) {
const struct match *match;
TAILQ_FOREACH(match, match_list, entry) {
if (match->oxm_field == (OFPXMT_OFB_METADATA << 1) + 1) {
OS_MEMCPY(metadata, match->oxm_value, sizeof(*metadata));
OS_MEMCPY(mask, &match->oxm_value[8], sizeof(*mask));
break;
}
if (OXM_FIELD_TYPE(match->oxm_field) == OFPXMT_OFB_METADATA) {
OS_MEMCPY(metadata, match->oxm_value, sizeof(*metadata));
*mask = 0xffffffffffffffff;
break;
}
}
/* TAILQ_FOREACH leaves match == NULL iff the loop ran to completion. */
if (match == NULL) {
return LAGOPUS_RESULT_NOT_FOUND;
}
return LAGOPUS_RESULT_OK;
}
/**
 * Allocate a flowinfo node that dispatches on (metadata & mask) pairs.
 * Each distinct mask gets its own child in self->next[]; flows without a
 * metadata match fall through to self->misc.
 * Returns NULL on allocation failure.
 */
struct flowinfo *
new_flowinfo_metadata_mask(void) {
struct flowinfo *self;
self = calloc(1, sizeof(struct flowinfo));
if (self != NULL) {
self->nflow = 0;
self->nnext = 0;
/* 1-byte placeholder so later realloc() in add_flow_metadata_mask is valid. */
self->next = malloc(1);
/* NOTE(review): next and misc are not NULL-checked here; failure surfaces
 * later as a deref — confirm whether callers guarantee memory. */
self->misc = new_flowinfo_eth_type();
self->add_func = add_flow_metadata_mask;
self->del_func = del_flow_metadata_mask;
self->match_func = match_flow_metadata_mask;
self->find_func = find_flow_metadata_mask;
self->destroy_func = destroy_flowinfo_metadata_mask;
}
return self;
}
/**
 * Destroy a metadata-mask flowinfo: recursively destroy every per-mask
 * child, the misc (no-metadata) subtree, and the node itself.
 */
static void
destroy_flowinfo_metadata_mask(struct flowinfo *self) {
  struct flowinfo *flowinfo;
  unsigned int i;

  for (i = 0; i < self->nnext; i++) {
    flowinfo = self->next[i];
    flowinfo->destroy_func(flowinfo);
  }
  free(self->next);
  /* Bug fix: self->misc is created in new_flowinfo_metadata_mask() but was
   * never destroyed here, leaking the misc subtree on teardown. */
  if (self->misc != NULL) {
    self->misc->destroy_func(self->misc);
  }
  free(self);
}
/* Hashmap value destructor: forward to the flowinfo's own destroy hook. */
static void
freeup_flowinfo(void *val) {
  struct flowinfo *fi = val;
  fi->destroy_func(fi);
}
/**
 * Allocate a flowinfo node keyed by exact (pre-masked) metadata value.
 * Children live in a hashmap keyed by the 64-bit metadata; the hashmap owns
 * them via freeup_flowinfo. Used only as a child of the mask-level node, so
 * misc is unnecessary here. Returns NULL on allocation failure.
 */
struct flowinfo *
new_flowinfo_metadata(void) {
struct flowinfo *self;
self = calloc(1, sizeof(struct flowinfo));
if (self != NULL) {
lagopus_hashmap_create(&self->hashmap, LAGOPUS_HASHMAP_TYPE_ONE_WORD,
freeup_flowinfo);
/* misc is not used */
self->add_func = add_flow_metadata;
self->del_func = del_flow_metadata;
self->match_func = match_flow_metadata;
self->find_func = find_flow_metadata;
self->destroy_func = destroy_flowinfo_metadata;
}
return self;
}
/**
 * Destroy a per-metadata flowinfo node; `true` makes the hashmap invoke
 * freeup_flowinfo on every stored child before the map itself is freed.
 */
static void
destroy_flowinfo_metadata(struct flowinfo *self) {
lagopus_hashmap_destroy(&self->hashmap, true);
free(self);
}
/**
 * Add a flow to the mask-level metadata table.
 * If the flow matches on metadata, find (or lazily create) the child node
 * for its mask and delegate; otherwise delegate to the misc subtree.
 *
 * @retval LAGOPUS_RESULT_OK on success; child add_func errors propagate.
 */
static lagopus_result_t
add_flow_metadata_mask(struct flowinfo *self, struct flow *flow) {
struct flowinfo *flowinfo;
uint64_t metadata, mask;
lagopus_result_t rv;
unsigned int i;
rv = get_match_metadata(&flow->match_list, &metadata, &mask);
if (rv == LAGOPUS_RESULT_OK) {
/* Linear scan for an existing child carrying this mask (stored in userdata). */
rv = LAGOPUS_RESULT_NOT_FOUND;
for (i = 0; i < self->nnext; i++) {
if (self->next[i]->userdata == mask) {
flowinfo = self->next[i];
rv = LAGOPUS_RESULT_OK;
break;
}
}
if (rv == LAGOPUS_RESULT_NOT_FOUND) {
/* new node. */
/* NOTE(review): neither new_flowinfo_metadata() nor realloc() is
 * NULL-checked; an allocation failure here dereferences NULL. */
flowinfo = new_flowinfo_metadata();
flowinfo->userdata = mask;
self->next = realloc(self->next,
(unsigned long)(self->nnext + 1) *
sizeof(struct flowinfo *));
self->next[self->nnext] = flowinfo;
self->nnext++;
}
rv = flowinfo->add_func(flowinfo, flow);
} else {
rv = self->misc->add_func(self->misc, flow);
}
if (rv == LAGOPUS_RESULT_OK) {
self->nflow++;
}
return rv;
}
/**
 * Remove a flow from the mask-level metadata table.
 * Locates the child node for the flow's mask and delegates; when the child
 * becomes empty it is destroyed and compacted out of self->next[].  Flows
 * without a metadata match are delegated to the misc subtree.
 *
 * @retval LAGOPUS_RESULT_OK        Flow removed.
 * @retval LAGOPUS_RESULT_NOT_FOUND No child exists for the flow's mask.
 */
static lagopus_result_t
del_flow_metadata_mask(struct flowinfo *self, struct flow *flow) {
  struct flowinfo *flowinfo;
  uint64_t metadata, mask;
  lagopus_result_t rv;
  unsigned int i;

  rv = get_match_metadata(&flow->match_list, &metadata, &mask);
  if (rv == LAGOPUS_RESULT_OK) {
    rv = LAGOPUS_RESULT_NOT_FOUND;
    for (i = 0; i < self->nnext; i++) {
      if (self->next[i]->userdata == mask) {
        flowinfo = self->next[i];
        rv = LAGOPUS_RESULT_OK;
        break;
      }
    }
    if (rv == LAGOPUS_RESULT_NOT_FOUND) {
      return LAGOPUS_RESULT_NOT_FOUND;
    }
    rv = flowinfo->del_func(flowinfo, flow);
    if (flowinfo->nflow == 0) {
      flowinfo->destroy_func(flowinfo);
      self->nnext--;
      /* Bug fix: the size passed to memmove was (nnext - i) BYTES, but the
       * array holds pointers — the element count must be scaled by
       * sizeof(struct flowinfo *), otherwise trailing entries are corrupted. */
      memmove(&self->next[i], &self->next[i + 1],
              (size_t)(self->nnext - i) * sizeof(struct flowinfo *));
    }
  } else {
    rv = self->misc->del_func(self->misc, flow);
  }
  if (rv == LAGOPUS_RESULT_OK) {
    self->nflow--;
  }
  return rv;
}
/**
 * Match a packet against every per-mask child plus the misc subtree and
 * return the best flow, or NULL if nothing matched.
 *
 * Every child must be consulted because different masks can all apply to
 * the same packet; `mismatched` is a priority-0 sentinel so the priority
 * comparison needs no NULL special case.
 * NOTE(review): the misc result overrides the per-mask winner regardless of
 * priority — confirm this precedence is intended (`*pri` bookkeeping inside
 * match_func may already guarantee it).
 */
static struct flow *
match_flow_metadata_mask(struct flowinfo *self, struct lagopus_packet *pkt,
int32_t *pri) {
struct flowinfo *flowinfo;
struct flow *flow[self->nnext], *matched, *alt_flow;  /* VLA: one slot per child */
struct flow mismatched = {
.priority = 0,
.flags = 0,
.idle_timeout = 0,
.hard_timeout = 0,
.match_list = {NULL, NULL},
.instruction_list = {NULL, NULL},
.field_bits = 0
};
unsigned int i;
matched = &mismatched;
//#pragma omp parallel for
for (i = 0; i < self->nnext; i++) {
flowinfo = self->next[i];
flow[i] = flowinfo->match_func(flowinfo, pkt, pri);
}
/* Pick the highest-priority hit among the per-mask results. */
for (i = 0; i < self->nnext; i++) {
if (flow[i] != NULL && flow[i]->priority > matched->priority) {
matched = flow[i];
}
}
alt_flow = self->misc->match_func(self->misc, pkt, pri);
if (alt_flow != NULL) {
matched = alt_flow;
}
if (matched == &mismatched) {
matched = NULL;
}
return matched;
}
/**
 * Find the stored flow identical to `flow` (same match set), for flow-mod
 * operations. Routes by the flow's metadata mask to the matching child, or
 * to misc when the flow has no metadata match.
 * Returns NULL when no child exists for the mask.
 */
static struct flow *
find_flow_metadata_mask(struct flowinfo *self, struct flow *flow) {
struct flowinfo *flowinfo;
uint64_t metadata, mask;
lagopus_result_t rv;
unsigned int i;
rv = get_match_metadata(&flow->match_list, &metadata, &mask);
if (rv == LAGOPUS_RESULT_OK) {
rv = LAGOPUS_RESULT_NOT_FOUND;
for (i = 0; i < self->nnext; i++) {
if (self->next[i]->userdata == mask) {
flowinfo = self->next[i];
rv = LAGOPUS_RESULT_OK;
break;
}
}
if (rv == LAGOPUS_RESULT_NOT_FOUND) {
return NULL;
}
} else {
flowinfo = self->misc;
}
return flowinfo->find_func(flowinfo, flow);
}
/**
 * Add a flow to the per-metadata hashmap node.
 * Looks up (or lazily creates) the eth_type-level child keyed by the flow's
 * metadata value and delegates the add to it.
 *
 * @retval LAGOPUS_RESULT_OK on success; lookup/add/child errors propagate.
 * Flows without a metadata match return get_match_metadata's error (this
 * node has no misc subtree — it only ever receives metadata flows).
 */
static lagopus_result_t
add_flow_metadata(struct flowinfo *self, struct flow *flow) {
struct flowinfo *flowinfo;
uint64_t metadata, mask;
lagopus_result_t rv;
rv = get_match_metadata(&flow->match_list, &metadata, &mask);
if (rv == LAGOPUS_RESULT_OK) {
/* The 64-bit metadata value itself is the ONE_WORD hashmap key. */
rv = lagopus_hashmap_find_no_lock(&self->hashmap,
(void *)metadata, (void *)&flowinfo);
if (rv != LAGOPUS_RESULT_OK) {
void *val;
flowinfo = new_flowinfo_eth_type();
val = flowinfo;
rv = lagopus_hashmap_add_no_lock(&self->hashmap, (void *)metadata,
(void *)&val, false);
if (rv != LAGOPUS_RESULT_OK) {
/* NOTE(review): the freshly created flowinfo leaks on this path —
 * it was never inserted into the hashmap. Confirm and fix upstream. */
goto out;
}
}
rv = flowinfo->add_func(flowinfo, flow);
if (rv == LAGOPUS_RESULT_OK) {
self->nflow++;
}
}
out:
return rv;
}
/**
 * Remove a flow from the per-metadata hashmap node: route by the flow's
 * metadata value to its child and delegate the delete, decrementing the
 * local flow count on success. Errors from the metadata extraction or the
 * hashmap lookup are returned unchanged.
 */
static lagopus_result_t
del_flow_metadata(struct flowinfo *self, struct flow *flow) {
  struct flowinfo *sub;
  uint64_t metadata, mask;
  lagopus_result_t rv;

  rv = get_match_metadata(&flow->match_list, &metadata, &mask);
  if (rv != LAGOPUS_RESULT_OK) {
    return rv;
  }
  rv = lagopus_hashmap_find_no_lock(&self->hashmap, (void *)metadata,
                                    (void *)&sub);
  if (rv == LAGOPUS_RESULT_OK) {
    rv = sub->del_func(sub, flow);
  }
  if (rv == LAGOPUS_RESULT_OK) {
    self->nflow--;
  }
  return rv;
}
/**
 * Match a packet at the per-metadata level: mask the packet's metadata with
 * this node's mask (stashed in userdata by add_flow_metadata_mask), look up
 * the resulting value in the hashmap, and delegate to the child on a hit.
 * Returns the matched flow or NULL.
 */
static struct flow *
match_flow_metadata(struct flowinfo *self, struct lagopus_packet *pkt,
int32_t *pri) {
struct flowinfo *flowinfo;
uint64_t metadata;
struct flow *flow;
lagopus_result_t rv;
flow = NULL;
metadata = (pkt->oob_data.metadata & self->userdata);
rv = lagopus_hashmap_find_no_lock(&self->hashmap,
(void *)metadata,
(void *)&flowinfo);
if (rv == LAGOPUS_RESULT_OK) {
flow = flowinfo->match_func(flowinfo, pkt, pri);
}
return flow;
}
/**
 * Find the stored flow identical to `flow` at the per-metadata level.
 * Routes by the flow's metadata value to the hashmap child; returns NULL
 * when no child exists for that value.
 * NOTE(review): the else branch uses self->misc, which new_flowinfo_metadata
 * leaves NULL ("misc is not used") — this path would dereference NULL if a
 * non-metadata flow ever reached this node. Confirm callers prevent that.
 */
static struct flow *
find_flow_metadata(struct flowinfo *self, struct flow *flow) {
struct flowinfo *flowinfo;
uint64_t metadata, mask;
lagopus_result_t rv;
rv = get_match_metadata(&flow->match_list, &metadata, &mask);
if (rv == LAGOPUS_RESULT_OK) {
rv = lagopus_hashmap_find_no_lock(&self->hashmap,
(void *)metadata,
(void *)&flowinfo);
if (rv != LAGOPUS_RESULT_OK) {
return NULL;
}
} else {
flowinfo = self->misc;
}
return flowinfo->find_func(flowinfo, flow);
}
|
sink-fold-3.c | /* { dg-do compile } */
/* { dg-options "-fopenmp -fdump-tree-omplower" } */
/* Test that we fold sink offsets correctly while taking into account
pointer sizes. */
/* 400-byte element type: sized so pointer-arithmetic sink offsets scale by
 * sizeof(foo) (p+2 -> 800 bytes), which is what this test verifies. */
typedef struct {
char stuff[400];
} foo;
/* Doacross loop over foo pointers: the depend(sink:p+2) offset must be
 * folded by omplower into a byte offset of 2*sizeof(foo) = 800 (checked by
 * the scan-tree-dump directive below). Code must stay byte-identical. */
void
funk (foo *begin, foo *end)
{
foo *p;
#pragma omp parallel for ordered(1)
for (p=end; p > begin; p--)
{
#pragma omp ordered depend(sink:p+2) depend(sink:p+4)
void bar ();
bar();
#pragma omp ordered depend(source)
}
}
/* { dg-final { scan-tree-dump-times "depend\\(sink:p\\+800\\)" 1 "omplower" } } */
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.