source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
GB_binop__lt_int32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lt_int32)
// A.*B function (eWiseMult): GB (_AemultB_08__lt_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__lt_int32)
// A.*B function (eWiseMult): GB (_AemultB_04__lt_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lt_int32)
// A*D function (colscale): GB (_AxD__lt_int32)
// D*A function (rowscale): GB (_DxB__lt_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__lt_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__lt_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lt_int32)
// C=scalar+B GB (_bind1st__lt_int32)
// C=scalar+B' GB (_bind1st_tran__lt_int32)
// C=A+scalar GB (_bind2nd__lt_int32)
// C=A'+scalar GB (_bind2nd_tran__lt_int32)
// C type: bool
// A type: int32_t
// A pattern? 0
// B type: int32_t
// B pattern? 0
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x < y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LT || GxB_NO_INT32 || GxB_NO_LT_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// LT is not in that list, so the dense C+=A+B kernel is not generated for
// this operator: the name is "(none)" and the whole body is compiled out.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense.  The loop itself lives in the
// included template, specialized via the GB_* macros above for z = (x < y).
void GB (_Cdense_ewise3_noaccum__lt_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__lt_int32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// The accumulate variant is compiled out (#if 0) for the LT operator;
// this stub only reports success.
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__lt_int32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// The scalar-accumulate variant is compiled out (#if 0) for the LT
// operator; this stub only reports success.
#if 0
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__lt_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
// Operator compiled out (see GB_DISABLE); caller falls back to the
// generic kernel.
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;   // C values are bool for LT
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__lt_int32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;   // C values are bool for LT
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd C = A+B (or eWiseUnion with alpha/beta fill scalars), with an
// optional mask M.  The actual loops live in GB_add_template.c.
GrB_Info GB (_AaddB__lt_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int32_t alpha_scalar ;
int32_t beta_scalar ;
if (is_eWiseUnion)
{
// alpha/beta are only read for eWiseUnion; unpack them from GB_void
alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
beta_scalar = (*((int32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult C = A.*B where C is sparse/hypersparse; the loops live in
// GB_emult_08_meta.c, specialized for z = (x < y).
GrB_Info GB (_AemultB_08__lt_int32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult where A is sparse/hyper and B is bitmap/full.  GB_BINOP_FLIP is
// 0 for LT (the flipped case is handled by using GT instead), so only the
// #else branch below is compiled in.
GrB_Info GB (_AemultB_02__lt_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<M> = A.*B where M is sparse/hyper and A, B are bitmap/full;
// the loops live in GB_emult_04_template.c.
GrB_Info GB (_AemultB_04__lt_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult where the result C is bitmap; the loops live in
// GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__lt_int32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x < Bx [p]) for every entry p present in B.
GrB_Info GB (_bind1st__lt_int32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// GBB consults the bitmap Bb: skip positions holding no entry
if (!GBB (Bb, p)) continue ;
int32_t bij = GBX (Bx, p, false) ;
Cx [p] = (x < bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] < y) for every entry p present in A.
GrB_Info GB (_bind2nd__lt_int32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// GBB consults the bitmap Ab: skip positions holding no entry
if (!GBB (Ab, p)) continue ;
int32_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij < y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
// GB_CAST_OP applies LT with the scalar x bound as the first operand.
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x < aij) ; \
}
GrB_Info GB (_bind1st_tran__lt_int32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the rest of the file (A and B are both int32_t
// for this operator, so the redefinition is identical)
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
// GB_CAST_OP applies LT with the scalar y bound as the second operand.
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij < y) ; \
}
GrB_Info GB (_bind2nd_tran__lt_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
// the template transposes A and applies GB_CAST_OP to each entry
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
vecadd_opt1.c | #include <stdio.h>
#include <omp.h>
#include "timer.h"
// large enough to force into main memory
#define ARRAY_SIZE 80000000
static double a[ARRAY_SIZE], b[ARRAY_SIZE], c[ARRAY_SIZE];
void vector_add(double *c, double *a, double *b, int n);
int main(int argc, char *argv[]){
// report the thread count once (only thread 0 prints)
#pragma omp parallel
if (omp_get_thread_num() == 0)
printf("Running with %d thread(s)\n",omp_get_num_threads());
struct timespec tstart;
double time_sum = 0.0;
// initialize operands; every c[i] should become 3.0 after the add
for (int i=0; i<ARRAY_SIZE; i++) {
a[i] = 1.0;
b[i] = 2.0;
}
cpu_timer_start(&tstart);
vector_add(c, a, b, ARRAY_SIZE);
time_sum += cpu_timer_stop(tstart);
// NOTE(review): assumes cpu_timer_stop returns milliseconds -- confirm
// against timer.h
printf("Runtime is %lf msecs\n", time_sum);
}
// Element-wise vector sum: c[i] = a[i] + b[i] for i in [0, n).
// Iterations are independent, so the loop is parallelized with OpenMP.
void vector_add(double *c, double *a, double *b, int n)
{
#pragma omp parallel for
    for (int idx = 0; idx < n; ++idx) {
        const double sum = a[idx] + b[idx];
        c[idx] = sum;
    }
}
|
hmvm_omp.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#include "hacapk.h"
// ######## ######## ######## ########
// H-matrix-vector multiply zau = mat * zu using the submat[] representation.
// Each thread zeroes a private accumulator zaut, processes its share of the
// nlf leaf blocks, then atomically merges only its touched row range
// [ls..le] (1-based) into the shared output zau.
void hmvm_omp_1
(double *zau, const matrix *mat, const double *zu)
{
#pragma omp parallel
{
int ip,il,it;
int ndl,ndt,nstrtl,nstrtt,kt,itl,itt,ill;
double *zaut, *zbut;   // per-thread work arrays
int ls, le;            // 1-based row range this thread wrote to
int i;
int nd = mat->nd;
int nlf = mat->nlf;
#pragma omp for
for(i=0;i<nd;i++)zau[i]=0.0;
// NOTE(review): malloc results are not checked for NULL
zaut = (double*)malloc(sizeof(double)*nd);
for(il=0;il<nd;il++)zaut[il]=0.0;
zbut = (double*)malloc(sizeof(double)*mat->ktmax);
ls = nd;
le = 1;
#pragma omp for
for(ip=0; ip<nlf; ip++){
ndl =mat->submat[ip].ndl;
ndt =mat->submat[ip].ndt;
nstrtl=mat->submat[ip].nstrtl;
nstrtt=mat->submat[ip].nstrtt;
if(nstrtl<ls)ls=nstrtl;
if(nstrtl+ndl-1>le)le=nstrtl+ndl-1;
if(mat->submat[ip].ltmtx==1){
// two-factor block: zbut = a1 * zu(part), then zaut(part) += a2 * zbut
kt=mat->submat[ip].kt;
//for(il=0;il<kt;il++)zbut[il]=0.0;
for(il=0; il<kt; il++){
zbut[il]=0.0;
for(it=0; it<ndt; it++){
itt=it+nstrtt-1;
itl=it+il*ndt;
zbut[il] += mat->submat[ip].a1[itl]*zu[itt];
}
}
for(il=0; il<kt; il++){
for(it=0; it<ndl; it++){
ill=it+nstrtl-1;
itl=it+il*ndl;
zaut[ill] += mat->submat[ip].a2[itl]*zbut[il];
}
}
} else if(mat->submat[ip].ltmtx==2){
// dense block: zaut(part) += a1 * zu(part) directly
for(il=0; il<ndl; il++){
ill=il+nstrtl-1;
for(it=0; it<ndt; it++){
itt=it+nstrtt-1;
itl=it+il*ndt;
zaut[ill] += mat->submat[ip].a1[itl]*zu[itt];
}
}
}
}
// merge this thread's private rows into the shared result
for(il=ls-1;il<=le-1;il++){
#pragma omp atomic
zau[il] += zaut[il];
}
free(zaut); free(zbut);
}
}
// ######## ######## ######## ########
// Same as hmvm_omp_1, except the second product of a two-factor block uses
// the pre-transposed factor a2t and iterates its rows contiguously
// (loop-transposed variant).
void hmvm_omp_1t
(double *zau, const matrix *mat, const double *zu)
{
#pragma omp parallel
{
int ip,il,it;
int ndl,ndt,nstrtl,nstrtt,kt,itl,itt,ill;
double *zaut, *zbut;   // per-thread work arrays
int ls, le;            // 1-based row range this thread wrote to
int i;
int nd = mat->nd;
int nlf = mat->nlf;
#pragma omp for
for(i=0;i<nd;i++)zau[i]=0.0;
// NOTE(review): malloc results are not checked for NULL
zaut = (double*)malloc(sizeof(double)*nd);
for(il=0;il<nd;il++)zaut[il]=0.0;
zbut = (double*)malloc(sizeof(double)*mat->ktmax);
ls = nd;
le = 1;
#pragma omp for
for(ip=0; ip<nlf; ip++){
ndl =mat->submat[ip].ndl;
ndt =mat->submat[ip].ndt;
nstrtl=mat->submat[ip].nstrtl;
nstrtt=mat->submat[ip].nstrtt;
if(nstrtl<ls)ls=nstrtl;
if(nstrtl+ndl-1>le)le=nstrtl+ndl-1;
if(mat->submat[ip].ltmtx==1){
// two-factor block: zbut = a1 * zu(part), then zaut(part) += a2t^T * zbut
kt=mat->submat[ip].kt;
//for(il=0;il<kt;il++)zbut[il]=0.0;
for(il=0; il<kt; il++){
zbut[il]=0.0;
for(it=0; it<ndt; it++){
itt=it+nstrtt-1;
itl=it+il*ndt;
zbut[il] += mat->submat[ip].a1[itl]*zu[itt];
}
}
for(il=0; il<ndl; il++){
ill=il+nstrtl-1;
for(it=0; it<kt; it++){
itl=it+il*kt;
zaut[ill] += mat->submat[ip].a2t[itl]*zbut[it];
}
}
} else if(mat->submat[ip].ltmtx==2){
// dense block: zaut(part) += a1 * zu(part) directly
for(il=0; il<ndl; il++){
ill=il+nstrtl-1;
for(it=0; it<ndt; it++){
itt=it+nstrtt-1;
itl=it+il*ndt;
zaut[ill] += mat->submat[ip].a1[itl]*zu[itt];
}
}
}
}
// merge this thread's private rows into the shared result
for(il=ls-1;il<=le-1;il++){
#pragma omp atomic
zau[il] += zaut[il];
}
free(zaut); free(zbut);
}
}
// ######## ######## ######## ########
// Same algorithm as hmvm_omp_1, but on the matrix2 representation: per-block
// metadata lives in parallel arrays (ndl[], ndt[], ...) and all factor data
// is packed into one flat array mat->rowmat, addressed via the per-block
// offsets mat->a1[ip] and mat->a2[ip] ("head").
void hmvm_omp_2
(double *zau, const matrix2 *mat, const double *zu)
{
#pragma omp parallel
{
int ip,il,it;
int ndl,ndt,nstrtl,nstrtt,kt,itl,itt,ill;
double *zaut, *zbut;   // per-thread work arrays
int ls, le;            // 1-based row range this thread wrote to
int i;
int nd = mat->nd;
int nlf = mat->nlf;
int head;              // offset of the current block's data in rowmat
#pragma omp for
for(i=0;i<nd;i++)zau[i]=0.0;
// NOTE(review): malloc results are not checked for NULL
zaut = (double*)malloc(sizeof(double)*nd);
for(il=0;il<nd;il++)zaut[il]=0.0;
zbut = (double*)malloc(sizeof(double)*mat->ktmax);
ls = nd;
le = 1;
#pragma omp for
for(ip=0; ip<nlf; ip++){
ndl =mat->ndl[ip];
ndt =mat->ndt[ip];
nstrtl=mat->nstrtl[ip];
nstrtt=mat->nstrtt[ip];
if(nstrtl<ls)ls=nstrtl;
if(nstrtl+ndl-1>le)le=nstrtl+ndl-1;
if(mat->ltmtx[ip]==1){
// two-factor block: zbut = a1 * zu(part), then zaut(part) += a2 * zbut
kt=mat->kt[ip];
//for(il=0;il<kt;il++)zbut[il]=0.0;
head=mat->a1[ip];
for(il=0; il<kt; il++){
zbut[il]=0.0;
for(it=0; it<ndt; it++){
itt=it+nstrtt-1;
itl=it+il*ndt;
zbut[il] += mat->rowmat[head+itl]*zu[itt];
}
}
head=mat->a2[ip];
for(il=0; il<kt; il++){
for(it=0; it<ndl; it++){
ill=it+nstrtl-1;
itl=it+il*ndl;
zaut[ill] += mat->rowmat[head+itl]*zbut[il];
}
}
} else if(mat->ltmtx[ip]==2){
// dense block: zaut(part) += a1 * zu(part) directly
head=mat->a1[ip];
for(il=0; il<ndl; il++){
ill=il+nstrtl-1;
for(it=0; it<ndt; it++){
itt=it+nstrtt-1;
itl=it+il*ndt;
zaut[ill] += mat->rowmat[head+itl]*zu[itt];
}
}
}
}
// merge this thread's private rows into the shared result
for(il=ls-1;il<=le-1;il++){
#pragma omp atomic
zau[il] += zaut[il];
}
free(zaut); free(zbut);
}
}
// ######## ######## ######## ########
// Same as hmvm_omp_2, but reads from the transposed packed array
// mat->rowmat_t and iterates the second product loop-transposed
// (rows of the output range are the outer loop).
void hmvm_omp_2t
(double *zau, const matrix2 *mat, const double *zu)
{
#pragma omp parallel
{
int ip,il,it;
int ndl,ndt,nstrtl,nstrtt,kt,itl,itt,ill;
double *zaut, *zbut;   // per-thread work arrays
int ls, le;            // 1-based row range this thread wrote to
int i;
int nd = mat->nd;
int nlf = mat->nlf;
int head;              // offset of the current block's data in rowmat_t
#pragma omp for
for(i=0;i<nd;i++)zau[i]=0.0;
// NOTE(review): malloc results are not checked for NULL
zaut = (double*)malloc(sizeof(double)*nd);
for(il=0;il<nd;il++)zaut[il]=0.0;
zbut = (double*)malloc(sizeof(double)*mat->ktmax);
ls = nd;
le = 1;
#pragma omp for
for(ip=0; ip<nlf; ip++){
ndl =mat->ndl[ip];
ndt =mat->ndt[ip];
nstrtl=mat->nstrtl[ip];
nstrtt=mat->nstrtt[ip];
if(nstrtl<ls)ls=nstrtl;
if(nstrtl+ndl-1>le)le=nstrtl+ndl-1;
if(mat->ltmtx[ip]==1){
// two-factor block: zbut = a1 * zu(part), then zaut(part) += a2 * zbut
kt=mat->kt[ip];
//for(il=0;il<kt;il++)zbut[il]=0.0;
head=mat->a1[ip];
for(il=0; il<kt; il++){
zbut[il]=0.0;
for(it=0; it<ndt; it++){
itt=it+nstrtt-1;
itl=it+il*ndt;
zbut[il] += mat->rowmat_t[head+itl]*zu[itt];
}
}
head=mat->a2[ip];
for(il=0; il<ndl; il++){
ill=il+nstrtl-1;
for(it=0; it<kt; it++){
itl=it+il*kt;
zaut[ill] += mat->rowmat_t[head+itl]*zbut[it];
}
}
} else if(mat->ltmtx[ip]==2){
// dense block: zaut(part) += a1 * zu(part) directly
head=mat->a1[ip];
for(il=0; il<ndl; il++){
ill=il+nstrtl-1;
for(it=0; it<ndt; it++){
itt=it+nstrtt-1;
itl=it+il*ndt;
zaut[ill] += mat->rowmat_t[head+itl]*zu[itt];
}
}
}
}
// merge this thread's private rows into the shared result
for(il=ls-1;il<=le-1;il++){
#pragma omp atomic
zau[il] += zaut[il];
}
free(zaut); free(zbut);
}
}
// ######## ######## ######## ########
// Driver: run each available hmvm kernel once into v and, if dump_result is
// nonzero, write the result vector to result_omp_*_d.txt so the variants
// can be compared.  mat and/or mat2 may be NULL; at least one must be set.
void hmvm_omp(const matrix *mat, const matrix2 *mat2, const double *b, int dump_result)
{
int i, nd;
FILE *F;
double *v=NULL;
printf("hmvm_omp: begin\n");
// nd comes from whichever representation is present
if(mat!=NULL)nd=mat->nd;else nd=mat2->nd;
// NOTE(review): malloc and fopen results are not checked for NULL
v=(double*)malloc(sizeof(double)*nd);
// hmvm
if(mat!=NULL){
printf("hmvm_omp_1\n");
for(i=0;i<nd;i++)v[i] = 0.0;
hmvm_omp_1(v, mat, b);
if(dump_result){
F = fopen("result_omp_1_d.txt", "w");
for(i=0;i<nd;i++)fprintf(F, "%.3E\n", v[i]);
fclose(F);
}
}
// hmvm (loop transposed)
if(mat!=NULL){
printf("hmvm_omp_1t\n");
for(i=0;i<nd;i++)v[i] = 0.0;
hmvm_omp_1t(v, mat, b);
if(dump_result){
F = fopen("result_omp_1t_d.txt", "w");
for(i=0;i<nd;i++)fprintf(F, "%.3E\n", v[i]);
fclose(F);
}
}
// hmvm using rowmat array
if(mat2!=NULL){
printf("hmvm_omp_2\n");
for(i=0;i<nd;i++)v[i] = 0.0;
hmvm_omp_2(v, mat2, b);
if(dump_result){
F = fopen("result_omp_2_d.txt", "w");
for(i=0;i<nd;i++)fprintf(F, "%.3E\n", v[i]);
fclose(F);
}
}
// hmvm using rowmat array (loop transposed)
if(mat2!=NULL){
printf("hmvm_omp_2t\n");
for(i=0;i<nd;i++)v[i] = 0.0;
hmvm_omp_2t(v, mat2, b);
if(dump_result){
F = fopen("result_omp_2t_d.txt", "w");
for(i=0;i<nd;i++)fprintf(F, "%.3E\n", v[i]);
fclose(F);
}
}
free(v);
printf("hmvm_omp: end\n");
}
// ######## ######## ######## ########
// Benchmark the four OpenMP hmvm kernels.  Each available kernel is run
// L=10 times; the first 5 runs are treated as warm-up and min/max/avg
// wall-clock times are reported over the remaining L-5 runs.
//
//   mat  : submat-based matrix (NULL skips hmvm_omp_1 / hmvm_omp_1t)
//   mat2 : rowmat-based matrix (NULL skips hmvm_omp_2 / hmvm_omp_2t)
//   b    : input vector of length nd (taken from whichever matrix is set)
void hmvm_omp_bench(const matrix *mat, const matrix2 *mat2, const double *b)
{
const int L=10;
int i, l, nd;
double d1, d2, dtimes[L], dmin, dmax, davg;
double *v=NULL;
printf("hmvm_omp_bench: begin\n");
if(mat!=NULL)nd=mat->nd;else nd=mat2->nd;
v=(double*)malloc(sizeof(double)*nd);
// hmvm
if(mat!=NULL){
printf("hmvm_omp_1\n");
for(l=0;l<L;l++){
for(i=0;i<nd;i++)v[i] = 0.0;
d1 = omp_get_wtime();
hmvm_omp_1(v, mat, b);
d2 = omp_get_wtime();
dtimes[l] = d2-d1;
}
dmin = 9999.99;
dmax = 0.0;
davg = 0.0;
// statistics over the non-warm-up runs [5, L)
for(i=5;i<L;i++){
davg += dtimes[i];
if(dmin>dtimes[i])dmin=dtimes[i];
if(dmax<dtimes[i])dmax=dtimes[i];
}
davg /= (L-5);
printf("TIME %d hmvm_omp_1 min %e max %e avg %e\n", L, dmin, dmax, davg);
}
// hmvm (loop transposed)
if(mat!=NULL){
printf("hmvm_omp_1t\n");
for(l=0;l<L;l++){
for(i=0;i<nd;i++)v[i] = 0.0;
d1 = omp_get_wtime();
hmvm_omp_1t(v, mat, b);
d2 = omp_get_wtime();
dtimes[l] = d2-d1;
}
dmin = 9999.99;
dmax = 0.0;
davg = 0.0;
for(i=5;i<L;i++){
davg += dtimes[i];
if(dmin>dtimes[i])dmin=dtimes[i];
if(dmax<dtimes[i])dmax=dtimes[i];
}
davg /= (L-5);
printf("TIME %d hmvm_omp_1t min %e max %e avg %e\n", L, dmin, dmax, davg);
}
// hmvm using rowmat array
// BUG FIX: this guard previously tested mat!=NULL, but hmvm_omp_2
// dereferences mat2; with mat set and mat2 NULL it crashed.  Guard on
// mat2, matching the hmvm_omp() driver above.
if(mat2!=NULL){
printf("hmvm_omp_2\n");
for(l=0;l<L;l++){
for(i=0;i<nd;i++)v[i] = 0.0;
d1 = omp_get_wtime();
hmvm_omp_2(v, mat2, b);
d2 = omp_get_wtime();
dtimes[l] = d2-d1;
}
dmin = 9999.99;
dmax = 0.0;
davg = 0.0;
for(i=5;i<L;i++){
davg += dtimes[i];
if(dmin>dtimes[i])dmin=dtimes[i];
if(dmax<dtimes[i])dmax=dtimes[i];
}
davg /= (L-5);
printf("TIME %d hmvm_omp_2 min %e max %e avg %e\n", L, dmin, dmax, davg);
}
// hmvm using rowmat array (loop transposed)
// BUG FIX: same as above -- hmvm_omp_2t uses mat2, so guard on mat2.
if(mat2!=NULL){
printf("hmvm_omp_2t\n");
for(l=0;l<L;l++){
for(i=0;i<nd;i++)v[i] = 0.0;
d1 = omp_get_wtime();
hmvm_omp_2t(v, mat2, b);
d2 = omp_get_wtime();
dtimes[l] = d2-d1;
}
dmin = 9999.99;
dmax = 0.0;
davg = 0.0;
for(i=5;i<L;i++){
davg += dtimes[i];
if(dmin>dtimes[i])dmin=dtimes[i];
if(dmax<dtimes[i])dmax=dtimes[i];
}
davg /= (L-5);
printf("TIME %d hmvm_omp_2t min %e max %e avg %e\n", L, dmin, dmax, davg);
}
free(v);
printf("hmvm_omp_bench: end\n");
}
|
target_parallel_for_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify %s
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp target parallel for'}}
#pragma omp target parallel for
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp target parallel for'}}
#pragma omp target parallel for foo
// Basic forms: a bare combined directive before a for loop is accepted;
// a non-loop statement after it is diagnosed (see inline directives).
void test_no_clause() {
int i;
#pragma omp target parallel for
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{statement after '#pragma omp target parallel for' must be a for loop}}
#pragma omp target parallel for
++i;
}
// Branching rules: jumps may not enter or leave the region (labels outside
// the loop are unreachable from inside and vice versa; return is rejected),
// while a goto to a label inside the same region (L2) is allowed.
void test_branch_protected_scope() {
int i = 0;
L1:
++i;
int x[24];
#pragma omp target parallel for
for (i = 0; i < 16; ++i) {
if (i == 5)
goto L1; // expected-error {{use of undeclared label 'L1'}}
else if (i == 6)
return; // expected-error {{cannot return from OpenMP region}}
else if (i == 7)
goto L2;
else if (i == 8) {
L2:
x[i]++;
}
}
if (x[0] == 0)
goto L2; // expected-error {{use of undeclared label 'L2'}}
else if (x[1] == 1)
goto L1;
}
// Unknown clause tokens after the directive only produce a warning.
void test_invalid_clause() {
int i;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target parallel for' are ignored}}
#pragma omp target parallel for foo bar
for (i = 0; i < 16; ++i)
;
}
// Stray punctuation (semicolons, commas) after the directive or a clause is
// warned about and ignored.
void test_non_identifiers() {
int i, x;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target parallel for' are ignored}}
#pragma omp target parallel for;
for (i = 0; i < 16; ++i)
;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target parallel for' are ignored}}
#pragma omp target parallel for private(x);
for (i = 0; i < 16; ++i)
;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target parallel for' are ignored}}
#pragma omp target parallel for, private(x);
for (i = 0; i < 16; ++i)
;
}
extern int foo();
// 'collapse' clause: malformed argument lists, non-constant / non-positive
// arguments, mismatched loop-nest depth, and nesting restrictions inside a
// collapsed region (see inline directives for each case).
void test_collapse() {
int i;
// expected-error@+1 {{expected '('}}
#pragma omp target parallel for collapse
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for collapse(
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for collapse()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for collapse(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for collapse(, )
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp target parallel for' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp target parallel for collapse 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target parallel for collapse(4
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target parallel for', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target parallel for collapse(4,
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target parallel for', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target parallel for collapse(4, )
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target parallel for', but found only 1}}
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target parallel for collapse(4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target parallel for', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target parallel for collapse(4 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target parallel for', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target parallel for collapse(4, , 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target parallel for', but found only 1}}
#pragma omp target parallel for collapse(4)
for (int i1 = 0; i1 < 16; ++i1)
for (int i2 = 0; i2 < 16; ++i2)
for (int i3 = 0; i3 < 16; ++i3)
for (int i4 = 0; i4 < 16; ++i4)
foo();
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target parallel for collapse(4, 8)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target parallel for', but found only 1}}
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target parallel for collapse(2.5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp target parallel for collapse(foo())
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target parallel for collapse(-5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target parallel for collapse(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target parallel for collapse(5 - 5)
for (i = 0; i < 16; ++i)
;
// expected-note@+1 {{defined as firstprivate}}
#pragma omp target parallel for collapse(2) firstprivate(i)
for (i = 0; i < 16; ++i)
// expected-note@+1 {{variable with automatic storage duration is predetermined as private; perhaps you forget to enclose 'omp for' directive into a parallel or another task region?}}
for (int j = 0; j < 16; ++j)
// expected-error@+2 2 {{reduction variable must be shared}}
// expected-error@+1 {{region cannot be closely nested inside 'target parallel for' region; perhaps you forget to enclose 'omp for' directive into a parallel region?}}
#pragma omp for reduction(+ : i, j)
for (int k = 0; k < 16; ++k)
i += j;
}
// 'private' clause: malformed argument lists and non-variable arguments are
// diagnosed; well-formed single/multi-variable lists are accepted.
void test_private() {
int i;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for private(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target parallel for private(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp target parallel for private(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for private()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for private(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp target parallel for private(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target parallel for private(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target parallel for private(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target parallel for private(x, y, z)
for (i = 0; i < 16; ++i) {
x = y * i + z;
}
}
// 'lastprivate' clause: same malformed-argument coverage as test_private.
void test_lastprivate() {
int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for lastprivate(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target parallel for lastprivate(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp target parallel for lastprivate(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for lastprivate()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for lastprivate(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp target parallel for lastprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target parallel for lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target parallel for lastprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target parallel for lastprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
// 'firstprivate' clause: malformed-argument coverage, plus combining
// lastprivate and firstprivate on the same variables is accepted.
void test_firstprivate() {
int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for firstprivate(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target parallel for firstprivate(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp target parallel for firstprivate(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for firstprivate()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for firstprivate(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp target parallel for firstprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target parallel for lastprivate(x) firstprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target parallel for lastprivate(x, y) firstprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target parallel for lastprivate(x, y, z) firstprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
// Sema coverage: the loop iteration variable of an associated 'for' must
// have integer or pointer type; float/double induction variables are
// rejected with the expected diagnostic.
void test_loop_messages() {
float a[100], b[100], c[100];
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp target parallel for
for (float fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp target parallel for
for (double fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
}
|
sample_sort.c |
/*********************************************************************
samplesort.c: source: http://www.cse.iitd.ernet.in/~dheerajb/MPI/codes/day-3/c/samplesort.c
Objective : To sort unsorted integers by sample sort algorithm
Write a MPI program to sort n integers, using sample
sort algorithm on a p processor of PARAM 10000.
Assume n is multiple of p. Sorting is defined as the
task of arranging an unordered collection of elements
into monotonically increasing (or decreasing) order.
postcondition: array[] is sorted in ascending order. ANSI C
provides a quicksort function called qsort(). Its
function prototype is in the standard header file
<stdlib.h>
Description : 1. Partitioning of the input data and local sort :
The first step of sample sort is to partition the data.
Initially, each one of the p processors stores n/p
elements of the sequence of the elements to be sorted.
Let Ai be the sequence stored at processor Pi. In the
first phase each processor sorts the local n/p elements
using a serial sorting algorithm. (You can use C
library sorting() for performing this local sort).
2. Choosing the Splitters :
The second phase of the algorithm determines the p-1
splitter elements S. This is done as follows. Each
processor Pi selects p-1 equally spaced elements from
the locally sorted sequence Ai. These p-1 elements
from these p(p-1) elements are selected to be the
splitters.
3. Completing the sort :
In the third phase, each processor Pi uses the splitters
to partition the local sequence Ai into p subsequences
Ai,j such that for 0 <=j <p-1 all the elements in Ai,j
are smaller than Sj , and for j=p-1 (i.e., the last
element) Ai, j contains the rest elements. Then each
processor i sends the sub-sequence Ai,j to processor Pj.
Finally, each processor merge-sorts the received
sub-sequences, completing the sorting algorithm.
Input : Process with rank 0 generates unsorted integers
using C library call rand().
Output : Process with rank 0 stores the sorted elements in
the file sorted_data_out.
*********************************************************************/
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <limits.h>
#include <assert.h>
//#define TOTAL_ELEMENT_PER_PE (4*1024*1024)
#define TOTAL_ELEMENT_PER_PE (8*1024*1024)
#define ELEMENT_T int
#define COUNT_T uint64_t
#define VERIFY
#define HC_GRANULARITY 2048
//#define HC_GRANULARITY 3072
#ifdef _OSHMEM_
#include <shmem.h>
long pSync[_SHMEM_BCAST_SYNC_SIZE];
#define RESET_BCAST_PSYNC { int _i; for(_i=0; _i<_SHMEM_BCAST_SYNC_SIZE; _i++) { pSync[_i] = _SHMEM_SYNC_VALUE; } shmem_barrier_all(); }
#endif
#ifdef _MPI_
#include <mpi.h>
#define ELEMENT_T_MPI MPI_INT
#define shmem_malloc malloc
#define shmem_free free
#endif
// Wall-clock timestamp in MICROseconds since the epoch (despite the name).
// NOTE(review): t.tv_sec*1000000 assumes a 64-bit 'long'; on an LP32
// target this overflows -- confirm the target ABI before reuse.
long seconds() {
struct timeval t;
gettimeofday(&t,NULL);
return t.tv_sec*1000000+t.tv_usec;
}
static int compare(const void *i, const void *j)
{
if ((*(ELEMENT_T*)i) > (*(ELEMENT_T *)j))
return (1);
if ((*(ELEMENT_T *)i) < (*(ELEMENT_T *)j))
return (-1);
return (0);
}
// Hoare-style partition of data[left..right] around the value of the
// middle element. On return, data[left..ret-1] <= pivot-range and
// data[ret..right] >= pivot-range; returns the start of the right part.
// The exact scan/swap order below is what guarantees i,j cross correctly;
// do not reorder.
int partition(ELEMENT_T* data, int left, int right) {
int i = left;
int j = right;
ELEMENT_T tmp;
ELEMENT_T pivot = data[(left + right) / 2];
while (i <= j) {
while (data[i] < pivot) i++;
while (data[j] > pivot) j--;
if (i <= j) {
tmp = data[i];
data[i] = data[j];
data[j] = tmp;
i++;
j--;
}
}
return i;
}
#ifdef _ASYNC_OSHMEM_
// Descriptor for one asynchronous sort task: the shared buffer plus the
// inclusive [left, right] index range the task is responsible for.
typedef struct sort_data_t {
ELEMENT_T *buffer;
int left;
int right;
} sort_data_t;
// Recursive parallel quicksort step for the task-based OpenSHMEM build.
// 'arg' is a heap-allocated sort_data_t describing the [left,right]
// sub-range; each task owns its descriptor and frees it before returning.
void par_sort(void* arg) {
sort_data_t *in = (sort_data_t*) arg;
ELEMENT_T* data = in->buffer;
int left = in->left;
int right = in->right;
// Recurse in parallel only while the range is large enough to amortize
// task-spawn overhead; below HC_GRANULARITY fall back to serial qsort.
if (right - left + 1 > HC_GRANULARITY) {
int index = partition(data, left, right);
shmem_task_scope_begin();
if (left < index - 1) {
sort_data_t* buf = (sort_data_t*) malloc(sizeof(sort_data_t));
buf->buffer = data;
buf->left = left;
buf->right = index - 1;
shmem_task_nbi(par_sort, buf, NULL);
}
if (index < right) {
sort_data_t* buf = (sort_data_t*) malloc(sizeof(sort_data_t));
buf->buffer = data;
buf->left = index;
buf->right = right;
shmem_task_nbi(par_sort, buf, NULL);
}
// Closing the scope waits for both child tasks spawned above.
shmem_task_scope_end();
}
else {
// quicksort in C library
qsort(data+left, right - left + 1, sizeof(ELEMENT_T), compare);
}
free(arg);
}
// Entry point for the task-based sort: wraps the whole buffer in one
// root task, runs it inside a task scope (which blocks until the whole
// recursive sort finishes), and reports elapsed time in milliseconds.
void sorting(ELEMENT_T* buffer, int size) {
sort_data_t* buf = (sort_data_t*) malloc(sizeof(sort_data_t));
buf->buffer = buffer;
buf->left = 0;
buf->right = size - 1;
long start = seconds();
shmem_task_scope_begin();
shmem_task_nbi(par_sort, buf, NULL);
shmem_task_scope_end();
double end = (((double)(seconds()-start))/1000000) * 1000; // msec
printf("Sorting (%d) = %.3f\n",size, end);
}
#else // OpenMP
// Recursive parallel quicksort over data[left..right] (inclusive) using
// OpenMP tasks; must be first reached from inside a parallel region (see
// sorting() below). Small ranges fall back to serial qsort.
void par_sort(ELEMENT_T* data, int left, int right) {
if (right - left + 1 > HC_GRANULARITY) {
int index = partition(data, left, right);
if (left < index - 1) {
#pragma omp task
{
par_sort(data, left, index - 1);
}
}
if (index < right) {
#pragma omp task
{
par_sort(data, index, right);
}
}
// Wait for both child tasks before this frame unwinds, so the
// sub-ranges are fully sorted when the caller's taskwait completes.
#pragma omp taskwait
}
else {
// quicksort in C library
qsort(data+left, right - left + 1, sizeof(ELEMENT_T), compare);
}
}
// Entry point for the OpenMP task sort: one thread (single nowait) seeds
// the recursion; the rest of the team executes the spawned tasks. The
// implicit barrier at the end of the parallel region joins everything.
// Prints elapsed time in milliseconds.
void sorting(ELEMENT_T* buffer, int size) {
long start = seconds();
#pragma omp parallel
{
#pragma omp single nowait
{
par_sort(buffer, 0, size-1);
}
}
double end = (((double)(seconds()-start))/1000000) * 1000; // msec
printf("Sorting (%d) = %.3f\n",size, end);
}
#endif
#ifdef _OSHMEM_
// MPI_Scatter-like collective on symmetric memory: the root PE pushes
// chunk i of 'src' (count elements) into 'dest' on PE i via shmem_put32.
// Barriers delimit the collective; every PE must call this.
void shmem_scatter32(ELEMENT_T* dest, ELEMENT_T* src, int root, int count) {
int i;
int me = shmem_my_pe();
int procs = shmem_n_pes();
shmem_barrier_all();
if(me == root) {
for(i=0; i<procs; i++) {
ELEMENT_T* start = &src[i * count];
shmem_put32(dest, start, count, i);
}
}
shmem_barrier_all();
}
// MPI_Gather-like collective: every PE pushes its 'src' block into slot
// me*count of 'dest' on the root PE. The trailing barrier guarantees the
// root sees all puts before anyone proceeds.
void shmem_gather32(ELEMENT_T* dest, ELEMENT_T* src, int root, int count) {
int me = shmem_my_pe();
int procs = shmem_n_pes();
shmem_barrier_all();
ELEMENT_T* target_index = &dest[me * count];
shmem_put32(target_index, src, count, root);
shmem_barrier_all();
}
// MPI_Alltoall-like collective: this PE sends src chunk i (the data
// destined for PE i) into slot me*count of 'dest' on PE i, so after the
// final barrier dest on each PE holds one chunk from every PE, ordered
// by sender rank.
void shmem_alltoall32(ELEMENT_T* dest, ELEMENT_T* src, int count) {
int i;
int me = shmem_my_pe();
int procs = shmem_n_pes();
shmem_barrier_all();
for(i=0; i<procs; i++) {
shmem_put32(&dest[me*count], &src[i*count], count, i);
}
shmem_barrier_all();
}
#endif
#ifndef HCLIB_COMM_WORKER_FIXED
void entrypoint(void *arg) {
#else
int main (int argc, char *argv[]) {
/**** Initialising ****/
#if defined(_OSHMEM_) && defined(_MPI_)
printf("ERROR: You cannot use both OpenSHMEM as well as MPI\n");
exit(1);
#endif
#if defined(_OSHMEM_)
shmem_init ();
#elif defined(_MPI_)
MPI_Init(&argc, &argv);
#else
printf("ERROR: Use either OpenSHMEM or MPI\n");
exit(1);
#endif
#endif
/* Variable Declarations */
int Numprocs,MyRank, Root = 0;
COUNT_T i,j,k, NoofElements, NoofElements_Bloc,
NoElementsToSort;
COUNT_T count, temp;
ELEMENT_T *Input, *InputData;
ELEMENT_T *Splitter, *AllSplitter;
ELEMENT_T *Buckets, *BucketBuffer, *LocalBucket;
ELEMENT_T *OutputBuffer, *Output, *target_index;
long start_time = seconds();
long local_timer_start;
double local_timer_end, end_time, init_time;
double communication_timer=0;
#if defined(_MPI_)
MPI_Comm_size(MPI_COMM_WORLD, &Numprocs);
MPI_Comm_rank(MPI_COMM_WORLD, &MyRank);
#else
Numprocs = shmem_n_pes ();
MyRank = shmem_my_pe ();
#endif
assert((TOTAL_ELEMENT_PER_PE * Numprocs) < INT_MAX && "Change count type from int to uint64_t");
NoofElements = TOTAL_ELEMENT_PER_PE * Numprocs;
/**** Reading Input ****/
Input = (ELEMENT_T *) shmem_malloc (NoofElements*sizeof(*Input));
if(Input == NULL) {
printf("Error : Can not allocate memory \n");
}
if (MyRank == Root){
printf("\n-----\nmkdir timedrun fake\n\n");
/* Initialise random number generator */
printf ("Generating input Array for Sorting %d numbers\n",NoofElements);
srand48((ELEMENT_T)NoofElements);
for(i=0; i< NoofElements; i++) {
Input[i] = rand();
}
}
/**** Sending Data ****/
NoofElements_Bloc = NoofElements / Numprocs;
InputData = (ELEMENT_T *) shmem_malloc (NoofElements_Bloc * sizeof (*InputData));
if(InputData == NULL) {
printf("Error : Can not allocate memory \n");
}
local_timer_start = seconds();
#if defined(_MPI_)
MPI_Scatter(Input, NoofElements_Bloc, ELEMENT_T_MPI, InputData,
NoofElements_Bloc, ELEMENT_T_MPI, Root, MPI_COMM_WORLD);
#else
shmem_scatter32(InputData, Input, 0, NoofElements_Bloc);
#endif
local_timer_end = (((double)(seconds()-local_timer_start))/1000000) * 1000;
communication_timer += local_timer_end;
init_time = local_timer_end;
printf("Scatter = %.3f\n",local_timer_end);
/**** Sorting Locally ****/
sorting(InputData, NoofElements_Bloc);
/**** Choosing Local Splitters ****/
Splitter = (ELEMENT_T *) shmem_malloc (sizeof (ELEMENT_T) * (Numprocs-1));
if(Splitter == NULL) {
printf("Error : Can not allocate memory \n");
}
for (i=0; i< (Numprocs-1); i++){
Splitter[i] = InputData[NoofElements/(Numprocs*Numprocs) * (i+1)];
}
/**** Gathering Local Splitters at Root ****/
AllSplitter = (ELEMENT_T *) shmem_malloc (sizeof (ELEMENT_T) * Numprocs * (Numprocs-1));
if(AllSplitter == NULL) {
printf("Error : Can not allocate memory \n");
}
local_timer_start = seconds();
#if defined(_MPI_)
MPI_Gather (Splitter, Numprocs-1, ELEMENT_T_MPI, AllSplitter, Numprocs-1,
ELEMENT_T_MPI, Root, MPI_COMM_WORLD);
#else
shmem_gather32(AllSplitter, Splitter, 0, Numprocs-1);
#endif
local_timer_end = (((double)(seconds()-local_timer_start))/1000000) * 1000;
communication_timer += local_timer_end;
printf("Gather = %.3f\n",local_timer_end);
/**** Choosing Global Splitters ****/
if (MyRank == Root){
sorting (AllSplitter, Numprocs*(Numprocs-1));
for (i=0; i<Numprocs-1; i++)
Splitter[i] = AllSplitter[(Numprocs-1)*(i+1)];
}
local_timer_start = seconds();
/**** Broadcasting Global Splitters ****/
#if defined(_MPI_)
MPI_Bcast (Splitter, Numprocs-1, ELEMENT_T_MPI, 0, MPI_COMM_WORLD);
#else
RESET_BCAST_PSYNC;
shmem_broadcast32(Splitter, Splitter, Numprocs-1, 0, 0, 0, Numprocs, pSync);
shmem_barrier_all();
#endif
local_timer_end = (((double)(seconds()-local_timer_start))/1000000) * 1000;
communication_timer += local_timer_end;
printf("Bcast = %.3f\n",local_timer_end);
/**** Creating Numprocs Buckets locally ****/
Buckets = (ELEMENT_T *) shmem_malloc (sizeof (ELEMENT_T) * (NoofElements + Numprocs));
if(Buckets == NULL) {
printf("Error : Can not allocate memory \n");
}
j = 0;
k = 1;
for (i=0; i<NoofElements_Bloc; i++){
if(j < (Numprocs-1)){
if (InputData[i] < Splitter[j])
Buckets[((NoofElements_Bloc + 1) * j) + k++] = InputData[i];
else{
Buckets[(NoofElements_Bloc + 1) * j] = k-1;
k=1;
j++;
i--;
}
}
else
Buckets[((NoofElements_Bloc + 1) * j) + k++] = InputData[i];
}
Buckets[(NoofElements_Bloc + 1) * j] = k - 1;
shmem_free(Splitter);
shmem_free(AllSplitter);
/**** Sending buckets to respective processors ****/
BucketBuffer = (ELEMENT_T *) shmem_malloc (sizeof (ELEMENT_T) * (NoofElements + Numprocs));
if(BucketBuffer == NULL) {
printf("Error : Can not allocate memory \n");
}
local_timer_start = seconds();
#if defined(_MPI_)
MPI_Alltoall (Buckets, NoofElements_Bloc + 1, ELEMENT_T_MPI, BucketBuffer,
NoofElements_Bloc + 1, ELEMENT_T_MPI, MPI_COMM_WORLD);
#else
shmem_alltoall32(BucketBuffer, Buckets, NoofElements_Bloc + 1);
#endif
local_timer_end = (((double)(seconds()-local_timer_start))/1000000) * 1000;
communication_timer += local_timer_end;
printf("AlltoAll = %.3f\n",local_timer_end);
/**** Rearranging BucketBuffer ****/
LocalBucket = (ELEMENT_T *) shmem_malloc (sizeof (ELEMENT_T) * 2 * NoofElements / Numprocs);
if(LocalBucket == NULL) {
printf("Error : Can not allocate memory \n");
}
count = 1;
for (j=0; j<Numprocs; j++) {
k = 1;
for (i=0; i<BucketBuffer[(NoofElements/Numprocs + 1) * j]; i++)
LocalBucket[count++] = BucketBuffer[(NoofElements/Numprocs + 1) * j + k++];
}
LocalBucket[0] = count-1;
/**** Sorting Local Buckets using Bubble Sort ****/
/*sorting (InputData, NoofElements_Bloc, sizeof(int), intcompare); */
NoElementsToSort = LocalBucket[0];
sorting (&LocalBucket[1], NoElementsToSort);
/**** Gathering sorted sub blocks at root ****/
OutputBuffer = (ELEMENT_T *) shmem_malloc (sizeof(ELEMENT_T) * 2 * NoofElements);
if(OutputBuffer == NULL) {
printf("Error : Can not allocate memory \n");
}
local_timer_start = seconds();
#if defined(_MPI_)
MPI_Gather (LocalBucket, 2*NoofElements_Bloc, ELEMENT_T_MPI, OutputBuffer,
2*NoofElements_Bloc, ELEMENT_T_MPI, Root, MPI_COMM_WORLD);
#else
shmem_gather32(OutputBuffer, LocalBucket, 0, (2*NoofElements_Bloc));
#endif
local_timer_end = (((double)(seconds()-local_timer_start))/1000000) * 1000;
communication_timer += local_timer_end;
printf("Gather = %.3f\n",local_timer_end);
end_time = (((double)(seconds()-start_time))/1000000) * 1000; // msec
/**** Rearranging output buffer ****/
if (MyRank == Root){
Output = (ELEMENT_T *) malloc (sizeof (ELEMENT_T) * NoofElements);
count = 0;
for(j=0; j<Numprocs; j++){
k = 1;
for(i=0; i<OutputBuffer[(2 * NoofElements/Numprocs) * j]; i++)
Output[count++] = OutputBuffer[(2*NoofElements/Numprocs) * j + k++];
}
printf ( "Number of Elements to be sorted : %d \n", NoofElements);
ELEMENT_T prev = 0;
int fail = 0;
for (i=0; i<NoofElements; i++){
if(Output[i] < prev) { printf("Failed at index %d\n",i); fail = 1; }
prev = Output[i];
}
if(fail) printf("Sorting FAILED\n");
else printf("Sorting PASSED\n");
printf("Time for initialization (tInit) = %.3f\n",init_time);
printf("Time for communicaions (tComm)= %.3f\n",communication_timer); // communication_timer includes init_time
printf("Time for computations (tComp) = %.3f\n",(end_time - communication_timer));
printf("Total Time (excluding initalization = tTotal) = %.3f\n",(end_time - init_time));
free(Output);
printf("============================ Tabulate Statistics ============================\ntInit\ttComm\ttComp\ttTotal\n%.3f\t%.3f\t%.3f\t%.3f\n",init_time, communication_timer, (end_time - communication_timer), (end_time - init_time));
printf("=============================================================================\n===== TEST PASSED in %.3f msec =====\n",end_time);
}/* MyRank==0*/
shmem_free(Input);
shmem_free(OutputBuffer);
shmem_free(InputData);
shmem_free(Buckets);
shmem_free(BucketBuffer);
shmem_free(LocalBucket);
#ifndef HCLIB_COMM_WORKER_FIXED
}
// Program entry for the non-fixed-comm-worker build: initializes exactly
// one runtime (OpenSHMEM or MPI, mutually exclusive), runs the sort via
// entrypoint(), then finalizes the chosen runtime.
int main (int argc, char ** argv) {
#if defined(_OSHMEM_) && defined(_MPI_)
printf("ERROR: You cannot use both OpenSHMEM as well as MPI\n");
exit(1);
#endif
#if defined(_OSHMEM_)
shmem_init ();
#ifdef _ASYNC_OSHMEM_
// Task runtime drives entrypoint on its worker pool.
shmem_workers_init(entrypoint, NULL);
#else
entrypoint(NULL);
#endif //_ASYNC_OSHMEM_
shmem_finalize ();
#elif defined(_MPI_)
MPI_Init(&argc, &argv);
entrypoint(NULL);
MPI_Finalize();
#else
printf("ERROR: Use either OpenSHMEM or MPI\n");
exit(1);
#endif
return 0;
}
#else // HCLIB_COMM_WORKER_FIXED
/**** Finalize ****/
#if defined(_OSHMEM_)
shmem_finalize();
#elif defined(_MPI_)
MPI_Finalize();
#endif
}
#endif
|
GB_binop__isgt_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__isgt_int16
// A.*B function (eWiseMult): GB_AemultB__isgt_int16
// A*D function (colscale): GB_AxD__isgt_int16
// D*A function (rowscale): GB_DxB__isgt_int16
// C+=B function (dense accum): GB_Cdense_accumB__isgt_int16
// C+=b function (dense accum): GB_Cdense_accumb__isgt_int16
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isgt_int16
// C=scalar+B GB_bind1st__isgt_int16
// C=scalar+B' GB_bind1st_tran__isgt_int16
// C=A+scalar GB_bind2nd__isgt_int16
// C=A'+scalar GB_bind2nd_tran__isgt_int16
// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = (aij > bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x > y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGT || GxB_NO_INT16 || GxB_NO_ISGT_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; no mask, no accumulator.
// Auto-generated kernel: body comes from the shared template. Returns
// GrB_NO_VALUE when this operator/type pair is disabled at compile time.
GrB_Info GB_Cdense_ewise3_noaccum__isgt_int16
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate sparse B into dense C. The *_slice arrays describe
// how B's entries were pre-sliced across 'ntasks' parallel tasks.
GrB_Info GB_Cdense_accumB__isgt_int16
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar (passed type-erased via p_bwork) into a
// dense matrix C.
GrB_Info GB_Cdense_accumb__isgt_int16
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// Unreachable: the inner block above already returned. Generated
// boilerplate; kept byte-identical since this file is auto-generated.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale by a diagonal matrix D; per-column work is
// pre-sliced into ntasks via the *_slice arrays.
GrB_Info GB_AxD__isgt_int16
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale by a diagonal matrix D.
GrB_Info GB_DxB__isgt_int16
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B (set union pattern). The NULL slice
// pointers are populated inside the template as needed and released by
// GB_FREE_ALL (defined just above this function).
GrB_Info GB_AaddB__isgt_int16
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B (set intersection pattern). Same
// slice/cleanup protocol as GB_AaddB above.
GrB_Info GB_AemultB__isgt_int16
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x > Bx [p]) for all entries present in B's bitmap Bb (GBB
// treats a NULL Bb as "all present"). Scalar x is bound as the first
// operand; arguments are type-erased GB_void pointers.
GrB_Info GB_bind1st__isgt_int16
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *Cx = (int16_t *) Cx_output ;
int16_t x = (*((int16_t *) x_input)) ;
int16_t *Bx = (int16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
int16_t bij = Bx [p] ;
Cx [p] = (x > bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] > y) for all entries present in A's bitmap Ab.
// Scalar y is bound as the second operand; mirror image of GB_bind1st.
GrB_Info GB_bind2nd__isgt_int16
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int16_t *Cx = (int16_t *) Cx_output ;
int16_t *Ax = (int16_t *) Ax_input ;
int16_t y = (*((int16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int16_t aij = Ax [p] ;
Cx [p] = (aij > y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = Ax [pA] ; \
Cx [pC] = (x > aij) ; \
}
// C = op (x, A'): transpose A and apply the operator with scalar x bound
// first; the per-entry work is the GB_CAST_OP macro defined just above.
GrB_Info GB_bind1st_tran__isgt_int16
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// Restore GB_ATYPE for the code that follows this function.
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = Ax [pA] ; \
Cx [pC] = (aij > y) ; \
}
// C = op (A', y): transpose A and apply the operator with scalar y bound
// second; per-entry work comes from the GB_CAST_OP macro defined above.
GrB_Info GB_bind2nd_tran__isgt_int16
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
omp_ex_30.c | #include <stdio.h>
#include <omp.h>
/*
MIT License
Copyright (c) 2019 NOUREDDINE DAGHBOUDJ
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
// Demo: the schedule(runtime) clause defers iteration scheduling to the
// OMP_SCHEDULE environment variable / omp_set_schedule().
// Prints which thread executed each iteration (interleaving is
// nondeterministic by design).
int main()
{
    #pragma omp parallel for schedule(runtime)
    for (unsigned int i = 0; i < 16; i++)
    {
        unsigned id = omp_get_thread_num();
        // %u matches the unsigned arguments; the original "%i" was a
        // signed/unsigned format-specifier mismatch.
        printf("i = %u from thread: %u\n", i, id);
    }
    return 0;
}
|
state.h | #pragma once
#ifndef STATE_H
#define STATE_H
#include "logger.h"
//#include "pop.h"
struct state{
vector<long long> ptevals; //number of instruction evals
vector<long long> numevals; //number of evals on each thread
vector<int> genevals; //total evals for each generation
vector <int> fit_best;
vector <float> fit_mean;
vector <float> fit_med;
vector <float> fit_std;
vector <float> size_mean;
vector<float> median_lex_cases;
vector<float> median_lex_pool;
// vector<float> median_passes_per_case;
vector <int> size_med;
vector <float> size_std;
vector <float> eff_size;
vector<int> eHC_updates;
vector<int> eHC_ties;
int total_eHC_updates;
float current_eHC_updates;
int total_eHC_ties;
float current_eHC_ties;
vector<int> pHC_updates;
float current_pHC_updates;
int total_pHC_updates;
vector<int> good_cross;
vector<int> bad_cross;
vector<int> neut_cross;
float good_cross_pct;
float neut_cross_pct;
float bad_cross_pct;
logger out;
state()
{
int nt=0;
#if defined(_WIN32)
#pragma omp parallel
{
nt = omp_get_max_threads();
}
#else
#pragma omp parallel
{
nt = omp_get_num_threads();
}
#endif
ptevals.assign(nt,0);
//ptevals.resize(nt);
//numevals.resize(nt);
median_lex_cases.assign(nt, 0);
median_lex_pool.assign(nt, 0);
// median_passes_per_case.assign(nt, 0);
numevals.assign(nt,0);
genevals.push_back(0);
eHC_updates.assign(nt,0);
eHC_ties.assign(nt, 0);
pHC_updates.assign(nt,0);
good_cross.assign(nt,0);
bad_cross.assign(nt,0);
neut_cross.assign(nt,0);
total_eHC_updates=0;
current_eHC_updates = 0;
total_pHC_updates=0;
current_pHC_updates = 0;
total_eHC_ties = 0;
good_cross_pct=0;
neut_cross_pct=0;
}
~state() {} // all members (vectors, logger) clean up via their own destructors
// Eval count recorded for the most recent generation. Safe at startup:
// the constructor seeds genevals with a single 0 entry.
int getgenevals()
{
return genevals.back();
}
// Record the evals for the generation just finished: the current grand
// total (sum of per-thread numevals) minus what earlier generations
// already account for (totalevals()).
void setgenevals()
{
unsigned long gentmp=0;
for(unsigned int i = 0;i<numevals.size();++i)
gentmp+=numevals.at(i);
genevals.push_back(gentmp - totalevals());
}
// Grand total of evaluations: the sum of every per-generation count.
long long totalevals()
{
long long total = 0;
for (const int per_gen : genevals)
total += per_gen;
return total;
}
// Grand total of instruction (point) evaluations across all threads.
long long totalptevals()
{
long long total = 0;
for (const long long per_thread : ptevals)
total += per_thread;
return total;
}
// Mean of the per-thread median-lexicase-case counts, counting only
// threads that recorded a value (> 0).
// Returns 0 when no thread recorded anything; previously this divided
// 0/0 and returned NaN (the commented-out sibling below shows the
// intended guard).
float get_median_lex_cases()
{
float sz = 0;
float mlc = 0;
for (unsigned int i = 0; i < median_lex_cases.size(); ++i) {
if (median_lex_cases[i] > 0) {
++sz;
mlc += median_lex_cases[i];
}
}
if (sz == 0) return 0;  // avoid 0/0 -> NaN
return mlc/sz;
}
// Mean of the per-thread median-lexicase-pool sizes, counting only
// threads that recorded a value (> 0).
// Returns 0 when no thread recorded anything; previously this divided
// 0/0 and returned NaN.
float get_median_lex_pool()
{
float sz = 0;
float mlp = 0;
for (unsigned int i = 0; i < median_lex_pool.size(); ++i) {
if (median_lex_pool[i] > 0) {
++sz;
mlp += median_lex_pool[i];
}
}
if (sz == 0) return 0;  // avoid 0/0 -> NaN
return mlp / sz;
}
// float get_median_passes_per_case()
// {
// float sz = 0;
// float mpc = 0;
// for (unsigned int i = 0; i < median_passes_per_case.size(); ++i) {
// if (median_passes_per_case[i] > 0) {
// ++sz;
// mpc += median_passes_per_case[i];
// }
// }
// if (sz == 0) sz = 1;
// return mpc / sz;
// //return accumulate(median_lex_pool.begin(), median_lex_pool.end(), 0.0) / median_lex_pool.size();
// }
int setPHCupdates()
{
int updates=0;
for(unsigned int i =0;i<pHC_updates.size();++i)
updates+=pHC_updates[i];
int val = (updates-total_pHC_updates);
total_pHC_updates+=val;
current_pHC_updates = float(val);
return val;
}
int setEHCupdates()
{
int updates=0;
for(unsigned int i =0;i<eHC_updates.size();++i)
updates+=eHC_updates[i];
int val = (updates-total_eHC_updates);
total_eHC_updates+=val;
current_eHC_updates = float(val);
return val;
}
int setEHCties()
{
int ties = 0;
for (unsigned int i = 0; i<eHC_ties.size(); ++i)
ties += eHC_ties[i];
int val = (ties - total_eHC_ties);
total_eHC_ties += val;
current_eHC_ties = float(val);
return val;
}
float getGoodCrossPct()
{
float total_good=0;
float total=0;
for(unsigned int i=0;i<good_cross.size();++i)
{
total_good+=good_cross.at(i);
total += good_cross.at(i)+bad_cross.at(i)+neut_cross.at(i);
}
//good_cross.assign(omp_get_max_threads(),0);
if (total==0){
good_cross_pct=0;
return 0;
}
else
{
good_cross_pct = total_good/float(total)*100;
return good_cross_pct;
}
}
float getNeutCrossPct()
{
float total_neut=0;
float total=0;
for(unsigned int i=0;i<neut_cross.size();++i)
{
total_neut+=neut_cross.at(i);
total += neut_cross.at(i)+bad_cross.at(i)+good_cross.at(i);
}
//neut_cross.assign(omp_get_max_threads(),0);
if (total==0){
neut_cross_pct = 0;
return 0;
}
else
{
neut_cross_pct = total_neut/total*100;
return neut_cross_pct;
}
}
// Percentage of crossover events that worsened fitness, summed over all
// threads.  NOTE: this method also resets all three cross counters via
// clearCross(), so in a reporting cycle it must be called after
// getGoodCrossPct() / getNeutCrossPct().
float getBadCrossPct()
{
float total_bad=0;
float total=0;
// Fix: iterate over bad_cross (the vector actually summed) rather than
// neut_cross.  All three vectors are sized identically in practice, but
// indexing by the wrong one was fragile.
for(unsigned int i=0;i<bad_cross.size();++i)
{
total_bad+=bad_cross.at(i);
total += neut_cross.at(i)+bad_cross.at(i)+good_cross.at(i);
}
clearCross();
if (total==0){
bad_cross_pct = 0;
return 0;
}
else
{
bad_cross_pct = total_bad/total*100;
return bad_cross_pct;
}
}
void clearCross()
{
for (size_t i =0; i<good_cross.size(); ++i){
good_cross[i] = 0;
bad_cross[i] = 0;
neut_cross[i] = 0;
}
}
// Classify each individual's crossover outcome by comparing its fitness to
// its parent's, and tally the result in this thread's counter slot.
// (parentfitness > fitness counts as a good cross, so lower fitness appears
// to be better here -- confirm against the fitness convention used elsewhere.)
void setCrossPct(vector<ind>& pop)
{
const int tid = omp_get_thread_num();  // hoisted: constant for the whole loop
for(size_t i=0;i<pop.size();++i)       // size_t: avoids signed/unsigned mismatch
{
const ind& p = pop.at(i);              // single lookup per individual
if (p.parentfitness > p.fitness)
++good_cross[tid];
else if(p.parentfitness == p.fitness)
++neut_cross[tid];
else
++bad_cross[tid];
}
}
// Reset all evaluation bookkeeping and statistics traces, then re-size the
// per-thread counters for the current OpenMP thread limit and seed the
// generation-evaluation history with a zero entry.
void clear()
{
    ptevals.clear();
    numevals.clear();
    genevals.clear();
    fit_best.clear();
    fit_mean.clear();
    fit_med.clear();
    fit_std.clear();
    size_mean.clear();
    size_med.clear();
    size_std.clear();
    const int nthreads = omp_get_max_threads();
    ptevals.resize(nthreads);
    numevals.resize(nthreads);
    genevals.push_back(0);
}
};
#endif
|
diamond.h | // This code is modified from AutoMine and GraphZero
// Daniel Mawhirter and Bo Wu. SOSP 2019.
// AutoMine: Harmonizing High-Level Abstraction and High Performance for Graph Mining
// Count "diamond" embeddings: an edge (v0,v1) plus two distinct vertices
// v2, v3 that are each adjacent to both v0 and v1.
// The v1 < v0 and v3 < v2 orderings enumerate each embedding exactly once.
// NOTE(review): the `break` (rather than `continue`) assumes neighbour lists
// g.N(.) are sorted ascending -- the AutoMine/GraphZero convention; confirm.
#pragma omp parallel for schedule(dynamic,1) reduction(+:counter)
for (vidType v0 = 0; v0 < g.V(); v0++) {
auto y0 = g.N(v0);
for (vidType v1 : g.N(v0)) {
if (v1 >= v0) break;  // keep only v1 < v0
auto y0y1 = intersection_set(y0, g.N(v1));  // common neighbours of v0 and v1
for (vidType v2 : y0y1) {
for (vidType v3 : y0y1) {
if (v3 >= v2) break;  // unordered pair (v2,v3) counted once
counter += 1;
}
}
}
}
|
countersparallel.c | //counters parallel version HPC Felix Feliu
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <sys/types.h>
#include <memory.h>
#include <malloc.h>
#include <papi.h>
#include <omp.h>
#define SIZE 1000
/*
 * Multiply two SIZE x SIZE matrices in parallel while sampling six PAPI
 * hardware counters, then print each counter.
 *
 * Fixes vs. the original:
 *  - events[] was declared with size 2 but six initializers (a C constraint
 *    violation), and values[] (size 2) was read at indices up to 5 (out of
 *    bounds).  Both arrays now hold all six events, and six counters are
 *    started/read.
 *  - "share(SIZE)" is not an OpenMP clause (and SIZE is a macro, not a
 *    variable); replaced by private(j,k) -- the parallel-for loop variable i
 *    is privatized automatically.
 *  - The three float[SIZE][SIZE] arrays (~12 MB total) are made static so
 *    they no longer risk overflowing the default thread stack.
 *  - Matrix initialization uses proper 2-D indexing instead of indexing
 *    past row 0 with mresult[0][i], i < SIZE*SIZE (undefined behavior).
 *  - printf indices now match the events[] order (FP_OPS is slot 3, not 1),
 *    and the "strore" typo is corrected.
 */
int main(int argc, char** argv) {
	static float matrixa[SIZE][SIZE], matrixb[SIZE][SIZE], mresult[SIZE][SIZE];
	int i, j, k;
	/* Hardware counters to sample; values[n] pairs with events[n]. */
	int events[6] = { PAPI_TOT_INS, PAPI_TOT_IIS, PAPI_LD_INS, PAPI_FP_OPS, PAPI_FP_INS, PAPI_SR_INS };
	int ret;
	long long values[6];
	if (PAPI_num_counters() < 6) {
		fprintf(stderr, "No hardware counters here, or PAPI not supported.\n");
		exit(1);
	}
	if ((ret = PAPI_start_counters(events, 6)) != PAPI_OK) {
		fprintf(stderr, "PAPI failed to start counters: %s\n", PAPI_strerror(ret));
		exit(1);
	}
	/* Initialize the matrix arrays */
	for (i = 0; i < SIZE; i++) {
		for (j = 0; j < SIZE; j++) {
			mresult[i][j] = 0.0;
			matrixa[i][j] = matrixb[i][j] = rand() * (float)1.1;
		}
	}
	/* Matrix-matrix multiply: rows are distributed across threads, so j and k
	   must be thread-private. */
	#pragma omp parallel for schedule(static,4) private(j,k)
	for (i = 0; i < SIZE; i++)
		for (j = 0; j < SIZE; j++)
			for (k = 0; k < SIZE; k++)
				mresult[i][j] = mresult[i][j] + matrixa[i][k] * matrixb[k][j];
	if ((ret = PAPI_read_counters(values, 6)) != PAPI_OK) {
		fprintf(stderr, "PAPI failed to read counters: %s\n", PAPI_strerror(ret));
		exit(1);
	}
	/* Report each counter, matching the events[] ordering above. */
	printf("store instructions = %lld\n", values[5]);
	printf("floating point instructions = %lld\n", values[4]);
	printf("floating point operations = %lld\n", values[3]);
	printf("load instructions = %lld\n", values[2]);
	printf("instructions issued = %lld\n", values[1]);
	printf("instructions completed = %lld\n", values[0]);
	exit(0);
}
navierstokes_avx.h | //*****************************************************************************
// Title : src/equation_avx/navierstokes_avx.h
// Author : Tanabe Yuta
// Date : 2021/01/24
// Copyright : (C)2021 TanabeYuta
//*****************************************************************************
#pragma once
#include <immintrin.h>
// compile option for g++(MinGW) : -mavx
namespace PANSLBM2 {
namespace NS {
template<class T, template<class>class P>void Macro(T &, T &, T &, const T *, const T *, int); // Function of updating macroscopic values of NS for 2D
template<class T, template<class>class P>void Macro(T &, T &, T &, T &, const T *, const T *, int); // Function of updating macroscopic values of NS for 3D
template<class T, template<class>class P>void Equilibrium(T *, T, T, T); // Function of getting equilibrium of NS for 2D
template<class T, template<class>class P>void Equilibrium(T *, T, T, T, T); // Function of getting equilibrium of NS for 3D
template<class T, template<class>class P>void ExternalForceBrinkman(T, T, T, T, T *, int); // Function of applying external force of NS with Brinkman model for 2D
template<class T, template<class>class P>void ExternalForceBrinkman(T, T, T, T, T, T *, int); // Function of applying external force of NS with Brinkman model for 3D
// Function of updating macroscopic values of NS for 2D
// Computes density __rho = sum_c f_c and velocity (__ux, __uy) =
// (sum_c f_c * c) / rho over the P::nc lattice directions, processing
// four lattice sites at once (one __m256d lane per site).
template<class P>
void Macro(__m256d &__rho, __m256d &__ux, __m256d &__uy, const __m256d *__f) {
__rho = __f[0];
__ux = _mm256_setzero_pd();
__uy = _mm256_setzero_pd();
for (int c = 1; c < P::nc; ++c) {
__rho = _mm256_add_pd(__rho, __f[c]);
__ux = _mm256_add_pd(__ux, _mm256_mul_pd(__f[c], P::__cx[c]));
__uy = _mm256_add_pd(__uy, _mm256_mul_pd(__f[c], P::__cy[c]));
}
// One division, two multiplies: cheaper than dividing each component by rho.
__m256d __invrho = _mm256_div_pd(_mm256_set1_pd(1.0), __rho);
__ux = _mm256_mul_pd(__ux, __invrho);
__uy = _mm256_mul_pd(__uy, __invrho);
}
// Function of updating macroscopic values of NS for 3D
// Same as the 2D overload with an additional z velocity component:
// __rho = sum_c f_c, (__ux, __uy, __uz) = (sum_c f_c * c) / rho.
template<class P>
void Macro(__m256d &__rho, __m256d &__ux, __m256d &__uy, __m256d &__uz, const __m256d *__f) {
__rho = __f[0];
__ux = _mm256_setzero_pd();
__uy = _mm256_setzero_pd();
__uz = _mm256_setzero_pd();
for (int c = 1; c < P::nc; ++c) {
__rho = _mm256_add_pd(__rho, __f[c]);
__ux = _mm256_add_pd(__ux, _mm256_mul_pd(__f[c], P::__cx[c]));
__uy = _mm256_add_pd(__uy, _mm256_mul_pd(__f[c], P::__cy[c]));
__uz = _mm256_add_pd(__uz, _mm256_mul_pd(__f[c], P::__cz[c]));
}
// One division, three multiplies: cheaper than dividing each component by rho.
__m256d __invrho = _mm256_div_pd(_mm256_set1_pd(1.0), __rho);
__ux = _mm256_mul_pd(__ux, __invrho);
__uy = _mm256_mul_pd(__uy, __invrho);
__uz = _mm256_mul_pd(__uz, __invrho);
}
// Function of getting equilibrium of NS for 2D
// Standard LBM second-order equilibrium:
//   feq_c = w_c * rho * (1 - 1.5 u.u + 3 c.u + 4.5 (c.u)^2)
// with w_c = P::__ei[c], evaluated for four lattice sites per call.
template<class P>
void Equilibrium(__m256d *__feq, const __m256d &__rho, const __m256d &__ux, const __m256d &__uy) {
// __1m15uu = 1 - 1.5*(ux^2 + uy^2), shared across all directions.
__m256d __1m15uu = _mm256_sub_pd(_mm256_set1_pd(1.0), _mm256_mul_pd(_mm256_set1_pd(1.5), _mm256_add_pd(_mm256_mul_pd(__ux, __ux), _mm256_mul_pd(__uy, __uy))));
for (int c = 0; c < P::nc; ++c) {
// __cu = c.u for lattice direction c.
__m256d __cu = _mm256_add_pd(_mm256_mul_pd(P::__cx[c], __ux), _mm256_mul_pd(P::__cy[c], __uy));
__feq[c] = _mm256_mul_pd(P::__ei[c], _mm256_mul_pd(__rho, _mm256_add_pd(__1m15uu, _mm256_add_pd(_mm256_mul_pd(_mm256_set1_pd(3.0), __cu), _mm256_mul_pd(_mm256_set1_pd(4.5), _mm256_mul_pd(__cu, __cu))))));
}
}
// Function of getting equilibrium of NS for 3D
// Same second-order equilibrium as the 2D overload, with u.u and c.u
// extended by the z component.
template<class P>
void Equilibrium(__m256d *__feq, const __m256d &__rho, const __m256d &__ux, const __m256d &__uy, const __m256d &__uz) {
// __1m15uu = 1 - 1.5*(ux^2 + uy^2 + uz^2), shared across all directions.
__m256d __1m15uu = _mm256_sub_pd(_mm256_set1_pd(1.0), _mm256_mul_pd(_mm256_set1_pd(1.5), _mm256_add_pd(_mm256_add_pd(_mm256_mul_pd(__ux, __ux), _mm256_mul_pd(__uy, __uy)), _mm256_mul_pd(__uz, __uz))));
for (int c = 0; c < P::nc; ++c) {
// __cu = c.u for lattice direction c.
__m256d __cu = _mm256_add_pd(_mm256_add_pd(_mm256_mul_pd(P::__cx[c], __ux), _mm256_mul_pd(P::__cy[c], __uy)), _mm256_mul_pd(P::__cz[c], __uz));
__feq[c] = _mm256_mul_pd(P::__ei[c], _mm256_mul_pd(__rho, _mm256_add_pd(__1m15uu, _mm256_add_pd(_mm256_mul_pd(_mm256_set1_pd(3.0), __cu), _mm256_mul_pd(_mm256_set1_pd(4.5), _mm256_mul_pd(__cu, __cu))))));
}
}
// Brinkman penalization force for 2D: subtracts
//   coef * w_c * (c.u),  coef = 3*alpha*rho/(rho + alpha)
// from every non-rest distribution f_c (c = 0, the rest direction, is
// untouched since c.u = 0 there).
template<class P>
void ExternalForceBrinkman(const __m256d &__rho, const __m256d &__ux, const __m256d &__uy, const __m256d &__alpha, __m256d *__f) {
__m256d __coef = _mm256_div_pd(_mm256_mul_pd(_mm256_mul_pd(_mm256_set1_pd(3.0), __alpha), __rho), _mm256_add_pd(__rho, __alpha));
for (int c = 1; c < P::nc; ++c) {
__f[c] = _mm256_sub_pd(__f[c], _mm256_mul_pd(_mm256_mul_pd(__coef, P::__ei[c]), _mm256_add_pd(_mm256_mul_pd(P::__cx[c], __ux), _mm256_mul_pd(P::__cy[c], __uy))));
}
}
// Brinkman penalization force for 3D: same as the 2D overload with the
// z component added to the c.u term.
template<class P>
void ExternalForceBrinkman(const __m256d &__rho, const __m256d &__ux, const __m256d &__uy, const __m256d &__uz, const __m256d &__alpha, __m256d *__f) {
__m256d __coef = _mm256_div_pd(_mm256_mul_pd(_mm256_mul_pd(_mm256_set1_pd(3.0), __alpha), __rho), _mm256_add_pd(__rho, __alpha));
for (int c = 1; c < P::nc; ++c) {
__f[c] = _mm256_sub_pd(__f[c], _mm256_mul_pd(_mm256_mul_pd(__coef, P::__ei[c]), _mm256_add_pd(_mm256_add_pd(_mm256_mul_pd(P::__cx[c], __ux), _mm256_mul_pd(P::__cy[c], __uy)), _mm256_mul_pd(P::__cz[c], __uz))));
}
}
// Function of Update macro and Collide of NS for 2D
// One BGK time step: recompute macroscopic values, optionally store them
// into _rho/_ux/_uy, then relax f towards equilibrium with
// omega = 1/(3*nu + 0.5).  The main loop handles packsize sites per
// iteration with AVX; a scalar loop finishes the remainder nxyz % packsize.
template<template<class>class P>
void MacroCollide(P<double>& _p, double *_rho, double *_ux, double *_uy, double _viscosity, bool _issave = false) {
const int ne = _p.nxyz/P<double>::packsize;
double omega = 1.0/(3.0*_viscosity + 0.5), iomega = 1.0 - omega, feq[P<double>::nc];
// __feq is listed private() so each OpenMP thread gets its own scratch array.
__m256d __omega = _mm256_set1_pd(omega), __iomega = _mm256_set1_pd(iomega), __feq[P<double>::nc];
#pragma omp parallel for private(__feq)
for (int pidx = 0; pidx < ne; ++pidx) {
int idx = pidx*P<double>::packsize;
// Pack f0 and f
__m256d __f[P<double>::nc];
_p.LoadF(idx, __f);
// Update macro
__m256d __rho, __ux, __uy;
Macro<P<double> >(__rho, __ux, __uy, __f);
// Save macro if need
if (_issave) {
_mm256_storeu_pd(&_rho[idx], __rho);
_mm256_storeu_pd(&_ux[idx], __ux);
_mm256_storeu_pd(&_uy[idx], __uy);
}
// Collide: f <- (1-omega)*f + omega*feq
Equilibrium<P<double> >(__feq, __rho, __ux, __uy);
for (int c = 0; c < P<double>::nc; ++c) {
__f[c] = _mm256_add_pd(_mm256_mul_pd(__iomega, __f[c]), _mm256_mul_pd(__omega, __feq[c]));
}
_p.StoreF(idx, __f);
}
// Scalar tail: same step for the sites that do not fill a SIMD pack.
for (int idx = ne*P<double>::packsize; idx < _p.nxyz; ++idx) {
// Update macro
double rho, ux, uy;
Macro<double, P>(rho, ux, uy, _p.f0, _p.f, idx);
// Save macro if need
if (_issave) {
_rho[idx] = rho;
_ux[idx] = ux;
_uy[idx] = uy;
}
// Collide
Equilibrium<double, P>(feq, rho, ux, uy);
_p.f0[idx] = iomega*_p.f0[idx] + omega*feq[0];
for (int c = 1; c < P<double>::nc; ++c) {
int idxf = P<double>::IndexF(idx, c);
_p.f[idxf] = iomega*_p.f[idxf] + omega*feq[c];
}
}
}
// Function of Update macro and Collide of NS for 3D
// One BGK time step (3D analogue of the 2D overload): recompute macroscopic
// values, optionally store them, then relax f towards equilibrium with
// omega = 1/(3*nu + 0.5).  AVX main loop plus scalar remainder loop.
template<template<class>class P>
void MacroCollide(P<double>& _p, double *_rho, double *_ux, double *_uy, double *_uz, double _viscosity, bool _issave = false) {
const int ne = _p.nxyz/P<double>::packsize;
double omega = 1.0/(3.0*_viscosity + 0.5), iomega = 1.0 - omega, feq[P<double>::nc];
// __feq is listed private() so each OpenMP thread gets its own scratch array.
__m256d __omega = _mm256_set1_pd(omega), __iomega = _mm256_set1_pd(iomega), __feq[P<double>::nc];
#pragma omp parallel for private(__feq)
for (int pidx = 0; pidx < ne; ++pidx) {
int idx = pidx*P<double>::packsize;
// Pack f0 and f
__m256d __f[P<double>::nc];
_p.LoadF(idx, __f);
// Update macro
__m256d __rho, __ux, __uy, __uz;
Macro<P<double> >(__rho, __ux, __uy, __uz, __f);
// Save macro if need
if (_issave) {
_mm256_storeu_pd(&_rho[idx], __rho);
_mm256_storeu_pd(&_ux[idx], __ux);
_mm256_storeu_pd(&_uy[idx], __uy);
_mm256_storeu_pd(&_uz[idx], __uz);
}
// Collide: f <- (1-omega)*f + omega*feq
Equilibrium<P<double> >(__feq, __rho, __ux, __uy, __uz);
for (int c = 0; c < P<double>::nc; ++c) {
__f[c] = _mm256_add_pd(_mm256_mul_pd(__iomega, __f[c]), _mm256_mul_pd(__omega, __feq[c]));
}
_p.StoreF(idx, __f);
}
// Scalar tail: same step for the sites that do not fill a SIMD pack.
for (int idx = ne*P<double>::packsize; idx < _p.nxyz; ++idx) {
// Update macro
double rho, ux, uy, uz;
Macro<double, P>(rho, ux, uy, uz, _p.f0, _p.f, idx);
// Save macro if need
if (_issave) {
_rho[idx] = rho;
_ux[idx] = ux;
_uy[idx] = uy;
_uz[idx] = uz;
}
// Collide
Equilibrium<double, P>(feq, rho, ux, uy, uz);
_p.f0[idx] = iomega*_p.f0[idx] + omega*feq[0];
for (int c = 1; c < P<double>::nc; ++c) {
int idxf = P<double>::IndexF(idx, c);
_p.f[idxf] = iomega*_p.f[idxf] + omega*feq[c];
}
}
}
// Function of Update macro, External force(Brinkman model) and Collide of NS for 2D
// One penalized BGK step: compute macros, apply the Brinkman force (with
// per-site coefficient _alpha), recompute macros, optionally store them,
// then relax towards equilibrium.  AVX main loop plus scalar remainder loop.
//
// Fix: the scalar tail previously stored rho/ux/uy BEFORE applying the
// Brinkman force, while the SIMD loop (and the 3D overload) store them
// AFTER.  Remainder sites therefore reported stale pre-force macros when
// _issave was set.  The save now follows the force update in both paths.
template<template<class>class P>
void MacroBrinkmanCollide(P<double>& _p, double *_rho, double *_ux, double *_uy, double _viscosity, const double *_alpha, bool _issave = false) {
const int ne = _p.nxyz/P<double>::packsize;
double omega = 1.0/(3.0*_viscosity + 0.5), iomega = 1.0 - omega, feq[P<double>::nc];
// __feq is listed private() so each OpenMP thread gets its own scratch array.
__m256d __omega = _mm256_set1_pd(omega), __iomega = _mm256_set1_pd(iomega), __feq[P<double>::nc];
#pragma omp parallel for private(__feq)
for (int pidx = 0; pidx < ne; ++pidx) {
int idx = pidx*P<double>::packsize;
// Pack f0 and f
__m256d __f[P<double>::nc];
_p.LoadF(idx, __f);
// Update macro
__m256d __rho, __ux, __uy;
Macro<P<double> >(__rho, __ux, __uy, __f);
// External force with Brinkman model
__m256d __alpha = _mm256_loadu_pd(&_alpha[idx]);
ExternalForceBrinkman<P<double> >(__rho, __ux, __uy, __alpha, __f);
Macro<P<double> >(__rho, __ux, __uy, __f);
// Save macro if need
if (_issave) {
_mm256_storeu_pd(&_rho[idx], __rho);
_mm256_storeu_pd(&_ux[idx], __ux);
_mm256_storeu_pd(&_uy[idx], __uy);
}
// Collide: f <- (1-omega)*f + omega*feq
Equilibrium<P<double> >(__feq, __rho, __ux, __uy);
for (int c = 0; c < P<double>::nc; ++c) {
__f[c] = _mm256_add_pd(_mm256_mul_pd(__iomega, __f[c]), _mm256_mul_pd(__omega, __feq[c]));
}
_p.StoreF(idx, __f);
}
// Scalar tail: same step for the sites that do not fill a SIMD pack.
for (int idx = ne*P<double>::packsize; idx < _p.nxyz; ++idx) {
// Update macro
double rho, ux, uy;
Macro<double, P>(rho, ux, uy, _p.f0, _p.f, idx);
// External force with Brinkman model
ExternalForceBrinkman<double, P>(rho, ux, uy, _alpha[idx], _p.f, idx);
Macro<double, P>(rho, ux, uy, _p.f0, _p.f, idx);
// Save macro if need (after the force update -- see note above)
if (_issave) {
_rho[idx] = rho;
_ux[idx] = ux;
_uy[idx] = uy;
}
// Collide
Equilibrium<double, P>(feq, rho, ux, uy);
_p.f0[idx] = iomega*_p.f0[idx] + omega*feq[0];
for (int c = 1; c < P<double>::nc; ++c) {
int idxf = P<double>::IndexF(idx, c);
_p.f[idxf] = iomega*_p.f[idxf] + omega*feq[c];
}
}
}
// Function of Update macro, External force(Brinkman model) and Collide of NS for 3D
// (Header comment corrected: this is the Brinkman-penalized overload, not the
// plain MacroCollide.)  One penalized BGK step: compute macros, apply the
// Brinkman force with per-site coefficient _alpha, recompute macros,
// optionally store them, then relax towards equilibrium.
template<template<class>class P>
void MacroBrinkmanCollide(P<double>& _p, double *_rho, double *_ux, double *_uy, double *_uz, double _viscosity, const double *_alpha, bool _issave = false) {
const int ne = _p.nxyz/P<double>::packsize;
double omega = 1.0/(3.0*_viscosity + 0.5), iomega = 1.0 - omega, feq[P<double>::nc];
// __feq is listed private() so each OpenMP thread gets its own scratch array.
__m256d __omega = _mm256_set1_pd(omega), __iomega = _mm256_set1_pd(iomega), __feq[P<double>::nc];
#pragma omp parallel for private(__feq)
for (int pidx = 0; pidx < ne; ++pidx) {
int idx = pidx*P<double>::packsize;
// Pack f0 and f
__m256d __f[P<double>::nc];
_p.LoadF(idx, __f);
// Update macro
__m256d __rho, __ux, __uy, __uz;
Macro<P<double> >(__rho, __ux, __uy, __uz, __f);
// External force with Brinkman model
__m256d __alpha = _mm256_loadu_pd(&_alpha[idx]);
ExternalForceBrinkman<P<double> >(__rho, __ux, __uy, __uz, __alpha, __f);
Macro<P<double> >(__rho, __ux, __uy, __uz, __f);
// Save macro if need
if (_issave) {
_mm256_storeu_pd(&_rho[idx], __rho);
_mm256_storeu_pd(&_ux[idx], __ux);
_mm256_storeu_pd(&_uy[idx], __uy);
_mm256_storeu_pd(&_uz[idx], __uz);
}
// Collide: f <- (1-omega)*f + omega*feq
Equilibrium<P<double> >(__feq, __rho, __ux, __uy, __uz);
for (int c = 0; c < P<double>::nc; ++c) {
__f[c] = _mm256_add_pd(_mm256_mul_pd(__iomega, __f[c]), _mm256_mul_pd(__omega, __feq[c]));
}
_p.StoreF(idx, __f);
}
// Scalar tail: same step for the sites that do not fill a SIMD pack.
for (int idx = ne*P<double>::packsize; idx < _p.nxyz; ++idx) {
// Update macro
double rho, ux, uy, uz;
Macro<double, P>(rho, ux, uy, uz, _p.f0, _p.f, idx);
// External force with Brinkman model
ExternalForceBrinkman<double, P>(rho, ux, uy, uz, _alpha[idx], _p.f, idx);
Macro<double, P>(rho, ux, uy, uz, _p.f0, _p.f, idx);
// Save macro if need
if (_issave) {
_rho[idx] = rho;
_ux[idx] = ux;
_uy[idx] = uy;
_uz[idx] = uz;
}
// Collide
Equilibrium<double, P>(feq, rho, ux, uy, uz);
_p.f0[idx] = iomega*_p.f0[idx] + omega*feq[0];
for (int c = 1; c < P<double>::nc; ++c) {
int idxf = P<double>::IndexF(idx, c);
_p.f[idxf] = iomega*_p.f[idxf] + omega*feq[c];
}
}
}
}
} |
weights.c |
/*
Author: Mohammed Ahmed Al Farhan
Email: mohammed.farhan@kaust.edu.sa
Compute the weights for each node for the weighted least square
*/
#include <stdio.h>
#include <stdint.h>
#include <omp.h>
#include <math.h>
#include "inc/geometry.h"
#include "inc/allocator.h"
#include "inc/msh/mesh.h"
/*
 * Scatter the weighted-least-squares gradient terms for both endpoints of
 * every edge.  w holds seven factors per node, laid out w[node*7 + 0..6]
 * and filled in by wmalloc below; terms0/terms1 receive the x/y/z terms of
 * the edge as seen from node0 / node1 respectively (mirrored signs).
 * Each thread owns the edge range ie[t]..ie[t+1] and only writes terms for
 * endpoints in its own partition (part[node] == t), so writes never race.
 * NOTE(review): the index arithmetic assumes exactly the w[] layout
 * produced by w0alloc/w1alloc/w2alloc + wmalloc's finalization pass --
 * verify against those routines before changing either side.
 */
static inline void
compute_terms(const struct geometry *restrict g,
const double *restrict w,
struct xyz *restrict terms0,
struct xyz *restrict terms1)
{
const uint32_t *restrict ie = g->s->ie;
const uint32_t *restrict part = g->s->part;
const uint32_t *restrict n0 = g->e->eptr->n0;
const uint32_t *restrict n1 = g->e->eptr->n1;
const double *restrict x0 = g->n->xyz->x0;
const double *restrict x1 = g->n->xyz->x1;
const double *restrict x2 = g->n->xyz->x2;
#pragma omp parallel
{
const uint32_t t = omp_get_thread_num();
const uint32_t ie0 = ie[t];
const uint32_t ie1 = ie[t+1];
uint32_t i;
for(i = ie0; i < ie1; i++)
{
/* Edge endpoints and their coordinates */
const uint32_t node0 = n0[i];
const uint32_t node1 = n1[i];
const double coordx0 = x0[node0];
const double coordy0 = x1[node0];
const double coordz0 = x2[node0];
const double coordx1 = x0[node1];
const double coordy1 = x1[node1];
const double coordz1 = x2[node1];
/* Edge vector from node0 to node1 */
const double dx = coordx1 - coordx0;
const double dy = coordy1 - coordy0;
const double dz = coordz1 - coordz0;
double c0;
double c1;
double termx;
double termy;
double termz;
if(part[node0] == t)
{
/* Back-substitution through node0's triangular factors */
c0 = - dx * w[node0 * 7 + 1] + dy;
c1 = - dx * w[node0 * 7 + 2] + dz;
c1 = - w[node0 * 7 + 4] * c0 + c1;
termx = w[node0 * 7 + 3] * w[node0 * 7 + 1] * c0;
termx = dx * w[node0 * 7 + 0] - termx;
termx += w[node0 * 7 + 6] * c1;
termy = w[node0 * 7 + 4] * w[node0 * 7 + 5] * c1;
termy = w[node0 * 7 + 3] * c0 - termy;
termz = w[node0 * 7 + 5] * c1;
terms0->x0[i] = termx;
terms0->x1[i] = termy;
terms0->x2[i] = termz;
}
if(part[node1] == t)
{
/* Same computation from node1's viewpoint: the edge vector flips sign */
c0 = dx * w[node1 * 7 + 1] - dy;
c1 = dx * w[node1 * 7 + 2] - dz;
c1 = - w[node1 * 7 + 4] * c0 + c1;
termx = w[node1 * 7 + 3] * w[node1 * 7 + 1] * c0;
termx = -dx * w[node1 * 7 + 0] - termx;
termx += w[node1 * 7 + 6] * c1;
termy = w[node1 * 7 + 4] * w[node1 * 7 + 5] * c1;
termy = w[node1 * 7 + 3] * c0 - termy;
termz = w[node1 * 7 + 5] * c1;
terms1->x0[i] = termx;
terms1->x1[i] = termy;
terms1->x2[i] = termz;
}
}
}
}
/* Do w22 */
/*
 * Accumulate slot 5 (w22) for every node: the squared residual of dz after
 * removing the components along the two previously orthogonalized
 * directions (Gram-Schmidt style, using the already-normalized slots
 * 0..4 produced by w0alloc/w1alloc).  node1 sees the edge vector with
 * flipped sign.  Each thread updates only nodes in its own partition.
 */
static inline void
w2alloc(const struct geometry *restrict g, double *restrict w)
{
const uint32_t *restrict ie = g->s->ie;
const uint32_t *restrict part = g->s->part;
const uint32_t *restrict n0 = g->e->eptr->n0;
const uint32_t *restrict n1 = g->e->eptr->n1;
const double *restrict x0 = g->n->xyz->x0;
const double *restrict x1 = g->n->xyz->x1;
const double *restrict x2 = g->n->xyz->x2;
#pragma omp parallel
{
const uint32_t t = omp_get_thread_num();
const uint32_t ie0 = ie[t];
const uint32_t ie1 = ie[t+1];
uint32_t i;
for(i = ie0; i < ie1; i++)
{
const uint32_t node0 = n0[i];
const uint32_t node1 = n1[i];
const double coordx0 = x0[node0];
const double coordy0 = x1[node0];
const double coordz0 = x2[node0];
const double coordx1 = x0[node1];
const double coordy1 = x1[node1];
const double coordz1 = x2[node1];
/* Edge vector from node0 to node1 */
const double dx = coordx1 - coordx0;
const double dy = coordy1 - coordy0;
const double dz = coordz1 - coordz0;
if(part[node0] == t)
{
const double d0 = w[node0 * 7 + 1] / w[node0 * 7 + 0];
const double d0_ = dy - dx * d0;
const double d1 = w[node0 * 7 + 2] / w[node0 * 7 + 0];
const double d1_ = dz - dx * d1;
const double d2 = w[node0 * 7 + 4] / w[node0 * 7 + 3];
const double d2_ = d1_ - d2 * d0_;
w[node0 * 7 + 5] += d2_ * d2_;
}
if(part[node1] == t)
{
/* Same residual computed with the mirrored edge vector */
const double d0 = w[node1 * 7 + 1] / w[node1 * 7 + 0];
const double d0_ = -dy + dx * d0;
const double d1 = w[node1 * 7 + 2] / w[node1 * 7 + 0];
const double d1_ = -dz + dx * d1;
const double d2 = w[node1 * 7 + 4] / w[node1 * 7 + 3];
const double d2_ = d1_ - d2 * d0_;
w[node1 * 7 + 5] += d2_ * d2_;
}
}
}
}
/* Do w11 and w12 */
/*
 * Accumulate slots 3 (w11) and 4 (w12) for every node: the squared dy
 * residual after removing its dx component, and its cross term with dz,
 * using the already-normalized slots 0..2 from w0alloc.  node1 sees the
 * edge vector with flipped sign.  Each thread updates only nodes in its
 * own partition.
 */
static inline void
w1alloc(const struct geometry *restrict g, double *restrict w)
{
const uint32_t *restrict ie = g->s->ie;
const uint32_t *restrict part = g->s->part;
const uint32_t *restrict n0 = g->e->eptr->n0;
const uint32_t *restrict n1 = g->e->eptr->n1;
const double *restrict x0 = g->n->xyz->x0;
const double *restrict x1 = g->n->xyz->x1;
const double *restrict x2 = g->n->xyz->x2;
#pragma omp parallel
{
const uint32_t t = omp_get_thread_num();
const uint32_t ie0 = ie[t];
const uint32_t ie1 = ie[t+1];
uint32_t i;
for(i = ie0; i < ie1; i++)
{
const uint32_t node0 = n0[i];
const uint32_t node1 = n1[i];
const double coordx0 = x0[node0];
const double coordy0 = x1[node0];
const double coordz0 = x2[node0];
const double coordx1 = x0[node1];
const double coordy1 = x1[node1];
const double coordz1 = x2[node1];
/* Compute the difference of each coordinate component */
const double dx = coordx1 - coordx0;
const double dy = coordy1 - coordy0;
const double dz = coordz1 - coordz0;
if(part[node0] == t)
{
const double d = w[node0 * 7 + 1] / w[node0 * 7 + 0];
const double d_ = dy - dx * d;
w[node0 * 7 + 3] += d_ * d_;
w[node0 * 7 + 4] += d_ * dz;
}
if(part[node1] == t)
{
/* Mirrored edge vector: d_ flips sign, so the dz cross term subtracts */
const double d = w[node1 * 7 + 1] / w[node1 * 7 + 0];
const double d_ = -dy + dx * d;
w[node1 * 7 + 3] += d_ * d_;
w[node1 * 7 + 4] -= d_ * dz;
}
}
}
}
/*
Compute w00, w01, and w02 in parallel
*/
/*
 * Accumulate slots 0..2 for every node: sum of dx*dx, dx*dy and dx*dz over
 * the node's incident edges, with the edge vector oriented away from the
 * node.  Each thread walks its own edge range ie[t]..ie[t+1] and updates
 * only nodes assigned to its partition (part[node] == t), so the += are
 * race-free without atomics.
 */
static inline void
w0alloc(const struct geometry *restrict g, double *restrict w)
{
const uint32_t *restrict ie = g->s->ie;
const uint32_t *restrict part = g->s->part;
const uint32_t *restrict n0 = g->e->eptr->n0;
const uint32_t *restrict n1 = g->e->eptr->n1;
const double *restrict x0 = g->n->xyz->x0;
const double *restrict x1 = g->n->xyz->x1;
const double *restrict x2 = g->n->xyz->x2;
#pragma omp parallel
{
const uint32_t t = omp_get_thread_num();
const uint32_t ie0 = ie[t];
const uint32_t ie1 = ie[t+1];
uint32_t i;
for(i = ie0; i < ie1; i++)
{
const uint32_t node0 = n0[i];
const uint32_t node1 = n1[i];
const double coordx0 = x0[node0];
const double coordy0 = x1[node0];
const double coordz0 = x2[node0];
const double coordx1 = x0[node1];
const double coordy1 = x1[node1];
const double coordz1 = x2[node1];
/*
* Write-back: Update endpoints
* */
if(part[node0] == t) // Do the left endpoint
{
const double res_x = coordx1 - coordx0;
const double res_y = coordy1 - coordy0;
const double res_z = coordz1 - coordz0;
w[node0 * 7 + 0] += res_x * res_x;
w[node0 * 7 + 1] += res_x * res_y;
w[node0 * 7 + 2] += res_x * res_z;
}
if(part[node1] == t) // Do the right endpoint
{
/* Same products with the edge vector flipped; res_x*res_x is unchanged,
   the cross terms pick up the sign twice and also stay positive-signed */
const double res_x = coordx0 - coordx1;
const double res_y = coordy0 - coordy1;
const double res_z = coordz0 - coordz1;
w[node1 * 7 + 0] += res_x * res_x;
w[node1 * 7 + 1] += res_x * res_y;
w[node1 * 7 + 2] += res_x * res_z;
}
}
}
}
/*
 * Build the per-edge weighted-least-squares weights for the mesh:
 *   1. accumulate the Gram entries (w0alloc), normalize column 0;
 *   2. accumulate the second-column residuals (w1alloc), normalize;
 *   3. accumulate the third-column residual (w2alloc), normalize;
 *   4. finalize the seven per-node factors;
 *   5. expand them into per-edge terms (compute_terms) and attach the
 *      result to g->e->w.  The scratch array w is freed before returning.
 * NOTE(review): loop index i is uint32_t while nnodes is size_t -- fine for
 * meshes below 2^32 nodes; the omp-for loop variable is implicitly private
 * per the OpenMP spec even though it is declared at function scope.
 */
void
wmalloc(struct geometry *restrict g)
{
size_t nnodes = g->n->sz;
double *restrict w;
/* Zero-initialized scratch: 7 factors per node */
kcalloc(7 * nnodes, sizeof(double), (void *) &w);
uint32_t i;
/* Do w00, w01, and w02 */
w0alloc(g, w);
/* Compute ||x|| (norm) and divide the other by
the computed norm */
#pragma omp parallel
{
#pragma omp for
for(i = 0; i < nnodes; i++)
w[i * 7 + 0] = sqrt(w[i * 7 + 0]);
#pragma omp for
for(i = 0; i < nnodes; i++)
{
w[i * 7 + 1] /= w[i * 7 + 0];
w[i * 7 + 2] /= w[i * 7 + 0];
}
}
/* Do w11 and w12 */
w1alloc(g, w);
#pragma omp parallel
{
#pragma omp for
for(i = 0; i < nnodes; i++)
w[i * 7 + 3] = sqrt(w[i * 7 + 3]);
#pragma omp for
for(i = 0; i < nnodes; i++)
w[i * 7 + 4] /= w[i * 7 + 3];
}
/* Do w22 */
w2alloc(g, w);
#pragma omp parallel
{
#pragma omp for
for(i = 0; i < nnodes; i++)
w[i * 7 + 5] = sqrt(w[i * 7 + 5]);
/* Update the magnitudes. Stuffs contributed by Dinesh 1998 */
#pragma omp for
for(i = 0; i < nnodes; i++)
{
double sw00 = w[i * 7 + 0] * w[i * 7 + 0];
double sw11 = w[i * 7 + 3] * w[i * 7 + 3];
double sw22 = w[i * 7 + 5] * w[i * 7 + 5];
/* 1.f literals promote to double in the division; harmless here */
double w00 = 1.f / sw00;
double w11 = 1.f / sw11;
double w22 = 1.f / sw22;
double w01 = w[i * 7 + 1] / w[i * 7 + 0];
double w02 = w[i * 7 + 2] / w[i * 7 + 0];
double w12 = w[i * 7 + 4] / w[i * 7 + 3];
double m0 = w[i * 7 + 1] * w[i * 7 + 4];
m0 -= w[i * 7 + 2] * w[i * 7 + 3];
double m1 = w[i * 7 + 0] * w[i * 7 + 3] * sw22;
double w33 = m0 / m1;
/* Overwrite the raw accumulators with the finalized factors that
   compute_terms consumes */
w[i * 7 + 0] = w00;
w[i * 7 + 3] = w11;
w[i * 7 + 5] = w22;
w[i * 7 + 1] = w01;
w[i * 7 + 2] = w02;
w[i * 7 + 4] = w12;
w[i * 7 + 6] = w33;
}
}
/* Allocate per-edge term storage for both edge orientations */
size_t nedges = g->e->sz;
struct xyz *restrict terms0;
kmalloc(1, sizeof(struct xyz), (void *) &terms0);
double *restrict wtermsx0;
kcalloc(nedges, sizeof(double), (void *) &wtermsx0);
double *restrict wtermsy0;
kcalloc(nedges, sizeof(double), (void *) &wtermsy0);
double *restrict wtermsz0;
kcalloc(nedges, sizeof(double), (void *) &wtermsz0);
terms0->x0 = wtermsx0;
terms0->x1 = wtermsy0;
terms0->x2 = wtermsz0;
struct xyz *restrict terms1;
kmalloc(1, sizeof(struct xyz), (void *) &terms1);
double *restrict wtermsx1;
kcalloc(nedges, sizeof(double), (void *) &wtermsx1);
double *restrict wtermsy1;
kcalloc(nedges, sizeof(double), (void *) &wtermsy1);
double *restrict wtermsz1;
kcalloc(nedges, sizeof(double), (void *) &wtermsz1);
terms1->x0 = wtermsx1;
terms1->x1 = wtermsy1;
terms1->x2 = wtermsz1;
/* Expand node factors into per-edge terms, then drop the scratch array */
compute_terms(g, w, terms0, terms1);
kfree(w);
/* Publish the result on the geometry's edge structure */
struct weights *restrict weights;
kmalloc(1, sizeof(struct weights), (void *) &weights);
weights->w0 = terms0;
weights->w1 = terms1;
g->e->w = weights;
}
|
GB_reduce_to_scalar_template.c | //------------------------------------------------------------------------------
// GB_reduce_to_scalar_template: s=reduce(A), reduce a matrix to a scalar
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// Reduce a matrix to a scalar, with typecasting and generic operators.
// No panel is used.
{
    //--------------------------------------------------------------------------
    // get A
    //--------------------------------------------------------------------------
    const GB_ATYPE *GB_RESTRICT Ax = A->x ;
    int64_t anz = GB_NNZ (A) ;
    ASSERT (anz > 0) ;
    //--------------------------------------------------------------------------
    // reduce A to a scalar
    //--------------------------------------------------------------------------
    if (nthreads == 1)
    {
        //----------------------------------------------------------------------
        // single thread
        //----------------------------------------------------------------------
        // s = (ztype) Ax [0]
        GB_CAST_ARRAY_TO_SCALAR (s, Ax, 0) ;
        for (int64_t p = 1 ; p < anz ; p++)
        {
            // check for early exit
            GB_BREAK_IF_TERMINAL (s) ;
            // s = op (s, (ztype) Ax [p])
            GB_ADD_CAST_ARRAY_TO_SCALAR (s, Ax, p) ;
        }
    }
    else
    {
        //----------------------------------------------------------------------
        // each thread reduces its own slice in parallel
        //----------------------------------------------------------------------
        // early_exit is shared: once any task hits the terminal value,
        // remaining tasks skip their inner loops (via GB_IF_NOT_EARLY_EXIT).
        bool early_exit = false ;
        int tid ;
        // dynamic schedule: tasks that exit early do unequal amounts of work
        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
        for (tid = 0 ; tid < ntasks ; tid++)
        {
            // this task reduces the slice Ax [pstart..pend-1]
            int64_t pstart, pend ;
            GB_PARTITION (pstart, pend, anz, tid, ntasks) ;
            // ztype t = (ztype) Ax [pstart], with typecast
            GB_SCALAR (t) ;
            GB_CAST_ARRAY_TO_SCALAR (t, Ax, pstart) ;
            GB_IF_NOT_EARLY_EXIT
            {
                for (int64_t p = pstart+1 ; p < pend ; p++)
                {
                    // check for early exit
                    GB_PARALLEL_BREAK_IF_TERMINAL (t) ;
                    // t = op (t, (ztype) Ax [p]), with typecast
                    GB_ADD_CAST_ARRAY_TO_SCALAR (t, Ax, p) ;
                }
            }
            // W [tid] = t, no typecast (W is the caller-provided workspace,
            // one partial result per task)
            GB_COPY_SCALAR_TO_ARRAY (W, tid, t) ;
        }
        //----------------------------------------------------------------------
        // sum up the results of each slice using a single thread
        //----------------------------------------------------------------------
        // s = W [0], no typecast
        GB_COPY_ARRAY_TO_SCALAR (s, W, 0) ;
        for (int tid = 1 ; tid < ntasks ; tid++)
        {
            // s = op (s, W [tid]), no typecast
            GB_ADD_ARRAY_TO_SCALAR (s, W, tid) ;
        }
    }
}
|
pfmg3_setup_rap.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision: 2.18 $
***********************************************************************EHEADER*/
#include "_hypre_struct_ls.h"
#include "pfmg.h"
/*--------------------------------------------------------------------------
* Macro to "change coordinates". This routine is written as though
* coarsening is being done in the z-direction. This macro is used to
* allow for coarsening to be done in the x- and y-directions also.
*--------------------------------------------------------------------------*/
#define MapIndex(in_index, cdir, out_index) \
hypre_IndexD(out_index, cdir) = hypre_IndexD(in_index, 2); \
cdir = (cdir + 1) % 3; \
hypre_IndexD(out_index, cdir) = hypre_IndexD(in_index, 0); \
cdir = (cdir + 1) % 3; \
hypre_IndexD(out_index, cdir) = hypre_IndexD(in_index, 1); \
cdir = (cdir + 1) % 3;
/*--------------------------------------------------------------------------
 * Sets up new coarse grid operator structure.
*--------------------------------------------------------------------------*/
hypre_StructMatrix *
hypre_PFMG3CreateRAPOp( hypre_StructMatrix *R,
                        hypre_StructMatrix *A,
                        hypre_StructMatrix *P,
                        hypre_StructGrid   *coarse_grid,
                        HYPRE_Int           cdir        )
{
   hypre_StructMatrix    *RAP;
   hypre_Index           *RAP_stencil_shape;
   hypre_StructStencil   *RAP_stencil;
   HYPRE_Int              RAP_stencil_size;
   HYPRE_Int              RAP_stencil_dim;
   HYPRE_Int              RAP_num_ghost[] = {1, 1, 1, 1, 1, 1};
   hypre_StructStencil   *A_stencil;
   HYPRE_Int              A_stencil_size;
   hypre_Index            index_temp;
   HYPRE_Int              k, j, i;
   HYPRE_Int              stencil_rank;
   HYPRE_Int              keep_corners;

   RAP_stencil_dim = 3;

   A_stencil = hypre_StructMatrixStencil(A);
   A_stencil_size = hypre_StructStencilSize(A_stencil);

   /*-----------------------------------------------------------------------
    * Define RAP_stencil
    *
    * A 7-point fine grid stencil produces a 19-point RAP (all 27 elements
    * except the 8 corners); a 19- or 27-point fine grid stencil produces a
    * full 27-point RAP.
    *
    * For symmetric A, only the lower triangular part is stored, where
    * lower triangular means the lower triangular part of the matrix in
    * the standard lexicographic ordering.
    *-----------------------------------------------------------------------*/
   stencil_rank = 0;

   /* Corners (i*j*k != 0) appear in RAP only when A is larger than 7-point */
   keep_corners = (A_stencil_size != 7);
   RAP_stencil_size = keep_corners ? 27 : 19;
   if (hypre_StructMatrixSymmetric(A))
   {
      /* symmetric storage: lower triangle plus diagonal */
      RAP_stencil_size = (RAP_stencil_size + 1) / 2;
   }
   RAP_stencil_shape = hypre_CTAlloc(hypre_Index, RAP_stencil_size);

   /* Enumerate offsets in lexicographic order; with symmetric storage the
      stencil_rank cap stops at the lower triangular part (incl. diagonal). */
   for (k = -1; k < 2; k++)
   {
      for (j = -1; j < 2; j++)
      {
         for (i = -1; i < 2; i++)
         {
            if ((keep_corners || (i*j*k == 0)) &&
                (stencil_rank < RAP_stencil_size))
            {
               hypre_SetIndex(index_temp,i,j,k);
               MapIndex(index_temp, cdir,
                        RAP_stencil_shape[stencil_rank]);
               stencil_rank++;
            }
         }
      }
   }

   RAP_stencil = hypre_StructStencilCreate(RAP_stencil_dim, RAP_stencil_size,
                                           RAP_stencil_shape);
   RAP = hypre_StructMatrixCreate(hypre_StructMatrixComm(A),
                                  coarse_grid, RAP_stencil);
   hypre_StructStencilDestroy(RAP_stencil);

   /*-----------------------------------------------------------------------
    * Coarse operator is symmetric iff fine operator is
    *-----------------------------------------------------------------------*/
   hypre_StructMatrixSymmetric(RAP) = hypre_StructMatrixSymmetric(A);

   /*-----------------------------------------------------------------------
    * Set number of ghost points - one on each boundary
    *-----------------------------------------------------------------------*/
   hypre_StructMatrixSetNumGhost(RAP, RAP_num_ghost);

   return RAP;
}
/*--------------------------------------------------------------------------
* Routines to build RAP. These routines are fairly general
* 1) No assumptions about symmetry of A
* 2) No assumption that R = transpose(P)
* 3) 7, 19 or 27-point fine grid A
*
* I am, however, assuming that the c-to-c interpolation is the identity.
*
 * I've written two routines - hypre_PFMG3BuildRAPSym to build the lower
* triangular part of RAP (including the diagonal) and
* hypre_PFMG3BuildRAPNoSym to build the upper triangular part of RAP
* (excluding the diagonal). So using symmetric storage, only the first
* routine would be called. With full storage both would need to be called.
*
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_PFMG3BuildRAPSym( hypre_StructMatrix *A,
                        hypre_StructMatrix *P,
                        hypre_StructMatrix *R,
                        HYPRE_Int           cdir,
                        hypre_Index         cindex,
                        hypre_Index         cstride,
                        hypre_StructMatrix *RAP     )
{
   hypre_StructStencil  *fine_stencil;
   HYPRE_Int             fine_stencil_size;
   hypre_StructGrid     *fgrid;
   HYPRE_Int            *fgrid_ids;
   hypre_StructGrid     *cgrid;
   hypre_BoxArray       *cgrid_boxes;
   HYPRE_Int            *cgrid_ids;
   HYPRE_Int             fi, ci;
   HYPRE_Int             constant_coefficient;
   HYPRE_Int             constant_coefficient_A;

   fine_stencil = hypre_StructMatrixStencil(A);
   fine_stencil_size = hypre_StructStencilSize(fine_stencil);

   fgrid = hypre_StructMatrixGrid(A);
   fgrid_ids = hypre_StructGridIDs(fgrid);

   cgrid = hypre_StructMatrixGrid(RAP);
   cgrid_boxes = hypre_StructGridBoxes(cgrid);
   cgrid_ids = hypre_StructGridIDs(cgrid);

   /* The constant-coefficient modes of RAP, R, and P must agree; A may be
      variable (0) or variable-with-constant-offdiagonal (2) when RAP is
      variable, and must be fully constant (1) when RAP is constant. */
   constant_coefficient = hypre_StructMatrixConstantCoefficient(RAP);
   constant_coefficient_A = hypre_StructMatrixConstantCoefficient(A);
   hypre_assert( constant_coefficient==0 || constant_coefficient==1 );
   hypre_assert( hypre_StructMatrixConstantCoefficient(R) == constant_coefficient );
   hypre_assert( hypre_StructMatrixConstantCoefficient(P) == constant_coefficient );
   if (constant_coefficient==1 )
   {
      hypre_assert( constant_coefficient_A==1 );
   }
   else
   {
      hypre_assert( constant_coefficient_A==0 || constant_coefficient_A==2 );
   }

   fi = 0;
   hypre_ForBoxI(ci, cgrid_boxes)
   {
      /* advance fi until the fine box id matches the current coarse box id */
      while (fgrid_ids[fi] != cgrid_ids[ci])
      {
         fi++;
      }

      /*--------------------------------------------------------------------
       * Switch statement to direct control to appropriate BoxLoop depending
       * on stencil size. Default is full 27-point.
       *-----------------------------------------------------------------*/
      switch (fine_stencil_size)
      {
         /*--------------------------------------------------------------
          * Loop for symmetric 7-point fine grid operator; produces a
          * symmetric 19-point coarse grid operator. We calculate only the
          * lower triangular stencil entries: (below-south, below-west,
          * below-center, below-east, below-north, center-south,
          * center-west, and center-center).
          *--------------------------------------------------------------*/
         case 7:
            if ( constant_coefficient==1 )
            {
               hypre_PFMG3BuildRAPSym_onebox_FSS07_CC1(
                  ci, fi, A, P, R, cdir, cindex, cstride, RAP );
            }
            else
            {
               hypre_PFMG3BuildRAPSym_onebox_FSS07_CC0(
                  ci, fi, A, P, R, cdir, cindex, cstride, RAP );
            }
            break;

         /*--------------------------------------------------------------
          * Loop for symmetric 19-point fine grid operator; produces a
          * symmetric 27-point coarse grid operator. We calculate only the
          * lower triangular stencil entries: (below-southwest, below-south,
          * below-southeast, below-west, below-center, below-east,
          * below-northwest, below-north, below-northeast, center-southwest,
          * center-south, center-southeast, center-west, and center-center).
          *--------------------------------------------------------------*/
         case 19:
            if ( constant_coefficient==1 )
            {
               hypre_PFMG3BuildRAPSym_onebox_FSS19_CC1(
                  ci, fi, A, P, R, cdir, cindex, cstride, RAP );
            }
            else
            {
               hypre_PFMG3BuildRAPSym_onebox_FSS19_CC0(
                  ci, fi, A, P, R, cdir, cindex, cstride, RAP );
            }
            break;

         /*--------------------------------------------------------------
          * Loop for symmetric 27-point fine grid operator; produces a
          * symmetric 27-point coarse grid operator. We calculate only the
          * lower triangular stencil entries: (below-southwest, below-south,
          * below-southeast, below-west, below-center, below-east,
          * below-northwest, below-north, below-northeast, center-southwest,
          * center-south, center-southeast, center-west, and center-center).
          *--------------------------------------------------------------*/
         default:
            if ( constant_coefficient==1 )
            {
               hypre_PFMG3BuildRAPSym_onebox_FSS27_CC1(
                  ci, fi, A, P, R, cdir, cindex, cstride, RAP );
            }
            else
            {
               hypre_PFMG3BuildRAPSym_onebox_FSS27_CC0(
                  ci, fi, A, P, R, cdir, cindex, cstride, RAP );
            }
            break;

      } /* end switch statement */

   } /* end ForBoxI */

   return hypre_error_flag;
}
/* core part of hypre_PFMG3BuildRAPSym, for one box, one value of fine_stencil_size
(7) and one value of constant_coefficient (0). Within this function
there is a test on constant_coefficient_A as well. */
HYPRE_Int
hypre_PFMG3BuildRAPSym_onebox_FSS07_CC0(
   HYPRE_Int             ci,
   HYPRE_Int             fi,
   hypre_StructMatrix   *A,
   hypre_StructMatrix   *P,
   hypre_StructMatrix   *R,
   HYPRE_Int             cdir,
   hypre_Index           cindex,
   hypre_Index           cstride,
   hypre_StructMatrix   *RAP     )
{
   hypre_Index           index;
   hypre_Index           index_temp;

   hypre_StructGrid     *cgrid;
   hypre_BoxArray       *cgrid_boxes;
   HYPRE_Int            *cgrid_ids;
   hypre_Box            *cgrid_box;
   hypre_IndexRef        cstart;
   hypre_Index           stridec;
   hypre_Index           fstart;
   hypre_IndexRef        stridef;
   hypre_Index           loop_size;

   HYPRE_Int             constant_coefficient_A;

   hypre_Box            *A_dbox;
   hypre_Box            *P_dbox;
   hypre_Box            *R_dbox;
   hypre_Box            *RAP_dbox;

   double               *pa, *pb;
   double               *ra, *rb;

   double               *a_cc, *a_cw, *a_ce, *a_cs, *a_cn;
   double               *a_ac;
   double               *a_bc;

   /* scalar copies of off-diagonal A entries, used only when
      constant_coefficient_A == 2 (constant off-diagonal) */
   double                a_cs_offd, a_cs_offdm1, a_cs_offdp1;
   double                a_cn_offdm1;
   double                a_cw_offd, a_cw_offdm1, a_cw_offdp1;
   double                a_ce_offdm1;
   double                a_ac_offd, a_ac_offdm1;
   double                a_bc_offd, a_bc_offdm1, a_bc_offdp1;

   double               *rap_cc, *rap_cw, *rap_cs;
   double               *rap_bc, *rap_bw, *rap_be, *rap_bs, *rap_bn;
   double               *rap_csw, *rap_cse;

   HYPRE_Int             iA, iAm1, iAp1, iA_offd, iA_offdm1, iA_offdp1;
   HYPRE_Int             iAc;
   HYPRE_Int             iP, iP1;
   HYPRE_Int             iR;

   HYPRE_Int             zOffsetA;
   HYPRE_Int             zOffsetA_diag;
   HYPRE_Int             zOffsetA_offd;
   HYPRE_Int             xOffsetP;
   HYPRE_Int             yOffsetP;
   HYPRE_Int             zOffsetP;

   stridef = cstride;
   hypre_SetIndex(stridec, 1, 1, 1);

   cgrid = hypre_StructMatrixGrid(RAP);
   cgrid_boxes = hypre_StructGridBoxes(cgrid);
   cgrid_ids = hypre_StructGridIDs(cgrid);

   constant_coefficient_A = hypre_StructMatrixConstantCoefficient(A);

   /* Box iteration was hoisted into the caller (hypre_PFMG3BuildRAPSym);
      this routine handles the single box (ci, fi).  Kept for reference:
   fi = 0;
   hypre_ForBoxI(ci, cgrid_boxes)
      {
         while (fgrid_ids[fi] != cgrid_ids[ci])
         {
            fi++;
         }
   */

   cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);

   cstart = hypre_BoxIMin(cgrid_box);
   hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart);

   A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi);
   P_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(P), fi);
   R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi);
   RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);

   /*-----------------------------------------------------------------
    * Extract pointers for interpolation operator:
    * pa is pointer for weight for f-point above c-point
    * pb is pointer for weight for f-point below c-point
    *-----------------------------------------------------------------*/
   hypre_SetIndex(index_temp,0,0,-1);
   MapIndex(index_temp, cdir, index);
   pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index);

   /* pb is pre-shifted by one z-offset so that pb[iP] refers to the
      f-point below the c-point at iP */
   hypre_SetIndex(index_temp,0,0,1);
   MapIndex(index_temp, cdir, index);
   pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index) -
      hypre_BoxOffsetDistance(P_dbox, index);

   /*-----------------------------------------------------------------
    * Extract pointers for restriction operator:
    * ra is pointer for weight for f-point above c-point
    * rb is pointer for weight for f-point below c-point
    *-----------------------------------------------------------------*/
   hypre_SetIndex(index_temp,0,0,-1);
   MapIndex(index_temp, cdir, index);
   ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index);

   /* rb is pre-shifted the same way as pb above */
   hypre_SetIndex(index_temp,0,0,1);
   MapIndex(index_temp, cdir, index);
   rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index) -
      hypre_BoxOffsetDistance(R_dbox, index);

   /*-----------------------------------------------------------------
    * Extract pointers for 7-point fine grid operator:
    *
    * a_cc is pointer for center coefficient
    * a_cw is pointer for west coefficient in same plane
    * a_ce is pointer for east coefficient in same plane
    * a_cs is pointer for south coefficient in same plane
    * a_cn is pointer for north coefficient in same plane
    * a_ac is pointer for center coefficient in plane above
    * a_bc is pointer for center coefficient in plane below
    *-----------------------------------------------------------------*/
   hypre_SetIndex(index_temp,0,0,0);
   MapIndex(index_temp, cdir, index);
   a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex(index_temp,-1,0,0);
   MapIndex(index_temp, cdir, index);
   a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex(index_temp,1,0,0);
   MapIndex(index_temp, cdir, index);
   a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex(index_temp,0,-1,0);
   MapIndex(index_temp, cdir, index);
   a_cs = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex(index_temp,0,1,0);
   MapIndex(index_temp, cdir, index);
   a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex(index_temp,0,0,1);
   MapIndex(index_temp, cdir, index);
   a_ac = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex(index_temp,0,0,-1);
   MapIndex(index_temp, cdir, index);
   a_bc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   /*-----------------------------------------------------------------
    * Extract pointers for 19-point coarse grid operator:
    *
    * We build only the lower triangular part (plus diagonal).
    *
    * rap_cc is pointer for center coefficient (etc.)
    *-----------------------------------------------------------------*/
   hypre_SetIndex(index_temp,0,0,0);
   MapIndex(index_temp, cdir, index);
   rap_cc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex(index_temp,-1,0,0);
   MapIndex(index_temp, cdir, index);
   rap_cw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex(index_temp,0,-1,0);
   MapIndex(index_temp, cdir, index);
   rap_cs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex(index_temp,0,0,-1);
   MapIndex(index_temp, cdir, index);
   rap_bc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex(index_temp,-1,0,-1);
   MapIndex(index_temp, cdir, index);
   rap_bw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex(index_temp,1,0,-1);
   MapIndex(index_temp, cdir, index);
   rap_be = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex(index_temp,0,-1,-1);
   MapIndex(index_temp, cdir, index);
   rap_bs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex(index_temp,0,1,-1);
   MapIndex(index_temp, cdir, index);
   rap_bn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex(index_temp,-1,-1,0);
   MapIndex(index_temp, cdir, index);
   rap_csw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex(index_temp,1,-1,0);
   MapIndex(index_temp, cdir, index);
   rap_cse = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   /*-----------------------------------------------------------------
    * Define offsets for fine grid stencil and interpolation
    *
    * In the BoxLoop below I assume iA and iP refer to data associated
    * with the point which we are building the stencil for. The below
    * Offsets are used in referring to data associated with other points.
    *-----------------------------------------------------------------*/
   hypre_SetIndex(index_temp,0,0,1);
   MapIndex(index_temp, cdir, index);

   zOffsetP = hypre_BoxOffsetDistance(P_dbox,index);
   if ( constant_coefficient_A == 0 )
   {
      /* fully variable A: one data offset suffices */
      zOffsetA = hypre_BoxOffsetDistance(A_dbox,index);
   }
   else
   {
      /* constant_coefficient_A == 2: diagonal is variable (box offset),
         off-diagonal entries are constant (CC offset is 0) */
      zOffsetA_diag = hypre_BoxOffsetDistance(A_dbox,index);
      zOffsetA_offd = hypre_CCBoxOffsetDistance(A_dbox,index);
   }
   hypre_SetIndex(index_temp,0,1,0);
   MapIndex(index_temp, cdir, index);
   yOffsetP = hypre_BoxOffsetDistance(P_dbox,index);
   hypre_SetIndex(index_temp,1,0,0);
   MapIndex(index_temp, cdir, index);
   xOffsetP = hypre_BoxOffsetDistance(P_dbox,index);

   /*--------------------------------------------------------------------
    * Switch statement to direct control to appropriate BoxLoop depending
    * on stencil size. Default is full 27-point.
    *-----------------------------------------------------------------*/

   /*--------------------------------------------------------------
    * Loop for symmetric 7-point fine grid operator; produces a
    * symmetric 19-point coarse grid operator. We calculate only the
    * lower triangular stencil entries: (below-south, below-west,
    * below-center, below-east, below-north, center-south,
    * center-west, and center-center).
    *--------------------------------------------------------------*/

   hypre_BoxGetSize(cgrid_box, loop_size);

   if ( constant_coefficient_A == 0 )
   {
      hypre_BoxLoop4Begin(hypre_StructMatrixDim(A), loop_size,
                          P_dbox, cstart, stridec, iP,
                          R_dbox, cstart, stridec, iR,
                          A_dbox, fstart, stridef, iA,
                          RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
      hypre_BoxLoop4For(iP, iR, iA, iAc)
      {
         /* fine-grid points one plane below (iAm1) and above (iAp1) */
         iAm1 = iA - zOffsetA;
         iAp1 = iA + zOffsetA;

         iP1 = iP - zOffsetP - yOffsetP;
         rap_bs[iAc] = rb[iR] * a_cs[iAm1] * pa[iP1];

         iP1 = iP - zOffsetP - xOffsetP;
         rap_bw[iAc] = rb[iR] * a_cw[iAm1] * pa[iP1];

         iP1 = iP - zOffsetP;
         rap_bc[iAc] =          a_bc[iA]   * pa[iP1]
                       + rb[iR] * a_cc[iAm1] * pa[iP1]
                       + rb[iR] * a_bc[iAm1];

         iP1 = iP - zOffsetP + xOffsetP;
         rap_be[iAc] = rb[iR] * a_ce[iAm1] * pa[iP1];

         iP1 = iP - zOffsetP + yOffsetP;
         rap_bn[iAc] = rb[iR] * a_cn[iAm1] * pa[iP1];

         iP1 = iP - yOffsetP;
         rap_cs[iAc] =          a_cs[iA]
                       + rb[iR] * a_cs[iAm1] * pb[iP1]
                       + ra[iR] * a_cs[iAp1] * pa[iP1];

         iP1 = iP - xOffsetP;
         rap_cw[iAc] =          a_cw[iA]
                       + rb[iR] * a_cw[iAm1] * pb[iP1]
                       + ra[iR] * a_cw[iAp1] * pa[iP1];

         /* 7-point A contributes nothing to the in-plane corner entries */
         rap_csw[iAc] = 0.0;
         rap_cse[iAc] = 0.0;

         rap_cc[iAc] =          a_cc[iA]
                       + rb[iR] * a_cc[iAm1] * pb[iP]
                       + ra[iR] * a_cc[iAp1] * pa[iP]
                       + rb[iR] * a_ac[iAm1]
                       + ra[iR] * a_bc[iAp1]
                       +          a_bc[iA]   * pb[iP]
                       +          a_ac[iA]   * pa[iP];
      }
      hypre_BoxLoop4End(iP, iR, iA, iAc);
   }
   else
   {
      /* constant off-diagonal: load each off-diagonal coefficient once
         outside the loop instead of per grid point */
      iA_offd = hypre_CCBoxIndexRank(A_dbox,fstart);
      iA_offdm1 = iA_offd - zOffsetA_offd;
      iA_offdp1 = iA_offd + zOffsetA_offd;
      a_cs_offd = a_cs[iA_offd];
      a_cs_offdm1 = a_cs[iA_offdm1];
      a_cs_offdp1 = a_cs[iA_offdp1];
      a_cw_offd = a_cw[iA_offd];
      a_cw_offdm1 = a_cw[iA_offdm1];
      a_cw_offdp1 = a_cw[iA_offdp1];
      a_ce_offdm1 = a_ce[iA_offdm1];
      a_cn_offdm1 = a_cn[iA_offdm1];
      a_bc_offd = a_bc[iA_offd];
      a_bc_offdm1 = a_bc[iA_offdm1];
      a_bc_offdp1 = a_bc[iA_offdp1];
      a_ac_offd = a_ac[iA_offd];
      a_ac_offdm1 = a_ac[iA_offdm1];

      hypre_BoxLoop4Begin(hypre_StructMatrixDim(A), loop_size,
                          P_dbox, cstart, stridec, iP,
                          R_dbox, cstart, stridec, iR,
                          A_dbox, fstart, stridef, iA,
                          RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
      hypre_BoxLoop4For(iP, iR, iA, iAc)
      {
         /* only the (variable) diagonal of A is indexed per point */
         iAm1 = iA - zOffsetA_diag;
         iAp1 = iA + zOffsetA_diag;

         iP1 = iP - zOffsetP - yOffsetP;
         rap_bs[iAc] = rb[iR] * a_cs_offdm1 * pa[iP1];

         iP1 = iP - zOffsetP - xOffsetP;
         rap_bw[iAc] = rb[iR] * a_cw_offdm1 * pa[iP1];

         iP1 = iP - zOffsetP;
         rap_bc[iAc] =          a_bc_offd   * pa[iP1]
                       + rb[iR] * a_cc[iAm1] * pa[iP1]
                       + rb[iR] * a_bc_offdm1;

         iP1 = iP - zOffsetP + xOffsetP;
         rap_be[iAc] = rb[iR] * a_ce_offdm1 * pa[iP1];

         iP1 = iP - zOffsetP + yOffsetP;
         rap_bn[iAc] = rb[iR] * a_cn_offdm1 * pa[iP1];

         iP1 = iP - yOffsetP;
         rap_cs[iAc] =          a_cs_offd
                       + rb[iR] * a_cs_offdm1 * pb[iP1]
                       + ra[iR] * a_cs_offdp1 * pa[iP1];

         iP1 = iP - xOffsetP;
         rap_cw[iAc] =          a_cw_offd
                       + rb[iR] * a_cw_offdm1 * pb[iP1]
                       + ra[iR] * a_cw_offdp1 * pa[iP1];

         /* 7-point A contributes nothing to the in-plane corner entries */
         rap_csw[iAc] = 0.0;
         rap_cse[iAc] = 0.0;

         rap_cc[iAc] =          a_cc[iA]
                       + rb[iR] * a_cc[iAm1] * pb[iP]
                       + ra[iR] * a_cc[iAp1] * pa[iP]
                       + rb[iR] * a_ac_offdm1
                       + ra[iR] * a_bc_offdp1
                       +          a_bc_offd   * pb[iP]
                       +          a_ac_offd   * pa[iP];
      }
      hypre_BoxLoop4End(iP, iR, iA, iAc);
   }

   /*      }*/ /* end ForBoxI */

   return hypre_error_flag;
}
/* core part of hypre_PFMG3BuildRAPSym, for one box, one value of fine_stencil_size
(7) and one value of constant_coefficient (1). */
HYPRE_Int
hypre_PFMG3BuildRAPSym_onebox_FSS07_CC1(
   HYPRE_Int             ci,
   HYPRE_Int             fi,
   hypre_StructMatrix   *A,
   hypre_StructMatrix   *P,
   hypre_StructMatrix   *R,
   HYPRE_Int             cdir,
   hypre_Index           cindex,
   hypre_Index           cstride,
   hypre_StructMatrix   *RAP     )
{
   hypre_Index           index;
   hypre_Index           index_temp;

   hypre_StructGrid     *cgrid;
   hypre_BoxArray       *cgrid_boxes;
   HYPRE_Int            *cgrid_ids;
   hypre_Box            *cgrid_box;
   hypre_IndexRef        cstart;
   hypre_Index           stridec;
   hypre_Index           fstart;
   hypre_IndexRef        stridef;

   hypre_Box            *A_dbox;
   hypre_Box            *P_dbox;
   hypre_Box            *R_dbox;
   hypre_Box            *RAP_dbox;

   double               *pa, *pb;
   double               *ra, *rb;

   double               *a_cc, *a_cw, *a_ce, *a_cs, *a_cn;
   double               *a_ac;
   double               *a_bc;

   double               *rap_cc, *rap_cw, *rap_cs;
   double               *rap_bc, *rap_bw, *rap_be, *rap_bs, *rap_bn;
   double               *rap_csw, *rap_cse;

   HYPRE_Int             iA, iAm1, iAp1;
   HYPRE_Int             iAc;
   HYPRE_Int             iP, iP1;
   HYPRE_Int             iR;

   HYPRE_Int             zOffsetA;
   HYPRE_Int             xOffsetP;
   HYPRE_Int             yOffsetP;
   HYPRE_Int             zOffsetP;

   stridef = cstride;
   hypre_SetIndex(stridec, 1, 1, 1);

   cgrid = hypre_StructMatrixGrid(RAP);
   cgrid_boxes = hypre_StructGridBoxes(cgrid);
   cgrid_ids = hypre_StructGridIDs(cgrid);

   /* Box iteration was hoisted into the caller (hypre_PFMG3BuildRAPSym);
      this routine handles the single box (ci, fi).  Kept for reference:
   fi = 0;
   hypre_ForBoxI(ci, cgrid_boxes)
      {
         while (fgrid_ids[fi] != cgrid_ids[ci])
         {
            fi++;
         }
   */

   cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);

   cstart = hypre_BoxIMin(cgrid_box);
   hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart);

   A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi);
   P_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(P), fi);
   R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi);
   RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);

   /*-----------------------------------------------------------------
    * Extract pointers for interpolation operator:
    * pa is pointer for weight for f-point above c-point
    * pb is pointer for weight for f-point below c-point
    *
    * All operators are fully constant-coefficient here, so the
    * constant-coefficient (CC) offset/rank variants are used and each
    * coefficient is a single value rather than a per-point array.
    *-----------------------------------------------------------------*/
   hypre_SetIndex(index_temp,0,0,-1);
   MapIndex(index_temp, cdir, index);
   pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index);

   hypre_SetIndex(index_temp,0,0,1);
   MapIndex(index_temp, cdir, index);
   pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index) -
      hypre_CCBoxOffsetDistance(P_dbox, index);

   /*-----------------------------------------------------------------
    * Extract pointers for restriction operator:
    * ra is pointer for weight for f-point above c-point
    * rb is pointer for weight for f-point below c-point
    *-----------------------------------------------------------------*/
   hypre_SetIndex(index_temp,0,0,-1);
   MapIndex(index_temp, cdir, index);
   ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index);

   hypre_SetIndex(index_temp,0,0,1);
   MapIndex(index_temp, cdir, index);
   rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index) -
      hypre_CCBoxOffsetDistance(R_dbox, index);

   /*-----------------------------------------------------------------
    * Extract pointers for 7-point fine grid operator:
    *
    * a_cc is pointer for center coefficient
    * a_cw is pointer for west coefficient in same plane
    * a_ce is pointer for east coefficient in same plane
    * a_cs is pointer for south coefficient in same plane
    * a_cn is pointer for north coefficient in same plane
    * a_ac is pointer for center coefficient in plane above
    * a_bc is pointer for center coefficient in plane below
    *-----------------------------------------------------------------*/
   hypre_SetIndex(index_temp,0,0,0);
   MapIndex(index_temp, cdir, index);
   a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex(index_temp,-1,0,0);
   MapIndex(index_temp, cdir, index);
   a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex(index_temp,1,0,0);
   MapIndex(index_temp, cdir, index);
   a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex(index_temp,0,-1,0);
   MapIndex(index_temp, cdir, index);
   a_cs = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex(index_temp,0,1,0);
   MapIndex(index_temp, cdir, index);
   a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex(index_temp,0,0,1);
   MapIndex(index_temp, cdir, index);
   a_ac = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex(index_temp,0,0,-1);
   MapIndex(index_temp, cdir, index);
   a_bc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   /*-----------------------------------------------------------------
    * Extract pointers for 19-point coarse grid operator:
    *
    * We build only the lower triangular part (plus diagonal).
    *
    * rap_cc is pointer for center coefficient (etc.)
    *-----------------------------------------------------------------*/
   hypre_SetIndex(index_temp,0,0,0);
   MapIndex(index_temp, cdir, index);
   rap_cc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex(index_temp,-1,0,0);
   MapIndex(index_temp, cdir, index);
   rap_cw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex(index_temp,0,-1,0);
   MapIndex(index_temp, cdir, index);
   rap_cs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex(index_temp,0,0,-1);
   MapIndex(index_temp, cdir, index);
   rap_bc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex(index_temp,-1,0,-1);
   MapIndex(index_temp, cdir, index);
   rap_bw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex(index_temp,1,0,-1);
   MapIndex(index_temp, cdir, index);
   rap_be = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex(index_temp,0,-1,-1);
   MapIndex(index_temp, cdir, index);
   rap_bs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex(index_temp,0,1,-1);
   MapIndex(index_temp, cdir, index);
   rap_bn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex(index_temp,-1,-1,0);
   MapIndex(index_temp, cdir, index);
   rap_csw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex(index_temp,1,-1,0);
   MapIndex(index_temp, cdir, index);
   rap_cse = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   /*-----------------------------------------------------------------
    * Define offsets for fine grid stencil and interpolation
    *
    * In the BoxLoop below I assume iA and iP refer to data associated
    * with the point which we are building the stencil for. The below
    * Offsets are used in referring to data associated with other points.
    *-----------------------------------------------------------------*/
   hypre_SetIndex(index_temp,0,0,1);
   MapIndex(index_temp, cdir, index);

   zOffsetA = hypre_CCBoxOffsetDistance(A_dbox,index);
   zOffsetP = hypre_CCBoxOffsetDistance(P_dbox,index);
   hypre_SetIndex(index_temp,0,1,0);
   MapIndex(index_temp, cdir, index);
   yOffsetP = hypre_CCBoxOffsetDistance(P_dbox,index);
   hypre_SetIndex(index_temp,1,0,0);
   MapIndex(index_temp, cdir, index);
   xOffsetP = hypre_CCBoxOffsetDistance(P_dbox,index);

   /*--------------------------------------------------------------------
    * Switch statement to direct control to appropriate BoxLoop depending
    * on stencil size. Default is full 27-point.
    *-----------------------------------------------------------------*/

   /*--------------------------------------------------------------
    * Loop for symmetric 7-point fine grid operator; produces a
    * symmetric 19-point coarse grid operator. We calculate only the
    * lower triangular stencil entries: (below-south, below-west,
    * below-center, below-east, below-north, center-south,
    * center-west, and center-center).
    *
    * With constant coefficients every stencil entry is a single
    * value, so there is no BoxLoop: the RAP entries are computed
    * exactly once.
    *--------------------------------------------------------------*/

   iP = hypre_CCBoxIndexRank(P_dbox,cstart);
   iR = hypre_CCBoxIndexRank(R_dbox,cstart);
   iA = hypre_CCBoxIndexRank(A_dbox,fstart);
   iAc = hypre_CCBoxIndexRank(RAP_dbox,cstart);

   iAm1 = iA - zOffsetA;
   iAp1 = iA + zOffsetA;

   iP1 = iP - zOffsetP - yOffsetP;
   rap_bs[iAc] = rb[iR] * a_cs[iAm1] * pa[iP1];

   iP1 = iP - zOffsetP - xOffsetP;
   rap_bw[iAc] = rb[iR] * a_cw[iAm1] * pa[iP1];

   iP1 = iP - zOffsetP;
   rap_bc[iAc] =          a_bc[iA]   * pa[iP1]
                 + rb[iR] * a_cc[iAm1] * pa[iP1]
                 + rb[iR] * a_bc[iAm1];

   iP1 = iP - zOffsetP + xOffsetP;
   rap_be[iAc] = rb[iR] * a_ce[iAm1] * pa[iP1];

   iP1 = iP - zOffsetP + yOffsetP;
   rap_bn[iAc] = rb[iR] * a_cn[iAm1] * pa[iP1];

   iP1 = iP - yOffsetP;
   rap_cs[iAc] =          a_cs[iA]
                 + rb[iR] * a_cs[iAm1] * pb[iP1]
                 + ra[iR] * a_cs[iAp1] * pa[iP1];

   iP1 = iP - xOffsetP;
   rap_cw[iAc] =          a_cw[iA]
                 + rb[iR] * a_cw[iAm1] * pb[iP1]
                 + ra[iR] * a_cw[iAp1] * pa[iP1];

   /* 7-point A contributes nothing to the in-plane corner entries */
   rap_csw[iAc] = 0.0;
   rap_cse[iAc] = 0.0;

   rap_cc[iAc] =          a_cc[iA]
                 + rb[iR] * a_cc[iAm1] * pb[iP]
                 + ra[iR] * a_cc[iAp1] * pa[iP]
                 + rb[iR] * a_ac[iAm1]
                 + ra[iR] * a_bc[iAp1]
                 +          a_bc[iA]   * pb[iP]
                 +          a_ac[iA]   * pa[iP];

   /*      }*/ /* end ForBoxI */

   return hypre_error_flag;
}
/* core part of hypre_PFMG3BuildRAPSym, for one box, one value of fine_stencil_size
   (19) and one value of constant_coefficient (0). Within this function
there is a test on constant_coefficient_A as well. */
HYPRE_Int
hypre_PFMG3BuildRAPSym_onebox_FSS19_CC0(
HYPRE_Int ci,
HYPRE_Int fi,
hypre_StructMatrix *A,
hypre_StructMatrix *P,
hypre_StructMatrix *R,
HYPRE_Int cdir,
hypre_Index cindex,
hypre_Index cstride,
hypre_StructMatrix *RAP )
{
hypre_Index index;
hypre_Index index_temp;
hypre_StructGrid *cgrid;
hypre_BoxArray *cgrid_boxes;
HYPRE_Int *cgrid_ids;
hypre_Box *cgrid_box;
hypre_IndexRef cstart;
hypre_Index stridec;
hypre_Index fstart;
hypre_IndexRef stridef;
hypre_Index loop_size;
HYPRE_Int constant_coefficient_A;
hypre_Box *A_dbox;
hypre_Box *P_dbox;
hypre_Box *R_dbox;
hypre_Box *RAP_dbox;
double *pa, *pb;
double *ra, *rb;
double *a_cc, *a_cw, *a_ce, *a_cs, *a_cn;
double *a_ac, *a_aw, *a_as;
double *a_bc, *a_bw, *a_be, *a_bs, *a_bn;
double *a_csw, *a_cse, *a_cnw, *a_cne;
double a_cs_offd, a_cs_offdm1, a_cs_offdp1;
double a_csw_offd, a_csw_offdm1, a_csw_offdp1;
double a_cse_offd, a_cse_offdm1, a_cse_offdp1;
double a_cn_offdm1, a_cne_offdm1, a_cnw_offdm1;
double a_cw_offd, a_cw_offdm1, a_cw_offdp1;
double a_ce_offdm1;
double a_ac_offd, a_ac_offdm1;
double a_aw_offd, a_aw_offdm1;
double a_as_offd, a_as_offdm1;
double a_bc_offd, a_bc_offdm1, a_bc_offdp1;
double a_be_offd, a_be_offdm1;
double a_bn_offd, a_bn_offdm1;
double a_bw_offd, a_bw_offdm1, a_bw_offdp1;
double a_bs_offd, a_bs_offdm1, a_bs_offdp1;
double *rap_cc, *rap_cw, *rap_cs;
double *rap_bc, *rap_bw, *rap_be, *rap_bs, *rap_bn;
double *rap_csw, *rap_cse;
double *rap_bsw, *rap_bse, *rap_bnw, *rap_bne;
HYPRE_Int iA, iAm1, iAp1, iA_offd, iA_offdm1, iA_offdp1;
HYPRE_Int iAc;
HYPRE_Int iP, iP1;
HYPRE_Int iR;
HYPRE_Int zOffsetA;
HYPRE_Int zOffsetA_diag;
HYPRE_Int zOffsetA_offd;
HYPRE_Int xOffsetP;
HYPRE_Int yOffsetP;
HYPRE_Int zOffsetP;
stridef = cstride;
hypre_SetIndex(stridec, 1, 1, 1);
cgrid = hypre_StructMatrixGrid(RAP);
cgrid_boxes = hypre_StructGridBoxes(cgrid);
cgrid_ids = hypre_StructGridIDs(cgrid);
constant_coefficient_A = hypre_StructMatrixConstantCoefficient(A);
/*
fi = 0;
hypre_ForBoxI(ci, cgrid_boxes)
{
while (fgrid_ids[fi] != cgrid_ids[ci])
{
fi++;
}
*/
cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);
cstart = hypre_BoxIMin(cgrid_box);
hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart);
A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi);
P_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(P), fi);
R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi);
RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);
/*-----------------------------------------------------------------
* Extract pointers for interpolation operator:
* pa is pointer for weight for f-point above c-point
* pb is pointer for weight for f-point below c-point
*-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,-1);
MapIndex(index_temp, cdir, index);
pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index);
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index) -
hypre_BoxOffsetDistance(P_dbox, index);
/*-----------------------------------------------------------------
* Extract pointers for restriction operator:
* ra is pointer for weight for f-point above c-point
* rb is pointer for weight for f-point below c-point
*-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,-1);
MapIndex(index_temp, cdir, index);
ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index);
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index) -
hypre_BoxOffsetDistance(R_dbox, index);
/*-----------------------------------------------------------------
* Extract pointers for 7-point fine grid operator:
*
* a_cc is pointer for center coefficient
* a_cw is pointer for west coefficient in same plane
* a_ce is pointer for east coefficient in same plane
* a_cs is pointer for south coefficient in same plane
* a_cn is pointer for north coefficient in same plane
* a_ac is pointer for center coefficient in plane above
* a_bc is pointer for center coefficient in plane below
*-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,0);
MapIndex(index_temp, cdir, index);
a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,0,0);
MapIndex(index_temp, cdir, index);
a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
a_cs = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
a_ac = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,0,-1);
MapIndex(index_temp, cdir, index);
a_bc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
/*-----------------------------------------------------------------
* Extract additional pointers for 19-point fine grid operator:
*
* a_aw is pointer for west coefficient in plane above
* a_ae is pointer for east coefficient in plane above
* a_as is pointer for south coefficient in plane above
* a_an is pointer for north coefficient in plane above
* a_bw is pointer for west coefficient in plane below
* a_be is pointer for east coefficient in plane below
* a_bs is pointer for south coefficient in plane below
* a_bn is pointer for north coefficient in plane below
* a_csw is pointer for southwest coefficient in same plane
* a_cse is pointer for southeast coefficient in same plane
* a_cnw is pointer for northwest coefficient in same plane
* a_cne is pointer for northeast coefficient in same plane
*-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,-1,0,1);
MapIndex(index_temp, cdir, index);
a_aw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,-1,1);
MapIndex(index_temp, cdir, index);
a_as = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,0,-1);
MapIndex(index_temp, cdir, index);
a_bw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,0,-1);
MapIndex(index_temp, cdir, index);
a_be = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,-1,-1);
MapIndex(index_temp, cdir, index);
a_bs = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,1,-1);
MapIndex(index_temp, cdir, index);
a_bn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,-1,0);
MapIndex(index_temp, cdir, index);
a_csw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,-1,0);
MapIndex(index_temp, cdir, index);
a_cse = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,1,0);
MapIndex(index_temp, cdir, index);
a_cnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,1,0);
MapIndex(index_temp, cdir, index);
a_cne = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
/*-----------------------------------------------------------------
* Extract pointers for 19-point coarse grid operator:
*
* We build only the lower triangular part (plus diagonal).
*
* rap_cc is pointer for center coefficient (etc.)
*-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,0);
MapIndex(index_temp, cdir, index);
rap_cc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,0,0);
MapIndex(index_temp, cdir, index);
rap_cw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
rap_cs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,0,-1);
MapIndex(index_temp, cdir, index);
rap_bc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,0,-1);
MapIndex(index_temp, cdir, index);
rap_bw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,0,-1);
MapIndex(index_temp, cdir, index);
rap_be = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,-1,-1);
MapIndex(index_temp, cdir, index);
rap_bs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,1,-1);
MapIndex(index_temp, cdir, index);
rap_bn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,-1,0);
MapIndex(index_temp, cdir, index);
rap_csw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,-1,0);
MapIndex(index_temp, cdir, index);
rap_cse = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
/*-----------------------------------------------------------------
* Extract additional pointers for 27-point coarse grid operator:
*
* A 27-point coarse grid operator is produced when the fine grid
* stencil is 19 or 27 point.
*
* We build only the lower triangular part.
*
* rap_csw is pointer for southwest coefficient in same plane (etc.)
*-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,-1,-1,-1);
MapIndex(index_temp, cdir, index);
rap_bsw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,-1,-1);
MapIndex(index_temp, cdir, index);
rap_bse = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,1,-1);
MapIndex(index_temp, cdir, index);
rap_bnw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,1,-1);
MapIndex(index_temp, cdir, index);
rap_bne = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
/*-----------------------------------------------------------------
* Define offsets for fine grid stencil and interpolation
*
* In the BoxLoop below I assume iA and iP refer to data associated
* with the point which we are building the stencil for. The below
* Offsets are used in refering to data associated with other points.
*-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
zOffsetP = hypre_BoxOffsetDistance(P_dbox,index);
if ( constant_coefficient_A == 0 )
{
zOffsetA = hypre_BoxOffsetDistance(A_dbox,index);
}
else
{
zOffsetA_diag = hypre_BoxOffsetDistance(A_dbox,index);
zOffsetA_offd = hypre_CCBoxOffsetDistance(A_dbox,index);
}
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
yOffsetP = hypre_BoxOffsetDistance(P_dbox,index);
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
xOffsetP = hypre_BoxOffsetDistance(P_dbox,index);
/*--------------------------------------------------------------------
* Switch statement to direct control to apropriate BoxLoop depending
* on stencil size. Default is full 27-point.
*-----------------------------------------------------------------*/
/*--------------------------------------------------------------
* Loop for symmetric 19-point fine grid operator; produces a
* symmetric 27-point coarse grid operator. We calculate only the
* lower triangular stencil entries: (below-southwest, below-south,
* below-southeast, below-west, below-center, below-east,
* below-northwest, below-north, below-northeast, center-southwest,
* center-south, center-southeast, center-west, and center-center).
*--------------------------------------------------------------*/
hypre_BoxGetSize(cgrid_box, loop_size);
if ( constant_coefficient_A==0 )
{
hypre_BoxLoop4Begin(hypre_StructMatrixDim(A), loop_size,
P_dbox, cstart, stridec, iP,
R_dbox, cstart, stridec, iR,
A_dbox, fstart, stridef, iA,
RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop4For(iP, iR, iA, iAc)
{
iAm1 = iA - zOffsetA;
iAp1 = iA + zOffsetA;
iP1 = iP - zOffsetP - yOffsetP - xOffsetP;
rap_bsw[iAc] = rb[iR] * a_csw[iAm1] * pa[iP1];
iP1 = iP - zOffsetP - yOffsetP;
rap_bs[iAc] = rb[iR] * a_cs[iAm1] * pa[iP1]
+ rb[iR] * a_bs[iAm1]
+ a_bs[iA] * pa[iP1];
iP1 = iP - zOffsetP - yOffsetP + xOffsetP;
rap_bse[iAc] = rb[iR] * a_cse[iAm1] * pa[iP1];
iP1 = iP - zOffsetP - xOffsetP;
rap_bw[iAc] = rb[iR] * a_cw[iAm1] * pa[iP1]
+ rb[iR] * a_bw[iAm1]
+ a_bw[iA] * pa[iP1];
iP1 = iP - zOffsetP;
rap_bc[iAc] = a_bc[iA] * pa[iP1]
+ rb[iR] * a_cc[iAm1] * pa[iP1]
+ rb[iR] * a_bc[iAm1];
iP1 = iP - zOffsetP + xOffsetP;
rap_be[iAc] = rb[iR] * a_ce[iAm1] * pa[iP1]
+ rb[iR] * a_be[iAm1]
+ a_be[iA] * pa[iP1];
iP1 = iP - zOffsetP + yOffsetP - xOffsetP;
rap_bnw[iAc] = rb[iR] * a_cnw[iAm1] * pa[iP1];
iP1 = iP - zOffsetP + yOffsetP;
rap_bn[iAc] = rb[iR] * a_cn[iAm1] * pa[iP1]
+ rb[iR] * a_bn[iAm1]
+ a_bn[iA] * pa[iP1];
iP1 = iP - zOffsetP + yOffsetP + xOffsetP;
rap_bne[iAc] = rb[iR] * a_cne[iAm1] * pa[iP1];
iP1 = iP - yOffsetP - xOffsetP;
rap_csw[iAc] = a_csw[iA]
+ rb[iR] * a_csw[iAm1] * pb[iP1]
+ ra[iR] * a_csw[iAp1] * pa[iP1];
iP1 = iP - yOffsetP;
rap_cs[iAc] = a_cs[iA]
+ rb[iR] * a_cs[iAm1] * pb[iP1]
+ ra[iR] * a_cs[iAp1] * pa[iP1]
+ a_bs[iA] * pb[iP1]
+ a_as[iA] * pa[iP1]
+ rb[iR] * a_as[iAm1]
+ ra[iR] * a_bs[iAp1];
iP1 = iP - yOffsetP + xOffsetP;
rap_cse[iAc] = a_cse[iA]
+ rb[iR] * a_cse[iAm1] * pb[iP1]
+ ra[iR] * a_cse[iAp1] * pa[iP1];
iP1 = iP - xOffsetP;
rap_cw[iAc] = a_cw[iA]
+ rb[iR] * a_cw[iAm1] * pb[iP1]
+ ra[iR] * a_cw[iAp1] * pa[iP1]
+ a_bw[iA] * pb[iP1]
+ a_aw[iA] * pa[iP1]
+ rb[iR] * a_aw[iAm1]
+ ra[iR] * a_bw[iAp1];
rap_cc[iAc] = a_cc[iA]
+ rb[iR] * a_cc[iAm1] * pb[iP]
+ ra[iR] * a_cc[iAp1] * pa[iP]
+ rb[iR] * a_ac[iAm1]
+ ra[iR] * a_bc[iAp1]
+ a_bc[iA] * pb[iP]
+ a_ac[iA] * pa[iP];
}
hypre_BoxLoop4End(iP, iR, iA, iAc);
}
else
{
iA_offd = hypre_CCBoxIndexRank(A_dbox,fstart);
iA_offdm1 = iA_offd - zOffsetA_offd;
iA_offdp1 = iA_offd + zOffsetA_offd;
a_cs_offd = a_cs[iA_offd];
a_cs_offdm1 = a_cs[iA_offdm1];
a_cs_offdp1 = a_cs[iA_offdp1];
a_cw_offd = a_cw[iA_offd];
a_cw_offdm1 = a_cw[iA_offdm1];
a_cw_offdp1 = a_cw[iA_offdp1];
a_ce_offdm1 = a_ce[iA_offdm1];
a_csw_offd = a_csw[iA_offd];
a_csw_offdm1 = a_csw[iA_offdm1];
a_csw_offdp1 = a_csw[iA_offdp1];
a_cse_offd = a_cse[iA_offd];
a_cse_offdm1 = a_cse[iA_offdm1];
a_cse_offdp1 = a_cse[iA_offdp1];
a_cn_offdm1 = a_cn[iA_offdm1];
a_cne_offdm1 = a_cne[iA_offdm1];
a_cnw_offdm1 = a_cnw[iA_offdm1];
a_ac_offd = a_ac[iA_offd];
a_ac_offdm1 = a_ac[iA_offdm1];
a_aw_offd = a_aw[iA_offd];
a_aw_offdm1 = a_aw[iA_offdm1];
a_as_offd = a_as[iA_offd];
a_as_offdm1 = a_as[iA_offdm1];
a_bc_offd = a_bc[iA_offd];
a_bc_offdm1 = a_bc[iA_offdm1];
a_bc_offdp1 = a_bc[iA_offdp1];
a_be_offd = a_be[iA_offd];
a_be_offdm1 = a_be[iA_offdm1];
a_bn_offd = a_bn[iA_offd];
a_bn_offdm1 = a_bn[iA_offdm1];
a_bw_offd = a_bw[iA_offd];
a_bw_offdm1 = a_bw[iA_offdm1];
a_bw_offdp1 = a_bw[iA_offdp1];
a_bs_offd = a_bs[iA_offd];
a_bs_offdm1 = a_bs[iA_offdm1];
a_bs_offdp1 = a_bs[iA_offdp1];
hypre_BoxLoop4Begin(hypre_StructMatrixDim(A), loop_size,
P_dbox, cstart, stridec, iP,
R_dbox, cstart, stridec, iR,
A_dbox, fstart, stridef, iA,
RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop4For(iP, iR, iA, iAc)
{
iAm1 = iA - zOffsetA_diag;
iAp1 = iA + zOffsetA_diag;
iP1 = iP - zOffsetP - yOffsetP - xOffsetP;
rap_bsw[iAc] = rb[iR] * a_csw_offdm1 * pa[iP1];
iP1 = iP - zOffsetP - yOffsetP;
rap_bs[iAc] = rb[iR] * a_cs_offdm1 * pa[iP1]
+ rb[iR] * a_bs_offdm1
+ a_bs_offd * pa[iP1];
iP1 = iP - zOffsetP - yOffsetP + xOffsetP;
rap_bse[iAc] = rb[iR] * a_cse_offdm1 * pa[iP1];
iP1 = iP - zOffsetP - xOffsetP;
rap_bw[iAc] = rb[iR] * a_cw_offdm1 * pa[iP1]
+ rb[iR] * a_bw_offdm1
+ a_bw_offd * pa[iP1];
iP1 = iP - zOffsetP;
rap_bc[iAc] = a_bc_offd * pa[iP1]
+ rb[iR] * a_cc[iAm1] * pa[iP1]
+ rb[iR] * a_bc_offdm1;
iP1 = iP - zOffsetP + xOffsetP;
rap_be[iAc] = rb[iR] * a_ce_offdm1 * pa[iP1]
+ rb[iR] * a_be_offdm1
+ a_be_offd * pa[iP1];
iP1 = iP - zOffsetP + yOffsetP - xOffsetP;
rap_bnw[iAc] = rb[iR] * a_cnw_offdm1 * pa[iP1];
iP1 = iP - zOffsetP + yOffsetP;
rap_bn[iAc] = rb[iR] * a_cn_offdm1 * pa[iP1]
+ rb[iR] * a_bn_offdm1
+ a_bn_offd * pa[iP1];
iP1 = iP - zOffsetP + yOffsetP + xOffsetP;
rap_bne[iAc] = rb[iR] * a_cne_offdm1 * pa[iP1];
iP1 = iP - yOffsetP - xOffsetP;
rap_csw[iAc] = a_csw_offd
+ rb[iR] * a_csw_offdm1 * pb[iP1]
+ ra[iR] * a_csw_offdp1 * pa[iP1];
iP1 = iP - yOffsetP;
rap_cs[iAc] = a_cs_offd
+ rb[iR] * a_cs_offdm1 * pb[iP1]
+ ra[iR] * a_cs_offdp1 * pa[iP1]
+ a_bs_offd * pb[iP1]
+ a_as_offd * pa[iP1]
+ rb[iR] * a_as_offdm1
+ ra[iR] * a_bs_offdp1;
iP1 = iP - yOffsetP + xOffsetP;
rap_cse[iAc] = a_cse_offd
+ rb[iR] * a_cse_offdm1 * pb[iP1]
+ ra[iR] * a_cse_offdp1 * pa[iP1];
iP1 = iP - xOffsetP;
rap_cw[iAc] = a_cw_offd
+ rb[iR] * a_cw_offdm1 * pb[iP1]
+ ra[iR] * a_cw_offdp1 * pa[iP1]
+ a_bw_offd * pb[iP1]
+ a_aw_offd * pa[iP1]
+ rb[iR] * a_aw_offdm1
+ ra[iR] * a_bw_offdp1;
rap_cc[iAc] = a_cc[iA]
+ rb[iR] * a_cc[iAm1] * pb[iP]
+ ra[iR] * a_cc[iAp1] * pa[iP]
+ rb[iR] * a_ac_offdm1
+ ra[iR] * a_bc_offdp1
+ a_bc_offd * pb[iP]
+ a_ac_offd * pa[iP];
}
hypre_BoxLoop4End(iP, iR, iA, iAc);
}
/* }*/ /* end ForBoxI */
return hypre_error_flag;
}
/* core part of hypre_PFMG3BuildRAPSym, for one box, one value of fine_stencil_size
(19) and one value of constant_coefficient (1). */
/* Same stencil computation as the FSS19_CC0 variant, but for a fully
constant-coefficient problem: all offsets come from
hypre_CCBoxOffsetDistance and the stencil is evaluated exactly once per
box (no BoxLoop). Writes the lower-triangular RAP entries and returns
hypre_error_flag. */
HYPRE_Int
hypre_PFMG3BuildRAPSym_onebox_FSS19_CC1(
HYPRE_Int ci, /* index of the coarse grid box to build */
HYPRE_Int fi, /* index of the corresponding fine grid box */
hypre_StructMatrix *A, /* fine grid operator (19-point) */
hypre_StructMatrix *P, /* interpolation operator */
hypre_StructMatrix *R, /* restriction operator */
HYPRE_Int cdir, /* coarsening direction */
hypre_Index cindex,
hypre_Index cstride,
hypre_StructMatrix *RAP ) /* output: coarse grid operator */
{
hypre_Index index;
hypre_Index index_temp;
hypre_StructGrid *cgrid;
hypre_BoxArray *cgrid_boxes;
HYPRE_Int *cgrid_ids;
hypre_Box *cgrid_box;
hypre_IndexRef cstart;
hypre_Index stridec;
hypre_Index fstart;
hypre_IndexRef stridef;
hypre_Box *A_dbox;
hypre_Box *P_dbox;
hypre_Box *R_dbox;
hypre_Box *RAP_dbox;
double *pa, *pb;
double *ra, *rb;
double *a_cc, *a_cw, *a_ce, *a_cs, *a_cn;
double *a_ac, *a_aw, *a_as;
double *a_bc, *a_bw, *a_be, *a_bs, *a_bn;
double *a_csw, *a_cse, *a_cnw, *a_cne;
double *rap_cc, *rap_cw, *rap_cs;
double *rap_bc, *rap_bw, *rap_be, *rap_bs, *rap_bn;
double *rap_csw, *rap_cse;
double *rap_bsw, *rap_bse, *rap_bnw, *rap_bne;
HYPRE_Int iA, iAm1, iAp1;
HYPRE_Int iAc;
HYPRE_Int iP, iP1;
HYPRE_Int iR;
HYPRE_Int zOffsetA;
HYPRE_Int xOffsetP;
HYPRE_Int yOffsetP;
HYPRE_Int zOffsetP;
stridef = cstride;
hypre_SetIndex(stridec, 1, 1, 1);
cgrid = hypre_StructMatrixGrid(RAP);
cgrid_boxes = hypre_StructGridBoxes(cgrid);
cgrid_ids = hypre_StructGridIDs(cgrid);
/*
fi = 0;
hypre_ForBoxI(ci, cgrid_boxes)
{
while (fgrid_ids[fi] != cgrid_ids[ci])
{
fi++;
}
*/
cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);
cstart = hypre_BoxIMin(cgrid_box);
hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart);
A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi);
P_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(P), fi);
R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi);
RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);
/*-----------------------------------------------------------------
* Extract pointers for interpolation operator:
* pa is pointer for weight for f-point above c-point
* pb is pointer for weight for f-point below c-point
*-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,-1);
MapIndex(index_temp, cdir, index);
pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index);
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
/* pb is pre-shifted by the constant-coefficient box offset so that
pb[iP] addresses the f-point below the c-point */
pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index) -
hypre_CCBoxOffsetDistance(P_dbox, index);
/*-----------------------------------------------------------------
* Extract pointers for restriction operator:
* ra is pointer for weight for f-point above c-point
* rb is pointer for weight for f-point below c-point
*-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,-1);
MapIndex(index_temp, cdir, index);
ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index);
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
/* rb is pre-shifted the same way as pb (see above) */
rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index) -
hypre_CCBoxOffsetDistance(R_dbox, index);
/*-----------------------------------------------------------------
* Extract pointers for 7-point fine grid operator:
*
* a_cc is pointer for center coefficient
* a_cw is pointer for west coefficient in same plane
* a_ce is pointer for east coefficient in same plane
* a_cs is pointer for south coefficient in same plane
* a_cn is pointer for north coefficient in same plane
* a_ac is pointer for center coefficient in plane above
* a_bc is pointer for center coefficient in plane below
*-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,0);
MapIndex(index_temp, cdir, index);
a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,0,0);
MapIndex(index_temp, cdir, index);
a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
a_cs = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
a_ac = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,0,-1);
MapIndex(index_temp, cdir, index);
a_bc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
/*-----------------------------------------------------------------
* Extract additional pointers for 19-point fine grid operator:
*
* a_aw is pointer for west coefficient in plane above
* a_ae is pointer for east coefficient in plane above
* a_as is pointer for south coefficient in plane above
* a_an is pointer for north coefficient in plane above
* a_bw is pointer for west coefficient in plane below
* a_be is pointer for east coefficient in plane below
* a_bs is pointer for south coefficient in plane below
* a_bn is pointer for north coefficient in plane below
* a_csw is pointer for southwest coefficient in same plane
* a_cse is pointer for southeast coefficient in same plane
* a_cnw is pointer for northwest coefficient in same plane
* a_cne is pointer for northeast coefficient in same plane
*-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,-1,0,1);
MapIndex(index_temp, cdir, index);
a_aw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,-1,1);
MapIndex(index_temp, cdir, index);
a_as = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,0,-1);
MapIndex(index_temp, cdir, index);
a_bw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,0,-1);
MapIndex(index_temp, cdir, index);
a_be = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,-1,-1);
MapIndex(index_temp, cdir, index);
a_bs = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,1,-1);
MapIndex(index_temp, cdir, index);
a_bn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,-1,0);
MapIndex(index_temp, cdir, index);
a_csw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,-1,0);
MapIndex(index_temp, cdir, index);
a_cse = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,1,0);
MapIndex(index_temp, cdir, index);
a_cnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,1,0);
MapIndex(index_temp, cdir, index);
a_cne = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
/*-----------------------------------------------------------------
* Extract pointers for 19-point coarse grid operator:
*
* We build only the lower triangular part (plus diagonal).
*
* rap_cc is pointer for center coefficient (etc.)
*-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,0);
MapIndex(index_temp, cdir, index);
rap_cc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,0,0);
MapIndex(index_temp, cdir, index);
rap_cw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
rap_cs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,0,-1);
MapIndex(index_temp, cdir, index);
rap_bc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,0,-1);
MapIndex(index_temp, cdir, index);
rap_bw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,0,-1);
MapIndex(index_temp, cdir, index);
rap_be = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,-1,-1);
MapIndex(index_temp, cdir, index);
rap_bs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,1,-1);
MapIndex(index_temp, cdir, index);
rap_bn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,-1,0);
MapIndex(index_temp, cdir, index);
rap_csw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,-1,0);
MapIndex(index_temp, cdir, index);
rap_cse = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
/*-----------------------------------------------------------------
* Extract additional pointers for 27-point coarse grid operator:
*
* A 27-point coarse grid operator is produced when the fine grid
* stencil is 19 or 27 point.
*
* We build only the lower triangular part.
*
* rap_csw is pointer for southwest coefficient in same plane (etc.)
*-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,-1,-1,-1);
MapIndex(index_temp, cdir, index);
rap_bsw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,-1,-1);
MapIndex(index_temp, cdir, index);
rap_bse = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,1,-1);
MapIndex(index_temp, cdir, index);
rap_bnw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,1,-1);
MapIndex(index_temp, cdir, index);
rap_bne = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
/*-----------------------------------------------------------------
* Define offsets for fine grid stencil and interpolation
*
* In the BoxLoop below I assume iA and iP refer to data associated
* with the point which we are building the stencil for. The below
* Offsets are used in refering to data associated with other points.
*-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
zOffsetA = hypre_CCBoxOffsetDistance(A_dbox,index);
zOffsetP = hypre_CCBoxOffsetDistance(P_dbox,index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
yOffsetP = hypre_CCBoxOffsetDistance(P_dbox,index);
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
xOffsetP = hypre_CCBoxOffsetDistance(P_dbox,index);
/*--------------------------------------------------------------------
* Switch statement to direct control to apropriate BoxLoop depending
* on stencil size. Default is full 27-point.
*-----------------------------------------------------------------*/
/*--------------------------------------------------------------
* Loop for symmetric 19-point fine grid operator; produces a
* symmetric 27-point coarse grid operator. We calculate only the
* lower triangular stencil entries: (below-southwest, below-south,
* below-southeast, below-west, below-center, below-east,
* below-northwest, below-north, below-northeast, center-southwest,
* center-south, center-southeast, center-west, and center-center).
*--------------------------------------------------------------*/
/* constant coefficients: a single evaluation at the box's first
point suffices, so the indices are fixed and there is no BoxLoop */
iP = hypre_CCBoxIndexRank(P_dbox,cstart);
iR = hypre_CCBoxIndexRank(R_dbox,cstart);
iA = hypre_CCBoxIndexRank(A_dbox,fstart);
iAc = hypre_CCBoxIndexRank(RAP_dbox,cstart);
iAm1 = iA - zOffsetA;
iAp1 = iA + zOffsetA;
iP1 = iP - zOffsetP - yOffsetP - xOffsetP;
rap_bsw[iAc] = rb[iR] * a_csw[iAm1] * pa[iP1];
iP1 = iP - zOffsetP - yOffsetP;
rap_bs[iAc] = rb[iR] * a_cs[iAm1] * pa[iP1]
+ rb[iR] * a_bs[iAm1]
+ a_bs[iA] * pa[iP1];
iP1 = iP - zOffsetP - yOffsetP + xOffsetP;
rap_bse[iAc] = rb[iR] * a_cse[iAm1] * pa[iP1];
iP1 = iP - zOffsetP - xOffsetP;
rap_bw[iAc] = rb[iR] * a_cw[iAm1] * pa[iP1]
+ rb[iR] * a_bw[iAm1]
+ a_bw[iA] * pa[iP1];
iP1 = iP - zOffsetP;
rap_bc[iAc] = a_bc[iA] * pa[iP1]
+ rb[iR] * a_cc[iAm1] * pa[iP1]
+ rb[iR] * a_bc[iAm1];
iP1 = iP - zOffsetP + xOffsetP;
rap_be[iAc] = rb[iR] * a_ce[iAm1] * pa[iP1]
+ rb[iR] * a_be[iAm1]
+ a_be[iA] * pa[iP1];
iP1 = iP - zOffsetP + yOffsetP - xOffsetP;
rap_bnw[iAc] = rb[iR] * a_cnw[iAm1] * pa[iP1];
iP1 = iP - zOffsetP + yOffsetP;
rap_bn[iAc] = rb[iR] * a_cn[iAm1] * pa[iP1]
+ rb[iR] * a_bn[iAm1]
+ a_bn[iA] * pa[iP1];
iP1 = iP - zOffsetP + yOffsetP + xOffsetP;
rap_bne[iAc] = rb[iR] * a_cne[iAm1] * pa[iP1];
iP1 = iP - yOffsetP - xOffsetP;
rap_csw[iAc] = a_csw[iA]
+ rb[iR] * a_csw[iAm1] * pb[iP1]
+ ra[iR] * a_csw[iAp1] * pa[iP1];
iP1 = iP - yOffsetP;
rap_cs[iAc] = a_cs[iA]
+ rb[iR] * a_cs[iAm1] * pb[iP1]
+ ra[iR] * a_cs[iAp1] * pa[iP1]
+ a_bs[iA] * pb[iP1]
+ a_as[iA] * pa[iP1]
+ rb[iR] * a_as[iAm1]
+ ra[iR] * a_bs[iAp1];
iP1 = iP - yOffsetP + xOffsetP;
rap_cse[iAc] = a_cse[iA]
+ rb[iR] * a_cse[iAm1] * pb[iP1]
+ ra[iR] * a_cse[iAp1] * pa[iP1];
iP1 = iP - xOffsetP;
rap_cw[iAc] = a_cw[iA]
+ rb[iR] * a_cw[iAm1] * pb[iP1]
+ ra[iR] * a_cw[iAp1] * pa[iP1]
+ a_bw[iA] * pb[iP1]
+ a_aw[iA] * pa[iP1]
+ rb[iR] * a_aw[iAm1]
+ ra[iR] * a_bw[iAp1];
rap_cc[iAc] = a_cc[iA]
+ rb[iR] * a_cc[iAm1] * pb[iP]
+ ra[iR] * a_cc[iAp1] * pa[iP]
+ rb[iR] * a_ac[iAm1]
+ ra[iR] * a_bc[iAp1]
+ a_bc[iA] * pb[iP]
+ a_ac[iA] * pa[iP];
/* }*/ /* end ForBoxI */
return hypre_error_flag;
}
/* core part of hypre_PFMG3BuildRAPSym, for one box, one value of fine_stencil_size
(27) and one value of constant_coefficient (0). Within this functions
there is a test on constant_coefficient_A as well. */
/*--------------------------------------------------------------------------
 * Build the lower-triangular (symmetric) part of the coarse operator
 * RAP = R*A*P for ONE coarse/fine box pair (ci, fi), specialized to a
 * 27-point fine-grid stencil (FSS27) with variable-coefficient P and R
 * (constant_coefficient == 0).  A may itself be variable or have
 * constant off-diagonal coefficients; constant_coefficient_A is tested
 * below and selects one of two BoxLoop variants.
 *
 *   ci, fi          - indices of the coarse and fine boxes in the
 *                     respective matrix data spaces
 *   A               - fine-grid operator
 *   P, R            - interpolation and restriction operators
 *   cdir            - coarsening direction; MapIndex permutes the
 *                     canonical stencil offsets accordingly
 *   cindex, cstride - map from coarse to fine index space
 *   RAP             - coarse-grid operator being filled in (output)
 *
 * Returns hypre_error_flag.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_PFMG3BuildRAPSym_onebox_FSS27_CC0(
HYPRE_Int ci,
HYPRE_Int fi,
hypre_StructMatrix *A,
hypre_StructMatrix *P,
hypre_StructMatrix *R,
HYPRE_Int cdir,
hypre_Index cindex,
hypre_Index cstride,
hypre_StructMatrix *RAP )
{
hypre_Index index;
hypre_Index index_temp;
hypre_StructGrid *cgrid;
hypre_BoxArray *cgrid_boxes;
HYPRE_Int *cgrid_ids;
hypre_Box *cgrid_box;
hypre_IndexRef cstart;
hypre_Index stridec;
hypre_Index fstart;
hypre_IndexRef stridef;
hypre_Index loop_size;
HYPRE_Int constant_coefficient_A;
hypre_Box *A_dbox;
hypre_Box *P_dbox;
hypre_Box *R_dbox;
hypre_Box *RAP_dbox;
double *pa, *pb;
double *ra, *rb;
double *a_cc, *a_cw, *a_ce, *a_cs, *a_cn;
double *a_ac, *a_aw, *a_as;
double *a_bc, *a_bw, *a_be, *a_bs, *a_bn;
double *a_csw, *a_cse, *a_cnw, *a_cne;
double *a_asw, *a_ase;
double *a_bsw, *a_bse, *a_bnw, *a_bne;
/* Scalar copies of A's constant off-diagonal coefficients, read once at
   the box origin (offd), one plane below (offdm1) and above (offdp1);
   used only in the constant_coefficient_A != 0 branch. */
double a_cs_offd, a_cs_offdm1, a_cs_offdp1;
double a_csw_offd, a_csw_offdm1, a_csw_offdp1;
double a_cse_offd, a_cse_offdm1, a_cse_offdp1;
double a_cn_offdm1, a_cne_offdm1, a_cnw_offdm1;
double a_cw_offd, a_cw_offdm1, a_cw_offdp1;
double a_ce_offdm1;
double a_ac_offd, a_ac_offdm1;
double a_aw_offd, a_aw_offdm1;
double a_as_offd, a_as_offdm1;
double a_asw_offd, a_asw_offdm1;
double a_ase_offd, a_ase_offdm1;
double a_bc_offd, a_bc_offdm1, a_bc_offdp1;
double a_be_offd, a_be_offdm1;
double a_bn_offd, a_bn_offdm1;
double a_bw_offd, a_bw_offdm1, a_bw_offdp1;
double a_bs_offd, a_bs_offdm1, a_bs_offdp1;
double a_bsw_offd, a_bsw_offdm1, a_bsw_offdp1;
double a_bse_offd, a_bse_offdm1, a_bse_offdp1;
double a_bnw_offd, a_bnw_offdm1;
double a_bne_offd, a_bne_offdm1;
double *rap_cc, *rap_cw, *rap_cs;
double *rap_bc, *rap_bw, *rap_be, *rap_bs, *rap_bn;
double *rap_csw, *rap_cse;
double *rap_bsw, *rap_bse, *rap_bnw, *rap_bne;
HYPRE_Int iA, iAm1, iAp1, iA_offd, iA_offdm1, iA_offdp1;
HYPRE_Int iAc;
HYPRE_Int iP, iP1;
HYPRE_Int iR;
HYPRE_Int zOffsetA;
HYPRE_Int zOffsetA_diag;
HYPRE_Int zOffsetA_offd;
HYPRE_Int xOffsetP;
HYPRE_Int yOffsetP;
HYPRE_Int zOffsetP;
stridef = cstride;
hypre_SetIndex(stridec, 1, 1, 1);
cgrid = hypre_StructMatrixGrid(RAP);
cgrid_boxes = hypre_StructGridBoxes(cgrid);
/* cgrid_ids is only referenced by the commented-out ForBoxI scan below;
   the box pair (ci, fi) is now supplied by the caller. */
cgrid_ids = hypre_StructGridIDs(cgrid);
constant_coefficient_A = hypre_StructMatrixConstantCoefficient(A);
/*
fi = 0;
hypre_ForBoxI(ci, cgrid_boxes)
{
while (fgrid_ids[fi] != cgrid_ids[ci])
{
fi++;
}
*/
cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);
cstart = hypre_BoxIMin(cgrid_box);
hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart);
A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi);
P_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(P), fi);
R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi);
RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);
/*-----------------------------------------------------------------
 * Extract pointers for interpolation operator:
 * pa is pointer for weight for f-point above c-point
 * pb is pointer for weight for f-point below c-point
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,-1);
MapIndex(index_temp, cdir, index);
pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index);
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
/* shift pb so that pb[iP] and pa[iP] refer to the same c-point */
pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index) -
hypre_BoxOffsetDistance(P_dbox, index);
/*-----------------------------------------------------------------
 * Extract pointers for restriction operator:
 * ra is pointer for weight for f-point above c-point
 * rb is pointer for weight for f-point below c-point
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,-1);
MapIndex(index_temp, cdir, index);
ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index);
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
/* shift rb analogously to pb above */
rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index) -
hypre_BoxOffsetDistance(R_dbox, index);
/*-----------------------------------------------------------------
 * Extract pointers for 7-point fine grid operator:
 *
 * a_cc is pointer for center coefficient
 * a_cw is pointer for west coefficient in same plane
 * a_ce is pointer for east coefficient in same plane
 * a_cs is pointer for south coefficient in same plane
 * a_cn is pointer for north coefficient in same plane
 * a_ac is pointer for center coefficient in plane above
 * a_bc is pointer for center coefficient in plane below
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,0);
MapIndex(index_temp, cdir, index);
a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,0,0);
MapIndex(index_temp, cdir, index);
a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
a_cs = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
a_ac = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,0,-1);
MapIndex(index_temp, cdir, index);
a_bc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
/*-----------------------------------------------------------------
 * Extract additional pointers for 19-point fine grid operator:
 *
 * a_aw is pointer for west coefficient in plane above
 * a_ae is pointer for east coefficient in plane above
 * a_as is pointer for south coefficient in plane above
 * a_an is pointer for north coefficient in plane above
 * a_bw is pointer for west coefficient in plane below
 * a_be is pointer for east coefficient in plane below
 * a_bs is pointer for south coefficient in plane below
 * a_bn is pointer for north coefficient in plane below
 * a_csw is pointer for southwest coefficient in same plane
 * a_cse is pointer for southeast coefficient in same plane
 * a_cnw is pointer for northwest coefficient in same plane
 * a_cne is pointer for northeast coefficient in same plane
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,-1,0,1);
MapIndex(index_temp, cdir, index);
a_aw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,-1,1);
MapIndex(index_temp, cdir, index);
a_as = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,0,-1);
MapIndex(index_temp, cdir, index);
a_bw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,0,-1);
MapIndex(index_temp, cdir, index);
a_be = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,-1,-1);
MapIndex(index_temp, cdir, index);
a_bs = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,1,-1);
MapIndex(index_temp, cdir, index);
a_bn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,-1,0);
MapIndex(index_temp, cdir, index);
a_csw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,-1,0);
MapIndex(index_temp, cdir, index);
a_cse = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,1,0);
MapIndex(index_temp, cdir, index);
a_cnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,1,0);
MapIndex(index_temp, cdir, index);
a_cne = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
/*-----------------------------------------------------------------
 * Extract additional pointers for 27-point fine grid operator:
 *
 * a_asw is pointer for southwest coefficient in plane above
 * a_ase is pointer for southeast coefficient in plane above
 * a_anw is pointer for northwest coefficient in plane above
 * a_ane is pointer for northeast coefficient in plane above
 * a_bsw is pointer for southwest coefficient in plane below
 * a_bse is pointer for southeast coefficient in plane below
 * a_bnw is pointer for northwest coefficient in plane below
 * a_bne is pointer for northeast coefficient in plane below
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,-1,-1,1);
MapIndex(index_temp, cdir, index);
a_asw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,-1,1);
MapIndex(index_temp, cdir, index);
a_ase = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,-1,-1);
MapIndex(index_temp, cdir, index);
a_bsw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,-1,-1);
MapIndex(index_temp, cdir, index);
a_bse = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,1,-1);
MapIndex(index_temp, cdir, index);
a_bnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,1,-1);
MapIndex(index_temp, cdir, index);
a_bne = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
/*-----------------------------------------------------------------
 * Extract pointers for 19-point coarse grid operator:
 *
 * We build only the lower triangular part (plus diagonal).
 *
 * rap_cc is pointer for center coefficient (etc.)
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,0);
MapIndex(index_temp, cdir, index);
rap_cc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,0,0);
MapIndex(index_temp, cdir, index);
rap_cw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
rap_cs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,0,-1);
MapIndex(index_temp, cdir, index);
rap_bc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,0,-1);
MapIndex(index_temp, cdir, index);
rap_bw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,0,-1);
MapIndex(index_temp, cdir, index);
rap_be = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,-1,-1);
MapIndex(index_temp, cdir, index);
rap_bs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,1,-1);
MapIndex(index_temp, cdir, index);
rap_bn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,-1,0);
MapIndex(index_temp, cdir, index);
rap_csw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,-1,0);
MapIndex(index_temp, cdir, index);
rap_cse = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
/*-----------------------------------------------------------------
 * Extract additional pointers for 27-point coarse grid operator:
 *
 * A 27-point coarse grid operator is produced when the fine grid
 * stencil is 19 or 27 point.
 *
 * We build only the lower triangular part.
 *
 * rap_csw is pointer for southwest coefficient in same plane (etc.)
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,-1,-1,-1);
MapIndex(index_temp, cdir, index);
rap_bsw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,-1,-1);
MapIndex(index_temp, cdir, index);
rap_bse = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,1,-1);
MapIndex(index_temp, cdir, index);
rap_bnw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,1,-1);
MapIndex(index_temp, cdir, index);
rap_bne = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
/*-----------------------------------------------------------------
 * Define offsets for fine grid stencil and interpolation
 *
 * In the BoxLoop below I assume iA and iP refer to data associated
 * with the point which we are building the stencil for. The below
 * Offsets are used in referring to data associated with other points.
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
zOffsetP = hypre_BoxOffsetDistance(P_dbox,index);
if ( constant_coefficient_A == 0 )
{
zOffsetA = hypre_BoxOffsetDistance(A_dbox,index);
}
else
{
/* A has constant off-diagonal coefficients: the diagonal part uses a
   real data-space offset, the constant part a CC (degenerate) offset. */
zOffsetA_diag = hypre_BoxOffsetDistance(A_dbox,index);
zOffsetA_offd = hypre_CCBoxOffsetDistance(A_dbox,index);
}
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
yOffsetP = hypre_BoxOffsetDistance(P_dbox,index);
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
xOffsetP = hypre_BoxOffsetDistance(P_dbox,index);
/*--------------------------------------------------------------------
 * This routine is the FSS27 specialization of what was originally a
 * switch statement directing control to the appropriate BoxLoop
 * depending on stencil size (default: full 27-point).
 *-----------------------------------------------------------------*/
/*--------------------------------------------------------------
 * Loop for symmetric 27-point fine grid operator; produces a
 * symmetric 27-point coarse grid operator. We calculate only the
 * lower triangular stencil entries: (below-southwest, below-south,
 * below-southeast, below-west, below-center, below-east,
 * below-northwest, below-north, below-northeast, center-southwest,
 * center-south, center-southeast, center-west, and center-center).
 *--------------------------------------------------------------*/
hypre_BoxGetSize(cgrid_box, loop_size);
if ( constant_coefficient_A == 0 )
{
/* Fully variable-coefficient A: every coefficient indexed per point. */
hypre_BoxLoop4Begin(hypre_StructMatrixDim(A), loop_size,
P_dbox, cstart, stridec, iP,
R_dbox, cstart, stridec, iR,
A_dbox, fstart, stridef, iA,
RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop4For(iP, iR, iA, iAc)
{
iAm1 = iA - zOffsetA;
iAp1 = iA + zOffsetA;
iP1 = iP - zOffsetP - yOffsetP - xOffsetP;
rap_bsw[iAc] = rb[iR] * a_csw[iAm1] * pa[iP1]
+ rb[iR] * a_bsw[iAm1]
+ a_bsw[iA] * pa[iP1];
iP1 = iP - zOffsetP - yOffsetP;
rap_bs[iAc] = rb[iR] * a_cs[iAm1] * pa[iP1]
+ rb[iR] * a_bs[iAm1]
+ a_bs[iA] * pa[iP1];
iP1 = iP - zOffsetP - yOffsetP + xOffsetP;
rap_bse[iAc] = rb[iR] * a_cse[iAm1] * pa[iP1]
+ rb[iR] * a_bse[iAm1]
+ a_bse[iA] * pa[iP1];
iP1 = iP - zOffsetP - xOffsetP;
rap_bw[iAc] = rb[iR] * a_cw[iAm1] * pa[iP1]
+ rb[iR] * a_bw[iAm1]
+ a_bw[iA] * pa[iP1];
iP1 = iP - zOffsetP;
rap_bc[iAc] = a_bc[iA] * pa[iP1]
+ rb[iR] * a_cc[iAm1] * pa[iP1]
+ rb[iR] * a_bc[iAm1];
iP1 = iP - zOffsetP + xOffsetP;
rap_be[iAc] = rb[iR] * a_ce[iAm1] * pa[iP1]
+ rb[iR] * a_be[iAm1]
+ a_be[iA] * pa[iP1];
iP1 = iP - zOffsetP + yOffsetP - xOffsetP;
rap_bnw[iAc] = rb[iR] * a_cnw[iAm1] * pa[iP1]
+ rb[iR] * a_bnw[iAm1]
+ a_bnw[iA] * pa[iP1];
iP1 = iP - zOffsetP + yOffsetP;
rap_bn[iAc] = rb[iR] * a_cn[iAm1] * pa[iP1]
+ rb[iR] * a_bn[iAm1]
+ a_bn[iA] * pa[iP1];
iP1 = iP - zOffsetP + yOffsetP + xOffsetP;
rap_bne[iAc] = rb[iR] * a_cne[iAm1] * pa[iP1]
+ rb[iR] * a_bne[iAm1]
+ a_bne[iA] * pa[iP1];
iP1 = iP - yOffsetP - xOffsetP;
rap_csw[iAc] = a_csw[iA]
+ rb[iR] * a_csw[iAm1] * pb[iP1]
+ ra[iR] * a_csw[iAp1] * pa[iP1]
+ a_bsw[iA] * pb[iP1]
+ a_asw[iA] * pa[iP1]
+ rb[iR] * a_asw[iAm1]
+ ra[iR] * a_bsw[iAp1];
iP1 = iP - yOffsetP;
rap_cs[iAc] = a_cs[iA]
+ rb[iR] * a_cs[iAm1] * pb[iP1]
+ ra[iR] * a_cs[iAp1] * pa[iP1]
+ a_bs[iA] * pb[iP1]
+ a_as[iA] * pa[iP1]
+ rb[iR] * a_as[iAm1]
+ ra[iR] * a_bs[iAp1];
iP1 = iP - yOffsetP + xOffsetP;
rap_cse[iAc] = a_cse[iA]
+ rb[iR] * a_cse[iAm1] * pb[iP1]
+ ra[iR] * a_cse[iAp1] * pa[iP1]
+ a_bse[iA] * pb[iP1]
+ a_ase[iA] * pa[iP1]
+ rb[iR] * a_ase[iAm1]
+ ra[iR] * a_bse[iAp1];
iP1 = iP - xOffsetP;
rap_cw[iAc] = a_cw[iA]
+ rb[iR] * a_cw[iAm1] * pb[iP1]
+ ra[iR] * a_cw[iAp1] * pa[iP1]
+ a_bw[iA] * pb[iP1]
+ a_aw[iA] * pa[iP1]
+ rb[iR] * a_aw[iAm1]
+ ra[iR] * a_bw[iAp1];
rap_cc[iAc] = a_cc[iA]
+ rb[iR] * a_cc[iAm1] * pb[iP]
+ ra[iR] * a_cc[iAp1] * pa[iP]
+ rb[iR] * a_ac[iAm1]
+ ra[iR] * a_bc[iAp1]
+ a_bc[iA] * pb[iP]
+ a_ac[iA] * pa[iP];
}
hypre_BoxLoop4End(iP, iR, iA, iAc);
}
else
{
/* constant_coefficient_A != 0 (constant off-diagonal coefficients):
   read each constant coefficient once at the box origin (and one plane
   below/above) and hoist the scalars out of the BoxLoop; only the
   variable diagonal a_cc is still indexed inside the loop. */
iA_offd = hypre_CCBoxIndexRank(A_dbox,fstart);
iA_offdm1 = iA_offd - zOffsetA_offd;
iA_offdp1 = iA_offd + zOffsetA_offd;
a_cs_offd = a_cs[iA_offd];
a_cs_offdm1 = a_cs[iA_offdm1];
a_cs_offdp1 = a_cs[iA_offdp1];
a_cse_offd = a_cse[iA_offd];
a_cse_offdm1 = a_cse[iA_offdm1];
a_cse_offdp1 = a_cse[iA_offdp1];
a_csw_offd = a_csw[iA_offd];
a_csw_offdm1 = a_csw[iA_offdm1];
a_csw_offdp1 = a_csw[iA_offdp1];
a_cw_offd = a_cw[iA_offd];
a_cw_offdm1 = a_cw[iA_offdm1];
a_cw_offdp1 = a_cw[iA_offdp1];
a_cn_offdm1 = a_cn[iA_offdm1];
a_cne_offdm1 = a_cne[iA_offdm1];
a_cnw_offdm1 = a_cnw[iA_offdm1];
a_ce_offdm1 = a_ce[iA_offdm1];
a_ac_offd = a_ac[iA_offd];
a_ac_offdm1 = a_ac[iA_offdm1];
a_as_offd = a_as[iA_offd];
a_as_offdm1 = a_as[iA_offdm1];
a_aw_offd = a_aw[iA_offd];
a_aw_offdm1 = a_aw[iA_offdm1];
a_asw_offd = a_asw[iA_offd];
a_asw_offdm1 = a_asw[iA_offdm1];
a_ase_offd = a_ase[iA_offd];
a_ase_offdm1 = a_ase[iA_offdm1];
a_bc_offd = a_bc[iA_offd];
a_bc_offdm1 = a_bc[iA_offdm1];
a_bc_offdp1 = a_bc[iA_offdp1];
a_bs_offd = a_bs[iA_offd];
a_bs_offdm1 = a_bs[iA_offdm1];
a_bs_offdp1 = a_bs[iA_offdp1];
a_bsw_offd = a_bsw[iA_offd];
a_bsw_offdm1 = a_bsw[iA_offdm1];
a_bsw_offdp1 = a_bsw[iA_offdp1];
a_bse_offd = a_bse[iA_offd];
a_bse_offdm1 = a_bse[iA_offdm1];
a_bse_offdp1 = a_bse[iA_offdp1];
a_be_offd = a_be[iA_offd];
a_be_offdm1 = a_be[iA_offdm1];
a_bw_offd = a_bw[iA_offd];
a_bw_offdm1 = a_bw[iA_offdm1];
a_bw_offdp1 = a_bw[iA_offdp1];
a_bn_offd = a_bn[iA_offd];
a_bn_offdm1 = a_bn[iA_offdm1];
a_bnw_offd = a_bnw[iA_offd];
a_bnw_offdm1 = a_bnw[iA_offdm1];
a_bne_offd = a_bne[iA_offd];
a_bne_offdm1 = a_bne[iA_offdm1];
hypre_BoxLoop4Begin(hypre_StructMatrixDim(A), loop_size,
P_dbox, cstart, stridec, iP,
R_dbox, cstart, stridec, iR,
A_dbox, fstart, stridef, iA,
RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop4For(iP, iR, iA, iAc)
{
iAm1 = iA - zOffsetA_diag;
iAp1 = iA + zOffsetA_diag;
iP1 = iP - zOffsetP - yOffsetP - xOffsetP;
rap_bsw[iAc] = rb[iR] * a_csw_offdm1 * pa[iP1]
+ rb[iR] * a_bsw_offdm1
+ a_bsw_offd * pa[iP1];
iP1 = iP - zOffsetP - yOffsetP;
rap_bs[iAc] = rb[iR] * a_cs_offdm1 * pa[iP1]
+ rb[iR] * a_bs_offdm1
+ a_bs_offd * pa[iP1];
iP1 = iP - zOffsetP - yOffsetP + xOffsetP;
rap_bse[iAc] = rb[iR] * a_cse_offdm1 * pa[iP1]
+ rb[iR] * a_bse_offdm1
+ a_bse_offd * pa[iP1];
iP1 = iP - zOffsetP - xOffsetP;
rap_bw[iAc] = rb[iR] * a_cw_offdm1 * pa[iP1]
+ rb[iR] * a_bw_offdm1
+ a_bw_offd * pa[iP1];
iP1 = iP - zOffsetP;
rap_bc[iAc] = a_bc_offd * pa[iP1]
+ rb[iR] * a_cc[iAm1] * pa[iP1]
+ rb[iR] * a_bc_offdm1;
iP1 = iP - zOffsetP + xOffsetP;
rap_be[iAc] = rb[iR] * a_ce_offdm1 * pa[iP1]
+ rb[iR] * a_be_offdm1
+ a_be_offd * pa[iP1];
iP1 = iP - zOffsetP + yOffsetP - xOffsetP;
rap_bnw[iAc] = rb[iR] * a_cnw_offdm1 * pa[iP1]
+ rb[iR] * a_bnw_offdm1
+ a_bnw_offd * pa[iP1];
iP1 = iP - zOffsetP + yOffsetP;
rap_bn[iAc] = rb[iR] * a_cn_offdm1 * pa[iP1]
+ rb[iR] * a_bn_offdm1
+ a_bn_offd * pa[iP1];
iP1 = iP - zOffsetP + yOffsetP + xOffsetP;
rap_bne[iAc] = rb[iR] * a_cne_offdm1 * pa[iP1]
+ rb[iR] * a_bne_offdm1
+ a_bne_offd * pa[iP1];
iP1 = iP - yOffsetP - xOffsetP;
rap_csw[iAc] = a_csw_offd
+ rb[iR] * a_csw_offdm1 * pb[iP1]
+ ra[iR] * a_csw_offdp1 * pa[iP1]
+ a_bsw_offd * pb[iP1]
+ a_asw_offd * pa[iP1]
+ rb[iR] * a_asw_offdm1
+ ra[iR] * a_bsw_offdp1;
iP1 = iP - yOffsetP;
rap_cs[iAc] = a_cs_offd
+ rb[iR] * a_cs_offdm1 * pb[iP1]
+ ra[iR] * a_cs_offdp1 * pa[iP1]
+ a_bs_offd * pb[iP1]
+ a_as_offd * pa[iP1]
+ rb[iR] * a_as_offdm1
+ ra[iR] * a_bs_offdp1;
iP1 = iP - yOffsetP + xOffsetP;
rap_cse[iAc] = a_cse_offd
+ rb[iR] * a_cse_offdm1 * pb[iP1]
+ ra[iR] * a_cse_offdp1 * pa[iP1]
+ a_bse_offd * pb[iP1]
+ a_ase_offd * pa[iP1]
+ rb[iR] * a_ase_offdm1
+ ra[iR] * a_bse_offdp1;
iP1 = iP - xOffsetP;
rap_cw[iAc] = a_cw_offd
+ rb[iR] * a_cw_offdm1 * pb[iP1]
+ ra[iR] * a_cw_offdp1 * pa[iP1]
+ a_bw_offd * pb[iP1]
+ a_aw_offd * pa[iP1]
+ rb[iR] * a_aw_offdm1
+ ra[iR] * a_bw_offdp1;
rap_cc[iAc] = a_cc[iA]
+ rb[iR] * a_cc[iAm1] * pb[iP]
+ ra[iR] * a_cc[iAp1] * pa[iP]
+ rb[iR] * a_ac_offdm1
+ ra[iR] * a_bc_offdp1
+ a_bc_offd * pb[iP]
+ a_ac_offd * pa[iP];
}
hypre_BoxLoop4End(iP, iR, iA, iAc);
}
/* }*/ /* end ForBoxI */
return hypre_error_flag;
}
/* core part of hypre_PFMG3BuildRAPSym, for one box, one value of fine_stencil_size
(27) and one value of constant_coefficient (1). */
/*--------------------------------------------------------------------------
 * Build the lower-triangular (symmetric) part of RAP = R*A*P for ONE
 * coarse/fine box pair (ci, fi), specialized to a 27-point fine-grid
 * stencil (FSS27) with constant_coefficient == 1: A, P and R are all
 * constant-coefficient, so each coarse stencil entry is a single scalar.
 * Consequently there is no BoxLoop here -- all indices are computed once
 * at the box origin via the CC (constant-coefficient) rank/offset macros
 * and each rap_* entry is written exactly once.
 *
 *   ci, fi          - indices of the coarse and fine boxes in the
 *                     respective matrix data spaces
 *   A               - fine-grid operator
 *   P, R            - interpolation and restriction operators
 *   cdir            - coarsening direction; MapIndex permutes the
 *                     canonical stencil offsets accordingly
 *   cindex, cstride - map from coarse to fine index space
 *   RAP             - coarse-grid operator being filled in (output)
 *
 * Returns hypre_error_flag.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_PFMG3BuildRAPSym_onebox_FSS27_CC1(
HYPRE_Int ci,
HYPRE_Int fi,
hypre_StructMatrix *A,
hypre_StructMatrix *P,
hypre_StructMatrix *R,
HYPRE_Int cdir,
hypre_Index cindex,
hypre_Index cstride,
hypre_StructMatrix *RAP )
{
hypre_Index index;
hypre_Index index_temp;
hypre_StructGrid *cgrid;
hypre_BoxArray *cgrid_boxes;
HYPRE_Int *cgrid_ids;
hypre_Box *cgrid_box;
hypre_IndexRef cstart;
hypre_Index stridec;
hypre_Index fstart;
hypre_IndexRef stridef;
hypre_Box *A_dbox;
hypre_Box *P_dbox;
hypre_Box *R_dbox;
hypre_Box *RAP_dbox;
double *pa, *pb;
double *ra, *rb;
double *a_cc, *a_cw, *a_ce, *a_cs, *a_cn;
double *a_ac, *a_aw, *a_as;
double *a_bc, *a_bw, *a_be, *a_bs, *a_bn;
double *a_csw, *a_cse, *a_cnw, *a_cne;
double *a_asw, *a_ase;
double *a_bsw, *a_bse, *a_bnw, *a_bne;
double *rap_cc, *rap_cw, *rap_cs;
double *rap_bc, *rap_bw, *rap_be, *rap_bs, *rap_bn;
double *rap_csw, *rap_cse;
double *rap_bsw, *rap_bse, *rap_bnw, *rap_bne;
HYPRE_Int iA, iAm1, iAp1;
HYPRE_Int iAc;
HYPRE_Int iP, iP1;
HYPRE_Int iR;
HYPRE_Int zOffsetA;
HYPRE_Int xOffsetP;
HYPRE_Int yOffsetP;
HYPRE_Int zOffsetP;
stridef = cstride;
hypre_SetIndex(stridec, 1, 1, 1);
cgrid = hypre_StructMatrixGrid(RAP);
cgrid_boxes = hypre_StructGridBoxes(cgrid);
/* cgrid_ids is only referenced by the commented-out ForBoxI scan below;
   the box pair (ci, fi) is now supplied by the caller. */
cgrid_ids = hypre_StructGridIDs(cgrid);
/*
fi = 0;
hypre_ForBoxI(ci, cgrid_boxes)
{
while (fgrid_ids[fi] != cgrid_ids[ci])
{
fi++;
}
*/
cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);
cstart = hypre_BoxIMin(cgrid_box);
hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart);
A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi);
P_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(P), fi);
R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi);
RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);
/*-----------------------------------------------------------------
 * Extract pointers for interpolation operator:
 * pa is pointer for weight for f-point above c-point
 * pb is pointer for weight for f-point below c-point
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,-1);
MapIndex(index_temp, cdir, index);
pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index);
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
/* shift pb (CC offset, since P is constant-coefficient here) so that
   pb[iP] and pa[iP] refer to the same c-point */
pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index) -
hypre_CCBoxOffsetDistance(P_dbox, index);
/*-----------------------------------------------------------------
 * Extract pointers for restriction operator:
 * ra is pointer for weight for f-point above c-point
 * rb is pointer for weight for f-point below c-point
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,-1);
MapIndex(index_temp, cdir, index);
ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index);
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
/* shift rb analogously to pb above */
rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index) -
hypre_CCBoxOffsetDistance(R_dbox, index);
/*-----------------------------------------------------------------
 * Extract pointers for 7-point fine grid operator:
 *
 * a_cc is pointer for center coefficient
 * a_cw is pointer for west coefficient in same plane
 * a_ce is pointer for east coefficient in same plane
 * a_cs is pointer for south coefficient in same plane
 * a_cn is pointer for north coefficient in same plane
 * a_ac is pointer for center coefficient in plane above
 * a_bc is pointer for center coefficient in plane below
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,0);
MapIndex(index_temp, cdir, index);
a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,0,0);
MapIndex(index_temp, cdir, index);
a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
a_cs = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
a_ac = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,0,-1);
MapIndex(index_temp, cdir, index);
a_bc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
/*-----------------------------------------------------------------
 * Extract additional pointers for 19-point fine grid operator:
 *
 * a_aw is pointer for west coefficient in plane above
 * a_ae is pointer for east coefficient in plane above
 * a_as is pointer for south coefficient in plane above
 * a_an is pointer for north coefficient in plane above
 * a_bw is pointer for west coefficient in plane below
 * a_be is pointer for east coefficient in plane below
 * a_bs is pointer for south coefficient in plane below
 * a_bn is pointer for north coefficient in plane below
 * a_csw is pointer for southwest coefficient in same plane
 * a_cse is pointer for southeast coefficient in same plane
 * a_cnw is pointer for northwest coefficient in same plane
 * a_cne is pointer for northeast coefficient in same plane
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,-1,0,1);
MapIndex(index_temp, cdir, index);
a_aw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,-1,1);
MapIndex(index_temp, cdir, index);
a_as = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,0,-1);
MapIndex(index_temp, cdir, index);
a_bw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,0,-1);
MapIndex(index_temp, cdir, index);
a_be = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,-1,-1);
MapIndex(index_temp, cdir, index);
a_bs = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,1,-1);
MapIndex(index_temp, cdir, index);
a_bn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,-1,0);
MapIndex(index_temp, cdir, index);
a_csw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,-1,0);
MapIndex(index_temp, cdir, index);
a_cse = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,1,0);
MapIndex(index_temp, cdir, index);
a_cnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,1,0);
MapIndex(index_temp, cdir, index);
a_cne = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
/*-----------------------------------------------------------------
 * Extract additional pointers for 27-point fine grid operator:
 *
 * a_asw is pointer for southwest coefficient in plane above
 * a_ase is pointer for southeast coefficient in plane above
 * a_anw is pointer for northwest coefficient in plane above
 * a_ane is pointer for northeast coefficient in plane above
 * a_bsw is pointer for southwest coefficient in plane below
 * a_bse is pointer for southeast coefficient in plane below
 * a_bnw is pointer for northwest coefficient in plane below
 * a_bne is pointer for northeast coefficient in plane below
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,-1,-1,1);
MapIndex(index_temp, cdir, index);
a_asw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,-1,1);
MapIndex(index_temp, cdir, index);
a_ase = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,-1,-1);
MapIndex(index_temp, cdir, index);
a_bsw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,-1,-1);
MapIndex(index_temp, cdir, index);
a_bse = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,1,-1);
MapIndex(index_temp, cdir, index);
a_bnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,1,-1);
MapIndex(index_temp, cdir, index);
a_bne = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
/*-----------------------------------------------------------------
 * Extract pointers for 19-point coarse grid operator:
 *
 * We build only the lower triangular part (plus diagonal).
 *
 * rap_cc is pointer for center coefficient (etc.)
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,0);
MapIndex(index_temp, cdir, index);
rap_cc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,0,0);
MapIndex(index_temp, cdir, index);
rap_cw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
rap_cs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,0,-1);
MapIndex(index_temp, cdir, index);
rap_bc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,0,-1);
MapIndex(index_temp, cdir, index);
rap_bw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,0,-1);
MapIndex(index_temp, cdir, index);
rap_be = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,-1,-1);
MapIndex(index_temp, cdir, index);
rap_bs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,1,-1);
MapIndex(index_temp, cdir, index);
rap_bn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,-1,0);
MapIndex(index_temp, cdir, index);
rap_csw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,-1,0);
MapIndex(index_temp, cdir, index);
rap_cse = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
/*-----------------------------------------------------------------
 * Extract additional pointers for 27-point coarse grid operator:
 *
 * A 27-point coarse grid operator is produced when the fine grid
 * stencil is 19 or 27 point.
 *
 * We build only the lower triangular part.
 *
 * rap_csw is pointer for southwest coefficient in same plane (etc.)
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,-1,-1,-1);
MapIndex(index_temp, cdir, index);
rap_bsw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,-1,-1);
MapIndex(index_temp, cdir, index);
rap_bse = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,1,-1);
MapIndex(index_temp, cdir, index);
rap_bnw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,1,-1);
MapIndex(index_temp, cdir, index);
rap_bne = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
/*-----------------------------------------------------------------
 * Define offsets for fine grid stencil and interpolation
 *
 * Below, iA and iP refer to data associated with the point which
 * we are building the stencil for. The Offsets are used in
 * referring to data associated with other points.  All offsets are
 * CC (constant-coefficient) distances here.
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
zOffsetA = hypre_CCBoxOffsetDistance(A_dbox,index);
zOffsetP = hypre_CCBoxOffsetDistance(P_dbox,index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
yOffsetP = hypre_CCBoxOffsetDistance(P_dbox,index);
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
xOffsetP = hypre_CCBoxOffsetDistance(P_dbox,index);
/*--------------------------------------------------------------------
 * This routine is the FSS27 specialization of what was originally a
 * switch statement directing control to the appropriate BoxLoop
 * depending on stencil size (default: full 27-point).
 *-----------------------------------------------------------------*/
/*--------------------------------------------------------------
 * Symmetric 27-point fine grid operator; produces a symmetric
 * 27-point coarse grid operator. We calculate only the lower
 * triangular stencil entries: (below-southwest, below-south,
 * below-southeast, below-west, below-center, below-east,
 * below-northwest, below-north, below-northeast, center-southwest,
 * center-south, center-southeast, center-west, and center-center).
 * Since all operators are constant-coefficient, each entry is a
 * single value computed once at the box origin -- no loop needed.
 *--------------------------------------------------------------*/
iP = hypre_CCBoxIndexRank(P_dbox,cstart);
iR = hypre_CCBoxIndexRank(R_dbox,cstart);
iA = hypre_CCBoxIndexRank(A_dbox,fstart);
iAc = hypre_CCBoxIndexRank(RAP_dbox,cstart);
iAm1 = iA - zOffsetA;
iAp1 = iA + zOffsetA;
iP1 = iP - zOffsetP - yOffsetP - xOffsetP;
rap_bsw[iAc] = rb[iR] * a_csw[iAm1] * pa[iP1]
+ rb[iR] * a_bsw[iAm1]
+ a_bsw[iA] * pa[iP1];
iP1 = iP - zOffsetP - yOffsetP;
rap_bs[iAc] = rb[iR] * a_cs[iAm1] * pa[iP1]
+ rb[iR] * a_bs[iAm1]
+ a_bs[iA] * pa[iP1];
iP1 = iP - zOffsetP - yOffsetP + xOffsetP;
rap_bse[iAc] = rb[iR] * a_cse[iAm1] * pa[iP1]
+ rb[iR] * a_bse[iAm1]
+ a_bse[iA] * pa[iP1];
iP1 = iP - zOffsetP - xOffsetP;
rap_bw[iAc] = rb[iR] * a_cw[iAm1] * pa[iP1]
+ rb[iR] * a_bw[iAm1]
+ a_bw[iA] * pa[iP1];
iP1 = iP - zOffsetP;
rap_bc[iAc] = a_bc[iA] * pa[iP1]
+ rb[iR] * a_cc[iAm1] * pa[iP1]
+ rb[iR] * a_bc[iAm1];
iP1 = iP - zOffsetP + xOffsetP;
rap_be[iAc] = rb[iR] * a_ce[iAm1] * pa[iP1]
+ rb[iR] * a_be[iAm1]
+ a_be[iA] * pa[iP1];
iP1 = iP - zOffsetP + yOffsetP - xOffsetP;
rap_bnw[iAc] = rb[iR] * a_cnw[iAm1] * pa[iP1]
+ rb[iR] * a_bnw[iAm1]
+ a_bnw[iA] * pa[iP1];
iP1 = iP - zOffsetP + yOffsetP;
rap_bn[iAc] = rb[iR] * a_cn[iAm1] * pa[iP1]
+ rb[iR] * a_bn[iAm1]
+ a_bn[iA] * pa[iP1];
iP1 = iP - zOffsetP + yOffsetP + xOffsetP;
rap_bne[iAc] = rb[iR] * a_cne[iAm1] * pa[iP1]
+ rb[iR] * a_bne[iAm1]
+ a_bne[iA] * pa[iP1];
iP1 = iP - yOffsetP - xOffsetP;
rap_csw[iAc] = a_csw[iA]
+ rb[iR] * a_csw[iAm1] * pb[iP1]
+ ra[iR] * a_csw[iAp1] * pa[iP1]
+ a_bsw[iA] * pb[iP1]
+ a_asw[iA] * pa[iP1]
+ rb[iR] * a_asw[iAm1]
+ ra[iR] * a_bsw[iAp1];
iP1 = iP - yOffsetP;
rap_cs[iAc] = a_cs[iA]
+ rb[iR] * a_cs[iAm1] * pb[iP1]
+ ra[iR] * a_cs[iAp1] * pa[iP1]
+ a_bs[iA] * pb[iP1]
+ a_as[iA] * pa[iP1]
+ rb[iR] * a_as[iAm1]
+ ra[iR] * a_bs[iAp1];
iP1 = iP - yOffsetP + xOffsetP;
rap_cse[iAc] = a_cse[iA]
+ rb[iR] * a_cse[iAm1] * pb[iP1]
+ ra[iR] * a_cse[iAp1] * pa[iP1]
+ a_bse[iA] * pb[iP1]
+ a_ase[iA] * pa[iP1]
+ rb[iR] * a_ase[iAm1]
+ ra[iR] * a_bse[iAp1];
iP1 = iP - xOffsetP;
rap_cw[iAc] = a_cw[iA]
+ rb[iR] * a_cw[iAm1] * pb[iP1]
+ ra[iR] * a_cw[iAp1] * pa[iP1]
+ a_bw[iA] * pb[iP1]
+ a_aw[iA] * pa[iP1]
+ rb[iR] * a_aw[iAm1]
+ ra[iR] * a_bw[iAp1];
rap_cc[iAc] = a_cc[iA]
+ rb[iR] * a_cc[iAm1] * pb[iP]
+ ra[iR] * a_cc[iAp1] * pa[iP]
+ rb[iR] * a_ac[iAm1]
+ ra[iR] * a_bc[iAp1]
+ a_bc[iA] * pb[iP]
+ a_ac[iA] * pa[iP];
/* }*/ /* end ForBoxI */
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_PFMG3BuildRAPNoSym
 *
 * Builds the non-symmetric (upper-triangular) part of the coarse-grid
 * operator RAP = R*A*P for 3D PFMG.  Walks the coarse-grid boxes, pairs
 * each with its matching fine-grid box (by grid id), and dispatches to a
 * per-box kernel selected by the fine stencil size (7, 19, or 27 point)
 * and by whether the operators use constant coefficients.
 *
 * Returns hypre_error_flag.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_PFMG3BuildRAPNoSym( hypre_StructMatrix *A,
hypre_StructMatrix *P,
hypre_StructMatrix *R,
HYPRE_Int cdir,
hypre_Index cindex,
hypre_Index cstride,
hypre_StructMatrix *RAP )
{
   HYPRE_Int            stencil_size;
   hypre_StructGrid    *fgrid;
   HYPRE_Int           *fgrid_ids;
   hypre_StructGrid    *cgrid;
   hypre_BoxArray      *cgrid_boxes;
   HYPRE_Int           *cgrid_ids;
   HYPRE_Int            fbox, cbox;
   HYPRE_Int            cc_rap;
   HYPRE_Int            cc_A;

   /* Size of the fine-grid operator stencil decides which kernel family
      (FSS07 / FSS19 / FSS27) is used below. */
   stencil_size = hypre_StructStencilSize(hypre_StructMatrixStencil(A));

   fgrid       = hypre_StructMatrixGrid(A);
   fgrid_ids   = hypre_StructGridIDs(fgrid);
   cgrid       = hypre_StructMatrixGrid(RAP);
   cgrid_boxes = hypre_StructGridBoxes(cgrid);
   cgrid_ids   = hypre_StructGridIDs(cgrid);

   /* Constant-coefficient modes: RAP, R, and P must agree; A may be fully
      variable (0) or constant except for its diagonal (2) when RAP is
      variable, and must be fully constant (1) when RAP is constant. */
   cc_rap = hypre_StructMatrixConstantCoefficient(RAP);
   cc_A   = hypre_StructMatrixConstantCoefficient(A);
   hypre_assert( cc_rap==0 || cc_rap==1 );
   hypre_assert( hypre_StructMatrixConstantCoefficient(R) == cc_rap );
   hypre_assert( hypre_StructMatrixConstantCoefficient(P) == cc_rap );
   if (cc_rap==1 )
   {
      hypre_assert( cc_A==1 );
   }
   else
   {
      hypre_assert( cc_A==0 || cc_A==2 );
   }

   fbox = 0;
   hypre_ForBoxI(cbox, cgrid_boxes)
   {
      /* Advance through the fine boxes until the ids match; box arrays are
         ordered so a linear scan suffices. */
      while (fgrid_ids[fbox] != cgrid_ids[cbox])
      {
         fbox++;
      }

      if (stencil_size == 7)
      {
         /* 7-point fine operator -> upper triangle of a 19-point coarse
            operator (above-north/east/center/west/south, center-north,
            center-east). */
         if ( cc_rap == 1 )
         {
            hypre_PFMG3BuildRAPNoSym_onebox_FSS07_CC1(
               cbox, fbox, A, P, R, cdir, cindex, cstride, RAP );
         }
         else
         {
            hypre_PFMG3BuildRAPNoSym_onebox_FSS07_CC0(
               cbox, fbox, A, P, R, cdir, cindex, cstride, RAP );
         }
      }
      else if (stencil_size == 19)
      {
         /* 19-point fine operator -> upper triangle of a 27-point coarse
            operator. */
         if ( cc_rap == 1 )
         {
            hypre_PFMG3BuildRAPNoSym_onebox_FSS19_CC1(
               cbox, fbox, A, P, R, cdir, cindex, cstride, RAP );
         }
         else
         {
            hypre_PFMG3BuildRAPNoSym_onebox_FSS19_CC0(
               cbox, fbox, A, P, R, cdir, cindex, cstride, RAP );
         }
      }
      else
      {
         /* Default: full 27-point fine operator -> upper triangle of a
            27-point coarse operator. */
         if ( cc_rap == 1 )
         {
            hypre_PFMG3BuildRAPNoSym_onebox_FSS27_CC1(
               cbox, fbox, A, P, R, cdir, cindex, cstride, RAP );
         }
         else
         {
            hypre_PFMG3BuildRAPNoSym_onebox_FSS27_CC0(
               cbox, fbox, A, P, R, cdir, cindex, cstride, RAP );
         }
      }
   } /* end ForBoxI */

   return hypre_error_flag;
}
/* Core part of hypre_PFMG3BuildRAPNoSym, for one box, one value of
   fine_stencil_size (07) and one value of constant_coefficient (0):
   builds the upper-triangular part of the 19-point coarse operator
   RAP = R*A*P on coarse box ci from the 7-point fine operator A on the
   matching fine box fi.  Called per box from hypre_PFMG3BuildRAPNoSym;
   the caller guarantees constant_coefficient_A is 0 (fully variable) or
   2 (constant except the diagonal). */
HYPRE_Int
hypre_PFMG3BuildRAPNoSym_onebox_FSS07_CC0(
HYPRE_Int ci,
HYPRE_Int fi,
hypre_StructMatrix *A,
hypre_StructMatrix *P,
hypre_StructMatrix *R,
HYPRE_Int cdir,
hypre_Index cindex,
hypre_Index cstride,
hypre_StructMatrix *RAP )
{
hypre_Index index;
hypre_Index index_temp;
hypre_StructGrid *cgrid;
hypre_BoxArray *cgrid_boxes;
hypre_Box *cgrid_box;
hypre_IndexRef cstart;
hypre_Index stridec;
hypre_Index fstart;
hypre_IndexRef stridef;
hypre_Index loop_size;
HYPRE_Int constant_coefficient_A;
hypre_Box *A_dbox;
hypre_Box *P_dbox;
hypre_Box *R_dbox;
hypre_Box *RAP_dbox;
double *pa, *pb;
double *ra, *rb;
double *a_cc, *a_cw, *a_ce, *a_cs, *a_cn;
double *a_ac;
/* *_offd scalars hold the spatially-constant off-diagonal values of A
   when constant_coefficient_A == 2 (read once outside the BoxLoop). */
double a_cn_offd, a_cn_offdm1, a_cn_offdp1;
double a_ce_offd, a_ce_offdm1, a_ce_offdp1;
double a_cs_offdp1, a_cw_offdp1;
double a_ac_offd, a_ac_offdp1;
double *rap_ce, *rap_cn;
double *rap_ac, *rap_aw, *rap_ae, *rap_as, *rap_an;
double *rap_cnw, *rap_cne;
HYPRE_Int iA, iAm1, iAp1, iA_offd, iA_offdm1, iA_offdp1;
HYPRE_Int iAc;
HYPRE_Int iP, iP1;
HYPRE_Int iR;
HYPRE_Int zOffsetA;
HYPRE_Int zOffsetA_diag;
HYPRE_Int zOffsetA_offd;
HYPRE_Int xOffsetP;
HYPRE_Int yOffsetP;
HYPRE_Int zOffsetP;
stridef = cstride;
hypre_SetIndex(stridec, 1, 1, 1);
cgrid = hypre_StructMatrixGrid(RAP);
cgrid_boxes = hypre_StructGridBoxes(cgrid);
constant_coefficient_A = hypre_StructMatrixConstantCoefficient(A);
/* The box iteration below was hoisted into the caller, which now passes
   the matched box pair (fi, ci); kept for reference:
fi = 0;
hypre_ForBoxI(ci, cgrid_boxes)
{
while (fgrid_ids[fi] != cgrid_ids[ci])
{
fi++;
}
*/
cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);
cstart = hypre_BoxIMin(cgrid_box);
hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart);
A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi);
P_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(P), fi);
R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi);
RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);
/*-----------------------------------------------------------------
 * Extract pointers for interpolation operator:
 * pa is pointer for weight for f-point above c-point
 * pb is pointer for weight for f-point below c-point
 * (pb is shifted back by one z-offset so it can be indexed at iP1
 * like pa.)
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,-1);
MapIndex(index_temp, cdir, index);
pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index);
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index) -
hypre_BoxOffsetDistance(P_dbox, index);
/*-----------------------------------------------------------------
 * Extract pointers for restriction operator:
 * ra is pointer for weight for f-point above c-point
 * rb is pointer for weight for f-point below c-point
 * (rb is shifted back by one z-offset, same convention as pb.)
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,-1);
MapIndex(index_temp, cdir, index);
ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index);
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index) -
hypre_BoxOffsetDistance(R_dbox, index);
/*-----------------------------------------------------------------
 * Extract pointers for 7-point fine grid operator:
 *
 * a_cc is pointer for center coefficient
 * a_cw is pointer for west coefficient in same plane
 * a_ce is pointer for east coefficient in same plane
 * a_cs is pointer for south coefficient in same plane
 * a_cn is pointer for north coefficient in same plane
 * a_ac is pointer for center coefficient in plane above
 * a_bc is pointer for center coefficient in plane below
 * (index_temp is in "stencil space"; MapIndex rotates it according to
 * the coarsening direction cdir before the lookup.)
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,0);
MapIndex(index_temp, cdir, index);
a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,0,0);
MapIndex(index_temp, cdir, index);
a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
a_cs = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
a_ac = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
/*-----------------------------------------------------------------
 * Extract pointers for 19-point coarse grid operator:
 *
 * We build only the upper triangular part (excluding diagonal).
 *
 * rap_ce is pointer for east coefficient in same plane (etc.)
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
rap_ce = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
rap_cn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
rap_ac = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,0,1);
MapIndex(index_temp, cdir, index);
rap_aw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,0,1);
MapIndex(index_temp, cdir, index);
rap_ae = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,-1,1);
MapIndex(index_temp, cdir, index);
rap_as = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,1,1);
MapIndex(index_temp, cdir, index);
rap_an = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,1,0);
MapIndex(index_temp, cdir, index);
rap_cnw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,1,0);
MapIndex(index_temp, cdir, index);
rap_cne = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
/*-----------------------------------------------------------------
 * Define offsets for fine grid stencil and interpolation
 *
 * In the BoxLoop below I assume iA and iP refer to data associated
 * with the point which we are building the stencil for. The below
 * Offsets are used in refering to data associated with other points.
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
zOffsetP = hypre_BoxOffsetDistance(P_dbox,index);
/* When A is constant except its diagonal (constant_coefficient_A == 2),
   the diagonal is stored per-point (variable-box offset) while the
   off-diagonal entries use the constant-coefficient offset. */
if ( constant_coefficient_A == 0 )
{
zOffsetA = hypre_BoxOffsetDistance(A_dbox,index);
}
else
{
zOffsetA_diag = hypre_BoxOffsetDistance(A_dbox,index);
zOffsetA_offd = hypre_CCBoxOffsetDistance(A_dbox,index);
}
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
yOffsetP = hypre_BoxOffsetDistance(P_dbox,index);
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
xOffsetP = hypre_BoxOffsetDistance(P_dbox,index);
/*-----------------------------------------------------------------
 * NOTE: the original switch on stencil size lives in the caller
 * (hypre_PFMG3BuildRAPNoSym); this function handles only the
 * 7-point / variable-coefficient case below.
 *-----------------------------------------------------------------*/
/*--------------------------------------------------------------
 * Loop for 7-point fine grid operator; produces upper triangular
 * part of 19-point coarse grid operator. stencil entries:
 * (above-north, above-east, above-center, above-west,
 * above-south, center-north, and center-east).
 *--------------------------------------------------------------*/
hypre_BoxGetSize(cgrid_box, loop_size);
if ( constant_coefficient_A == 0 )
{
/* Fully variable A: all coefficients are indexed per fine point. */
hypre_BoxLoop4Begin(hypre_StructMatrixDim(A), loop_size,
P_dbox, cstart, stridec, iP,
R_dbox, cstart, stridec, iR,
A_dbox, fstart, stridef, iA,
RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop4For(iP, iR, iA, iAc)
{
/* iAm1/iAp1: fine points one plane below/above the current point. */
iAm1 = iA - zOffsetA;
iAp1 = iA + zOffsetA;
iP1 = iP + zOffsetP + yOffsetP;
rap_an[iAc] = ra[iR] * a_cn[iAp1] * pb[iP1];
iP1 = iP + zOffsetP + xOffsetP;
rap_ae[iAc] = ra[iR] * a_ce[iAp1] * pb[iP1];
iP1 = iP + zOffsetP;
rap_ac[iAc] = a_ac[iA] * pb[iP1]
+ ra[iR] * a_cc[iAp1] * pb[iP1]
+ ra[iR] * a_ac[iAp1];
iP1 = iP + zOffsetP - xOffsetP;
rap_aw[iAc] = ra[iR] * a_cw[iAp1] * pb[iP1];
iP1 = iP + zOffsetP - yOffsetP;
rap_as[iAc] = ra[iR] * a_cs[iAp1] * pb[iP1];
iP1 = iP + yOffsetP;
rap_cn[iAc] = a_cn[iA]
+ rb[iR] * a_cn[iAm1] * pb[iP1]
+ ra[iR] * a_cn[iAp1] * pa[iP1];
iP1 = iP + xOffsetP;
rap_ce[iAc] = a_ce[iA]
+ rb[iR] * a_ce[iAm1] * pb[iP1]
+ ra[iR] * a_ce[iAp1] * pa[iP1];
/* A 7-point fine stencil contributes nothing to the coarse corner
   entries; zero them explicitly. */
rap_cnw[iAc] = 0.0;
rap_cne[iAc] = 0.0;
}
hypre_BoxLoop4End(iP, iR, iA, iAc);
}
else
{
/* constant_coefficient_A == 2: off-diagonal coefficients of A are
   spatially constant, so read them once outside the loop; only the
   diagonal a_cc remains indexed per point inside the loop. */
iA_offd = hypre_CCBoxIndexRank(A_dbox,fstart);
iA_offdm1 = iA_offd - zOffsetA_offd;
iA_offdp1 = iA_offd + zOffsetA_offd;
a_cn_offd = a_cn[iA_offd];
a_cn_offdm1 = a_cn[iA_offdm1];
a_cn_offdp1 = a_cn[iA_offdp1];
a_ce_offd = a_ce[iA_offd];
a_ce_offdm1 = a_ce[iA_offdm1];
a_ce_offdp1 = a_ce[iA_offdp1];
a_cs_offdp1 = a_cs[iA_offdp1];
a_cw_offdp1 = a_cw[iA_offdp1];
a_ac_offd = a_ac[iA_offd];
a_ac_offdp1 = a_ac[iA_offdp1];
hypre_BoxLoop4Begin(hypre_StructMatrixDim(A), loop_size,
P_dbox, cstart, stridec, iP,
R_dbox, cstart, stridec, iR,
A_dbox, fstart, stridef, iA,
RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop4For(iP, iR, iA, iAc)
{
/* Diagonal of A is still variable: step with the box offset. */
iAm1 = iA - zOffsetA_diag;
iAp1 = iA + zOffsetA_diag;
iP1 = iP + zOffsetP + yOffsetP;
rap_an[iAc] = ra[iR] * a_cn_offdp1 * pb[iP1];
iP1 = iP + zOffsetP + xOffsetP;
rap_ae[iAc] = ra[iR] * a_ce_offdp1 * pb[iP1];
iP1 = iP + zOffsetP;
rap_ac[iAc] = a_ac_offd * pb[iP1]
+ ra[iR] * a_cc[iAp1] * pb[iP1]
+ ra[iR] * a_ac_offdp1;
iP1 = iP + zOffsetP - xOffsetP;
rap_aw[iAc] = ra[iR] * a_cw_offdp1 * pb[iP1];
iP1 = iP + zOffsetP - yOffsetP;
rap_as[iAc] = ra[iR] * a_cs_offdp1 * pb[iP1];
iP1 = iP + yOffsetP;
rap_cn[iAc] = a_cn_offd
+ rb[iR] * a_cn_offdm1 * pb[iP1]
+ ra[iR] * a_cn_offdp1 * pa[iP1];
iP1 = iP + xOffsetP;
rap_ce[iAc] = a_ce_offd
+ rb[iR] * a_ce_offdm1 * pb[iP1]
+ ra[iR] * a_ce_offdp1 * pa[iP1];
rap_cnw[iAc] = 0.0;
rap_cne[iAc] = 0.0;
}
hypre_BoxLoop4End(iP, iR, iA, iAc);
}
/* }*/ /* end ForBoxI */
return hypre_error_flag;
}
/* Core part of hypre_PFMG3BuildRAPNoSym, for one box, one value of
   fine_stencil_size (07) and one value of constant_coefficient (1):
   all of A, P, R, RAP are constant-coefficient, so each coarse stencil
   entry is computed exactly once (straight-line code, no BoxLoop) using
   the constant-coefficient index ranks at the box origin. */
HYPRE_Int
hypre_PFMG3BuildRAPNoSym_onebox_FSS07_CC1(
HYPRE_Int ci,
HYPRE_Int fi,
hypre_StructMatrix *A,
hypre_StructMatrix *P,
hypre_StructMatrix *R,
HYPRE_Int cdir,
hypre_Index cindex,
hypre_Index cstride,
hypre_StructMatrix *RAP )
{
hypre_Index index;
hypre_Index index_temp;
hypre_StructGrid *cgrid;
hypre_BoxArray *cgrid_boxes;
hypre_Box *cgrid_box;
hypre_IndexRef cstart;
hypre_Index stridec;
hypre_Index fstart;
hypre_IndexRef stridef;
hypre_Box *A_dbox;
hypre_Box *P_dbox;
hypre_Box *R_dbox;
hypre_Box *RAP_dbox;
double *pa, *pb;
double *ra, *rb;
double *a_cc, *a_cw, *a_ce, *a_cs, *a_cn;
double *a_ac;
double *rap_ce, *rap_cn;
double *rap_ac, *rap_aw, *rap_ae, *rap_as, *rap_an;
double *rap_cnw, *rap_cne;
HYPRE_Int iA, iAm1, iAp1;
HYPRE_Int iAc;
HYPRE_Int iP, iP1;
HYPRE_Int iR;
HYPRE_Int zOffsetA;
HYPRE_Int xOffsetP;
HYPRE_Int yOffsetP;
HYPRE_Int zOffsetP;
stridef = cstride;
hypre_SetIndex(stridec, 1, 1, 1);
cgrid = hypre_StructMatrixGrid(RAP);
cgrid_boxes = hypre_StructGridBoxes(cgrid);
/* The box iteration below was hoisted into the caller, which now passes
   the matched box pair (fi, ci); kept for reference:
fi = 0;
hypre_ForBoxI(ci, cgrid_boxes)
{
while (fgrid_ids[fi] != cgrid_ids[ci])
{
fi++;
}
*/
cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);
cstart = hypre_BoxIMin(cgrid_box);
hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart);
A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi);
P_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(P), fi);
R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi);
RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);
/*-----------------------------------------------------------------
 * Extract pointers for interpolation operator:
 * pa is pointer for weight for f-point above c-point
 * pb is pointer for weight for f-point below c-point
 * (pb is shifted back by one constant-coefficient z-offset so it can
 * be indexed at iP1 like pa.)
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,-1);
MapIndex(index_temp, cdir, index);
pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index);
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index) -
hypre_CCBoxOffsetDistance(P_dbox, index);
/*-----------------------------------------------------------------
 * Extract pointers for restriction operator:
 * ra is pointer for weight for f-point above c-point
 * rb is pointer for weight for f-point below c-point
 * (rb is shifted back by one constant-coefficient z-offset, same
 * convention as pb.)
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,-1);
MapIndex(index_temp, cdir, index);
ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index);
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index) -
hypre_CCBoxOffsetDistance(R_dbox, index);
/*-----------------------------------------------------------------
 * Extract pointers for 7-point fine grid operator:
 *
 * a_cc is pointer for center coefficient
 * a_cw is pointer for west coefficient in same plane
 * a_ce is pointer for east coefficient in same plane
 * a_cs is pointer for south coefficient in same plane
 * a_cn is pointer for north coefficient in same plane
 * a_ac is pointer for center coefficient in plane above
 * a_bc is pointer for center coefficient in plane below
 * (index_temp is in "stencil space"; MapIndex rotates it according to
 * the coarsening direction cdir before the lookup.)
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,0);
MapIndex(index_temp, cdir, index);
a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,0,0);
MapIndex(index_temp, cdir, index);
a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
a_cs = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
a_ac = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
/*-----------------------------------------------------------------
 * Extract pointers for 19-point coarse grid operator:
 *
 * We build only the upper triangular part (excluding diagonal).
 *
 * rap_ce is pointer for east coefficient in same plane (etc.)
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
rap_ce = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
rap_cn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
rap_ac = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,0,1);
MapIndex(index_temp, cdir, index);
rap_aw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,0,1);
MapIndex(index_temp, cdir, index);
rap_ae = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,-1,1);
MapIndex(index_temp, cdir, index);
rap_as = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,1,1);
MapIndex(index_temp, cdir, index);
rap_an = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,1,0);
MapIndex(index_temp, cdir, index);
rap_cnw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,1,0);
MapIndex(index_temp, cdir, index);
rap_cne = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
/*-----------------------------------------------------------------
 * Define offsets for fine grid stencil and interpolation
 *
 * In the computation below I assume iA and iP refer to data associated
 * with the point which we are building the stencil for. The below
 * Offsets are used in refering to data associated with other points.
 * All matrices are constant-coefficient here, so the
 * constant-coefficient (CC) offset/rank variants are used throughout.
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
zOffsetA = hypre_CCBoxOffsetDistance(A_dbox,index);
zOffsetP = hypre_CCBoxOffsetDistance(P_dbox,index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
yOffsetP = hypre_CCBoxOffsetDistance(P_dbox,index);
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
xOffsetP = hypre_CCBoxOffsetDistance(P_dbox,index);
/*-----------------------------------------------------------------
 * NOTE: the original switch on stencil size lives in the caller
 * (hypre_PFMG3BuildRAPNoSym); this function handles only the
 * 7-point / constant-coefficient case below.
 *-----------------------------------------------------------------*/
/*--------------------------------------------------------------
 * Computation for 7-point fine grid operator; produces upper
 * triangular part of 19-point coarse grid operator. stencil entries:
 * (above-north, above-east, above-center, above-west,
 * above-south, center-north, and center-east).  Each entry is a
 * single value, computed once at the box origin.
 *--------------------------------------------------------------*/
iP = hypre_CCBoxIndexRank(P_dbox,cstart);
iR = hypre_CCBoxIndexRank(R_dbox,cstart);
iA = hypre_CCBoxIndexRank(A_dbox,fstart);
iAc = hypre_CCBoxIndexRank(RAP_dbox,cstart);
/* iAm1/iAp1: fine stencil values one plane below/above. */
iAm1 = iA - zOffsetA;
iAp1 = iA + zOffsetA;
iP1 = iP + zOffsetP + yOffsetP;
rap_an[iAc] = ra[iR] * a_cn[iAp1] * pb[iP1];
iP1 = iP + zOffsetP + xOffsetP;
rap_ae[iAc] = ra[iR] * a_ce[iAp1] * pb[iP1];
iP1 = iP + zOffsetP;
rap_ac[iAc] = a_ac[iA] * pb[iP1]
+ ra[iR] * a_cc[iAp1] * pb[iP1]
+ ra[iR] * a_ac[iAp1];
iP1 = iP + zOffsetP - xOffsetP;
rap_aw[iAc] = ra[iR] * a_cw[iAp1] * pb[iP1];
iP1 = iP + zOffsetP - yOffsetP;
rap_as[iAc] = ra[iR] * a_cs[iAp1] * pb[iP1];
iP1 = iP + yOffsetP;
rap_cn[iAc] = a_cn[iA]
+ rb[iR] * a_cn[iAm1] * pb[iP1]
+ ra[iR] * a_cn[iAp1] * pa[iP1];
iP1 = iP + xOffsetP;
rap_ce[iAc] = a_ce[iA]
+ rb[iR] * a_ce[iAm1] * pb[iP1]
+ ra[iR] * a_ce[iAp1] * pa[iP1];
/* A 7-point fine stencil contributes nothing to the coarse corner
   entries; zero them explicitly. */
rap_cnw[iAc] = 0.0;
rap_cne[iAc] = 0.0;
/* }*/ /* end ForBoxI */
return hypre_error_flag;
}
/* core part of hypre_PFMG3BuildRAPNoSym, for one box, one value of fine_stencil_size
(19) and one value of constant_coefficient (0). */
HYPRE_Int
hypre_PFMG3BuildRAPNoSym_onebox_FSS19_CC0(
HYPRE_Int ci,
HYPRE_Int fi,
hypre_StructMatrix *A,
hypre_StructMatrix *P,
hypre_StructMatrix *R,
HYPRE_Int cdir,
hypre_Index cindex,
hypre_Index cstride,
hypre_StructMatrix *RAP )
{
hypre_Index index;
hypre_Index index_temp;
hypre_StructGrid *cgrid;
hypre_BoxArray *cgrid_boxes;
hypre_Box *cgrid_box;
hypre_IndexRef cstart;
hypre_Index stridec;
hypre_Index fstart;
hypre_IndexRef stridef;
hypre_Index loop_size;
HYPRE_Int constant_coefficient_A;
hypre_Box *A_dbox;
hypre_Box *P_dbox;
hypre_Box *R_dbox;
hypre_Box *RAP_dbox;
double *pa, *pb;
double *ra, *rb;
double *a_cc, *a_cw, *a_ce, *a_cs, *a_cn;
double *a_ac, *a_aw, *a_ae, *a_as, *a_an;
double *a_be, *a_bn;
double *a_csw, *a_cse, *a_cnw, *a_cne;
double a_cn_offd, a_cn_offdm1, a_cn_offdp1;
double a_ce_offd, a_ce_offdm1, a_ce_offdp1;
double a_cs_offdp1, a_cw_offdp1, a_cse_offdp1, a_csw_offdp1;
double a_cne_offd, a_cne_offdm1, a_cne_offdp1;
double a_cnw_offd, a_cnw_offdm1, a_cnw_offdp1;
double a_ac_offd, a_ac_offdp1;
double a_an_offd, a_an_offdm1, a_an_offdp1;
double a_as_offd, a_as_offdp1;
double a_aw_offd, a_aw_offdp1;
double a_ae_offd, a_ae_offdm1, a_ae_offdp1;
double a_be_offd, a_be_offdp1;
double a_bn_offd, a_bn_offdp1;
double *rap_ce, *rap_cn;
double *rap_ac, *rap_aw, *rap_ae, *rap_as, *rap_an;
double *rap_cnw, *rap_cne;
double *rap_asw, *rap_ase, *rap_anw, *rap_ane;
HYPRE_Int iA, iAm1, iAp1, iA_offd, iA_offdm1, iA_offdp1;
HYPRE_Int iAc;
HYPRE_Int iP, iP1;
HYPRE_Int iR;
HYPRE_Int zOffsetA;
HYPRE_Int zOffsetA_diag;
HYPRE_Int zOffsetA_offd;
HYPRE_Int xOffsetP;
HYPRE_Int yOffsetP;
HYPRE_Int zOffsetP;
stridef = cstride;
hypre_SetIndex(stridec, 1, 1, 1);
cgrid = hypre_StructMatrixGrid(RAP);
cgrid_boxes = hypre_StructGridBoxes(cgrid);
constant_coefficient_A = hypre_StructMatrixConstantCoefficient(A);
/* fi = 0;
hypre_ForBoxI(ci, cgrid_boxes)
{
while (fgrid_ids[fi] != cgrid_ids[ci])
{
fi++;
}
*/
cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);
cstart = hypre_BoxIMin(cgrid_box);
hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart);
A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi);
P_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(P), fi);
R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi);
RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);
/*-----------------------------------------------------------------
* Extract pointers for interpolation operator:
* pa is pointer for weight for f-point above c-point
* pb is pointer for weight for f-point below c-point
*-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,-1);
MapIndex(index_temp, cdir, index);
pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index);
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index) -
hypre_BoxOffsetDistance(P_dbox, index);
/*-----------------------------------------------------------------
* Extract pointers for restriction operator:
* ra is pointer for weight for f-point above c-point
* rb is pointer for weight for f-point below c-point
*-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,-1);
MapIndex(index_temp, cdir, index);
ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index);
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index) -
hypre_BoxOffsetDistance(R_dbox, index);
/*-----------------------------------------------------------------
* Extract pointers for 7-point fine grid operator:
*
* a_cc is pointer for center coefficient
* a_cw is pointer for west coefficient in same plane
* a_ce is pointer for east coefficient in same plane
* a_cs is pointer for south coefficient in same plane
* a_cn is pointer for north coefficient in same plane
* a_ac is pointer for center coefficient in plane above
* a_bc is pointer for center coefficient in plane below
*-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,0);
MapIndex(index_temp, cdir, index);
a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,0,0);
MapIndex(index_temp, cdir, index);
a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
a_cs = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
a_ac = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
/*-----------------------------------------------------------------
* Extract additional pointers for 19-point fine grid operator:
*
* a_aw is pointer for west coefficient in plane above
* a_ae is pointer for east coefficient in plane above
* a_as is pointer for south coefficient in plane above
* a_an is pointer for north coefficient in plane above
* a_bw is pointer for west coefficient in plane below
* a_be is pointer for east coefficient in plane below
* a_bs is pointer for south coefficient in plane below
* a_bn is pointer for north coefficient in plane below
* a_csw is pointer for southwest coefficient in same plane
* a_cse is pointer for southeast coefficient in same plane
* a_cnw is pointer for northwest coefficient in same plane
* a_cne is pointer for northeast coefficient in same plane
*-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,-1,0,1);
MapIndex(index_temp, cdir, index);
a_aw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,0,1);
MapIndex(index_temp, cdir, index);
a_ae = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,-1,1);
MapIndex(index_temp, cdir, index);
a_as = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,1,1);
MapIndex(index_temp, cdir, index);
a_an = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,0,-1);
MapIndex(index_temp, cdir, index);
a_be = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,1,-1);
MapIndex(index_temp, cdir, index);
a_bn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,-1,0);
MapIndex(index_temp, cdir, index);
a_csw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,-1,0);
MapIndex(index_temp, cdir, index);
a_cse = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,1,0);
MapIndex(index_temp, cdir, index);
a_cnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,1,0);
MapIndex(index_temp, cdir, index);
a_cne = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
/*-----------------------------------------------------------------
* Extract pointers for 19-point coarse grid operator:
*
* We build only the upper triangular part (excluding diagonal).
*
* rap_ce is pointer for east coefficient in same plane (etc.)
*-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
rap_ce = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
rap_cn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
rap_ac = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,0,1);
MapIndex(index_temp, cdir, index);
rap_aw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,0,1);
MapIndex(index_temp, cdir, index);
rap_ae = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,-1,1);
MapIndex(index_temp, cdir, index);
rap_as = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,1,1);
MapIndex(index_temp, cdir, index);
rap_an = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,1,0);
MapIndex(index_temp, cdir, index);
rap_cnw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,1,0);
MapIndex(index_temp, cdir, index);
rap_cne = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
/*-----------------------------------------------------------------
* Extract additional pointers for 27-point coarse grid operator:
*
* A 27-point coarse grid operator is produced when the fine grid
* stencil is 19 or 27 point.
*
* We build only the upper triangular part.
*
* rap_cnw is pointer for northwest coefficient in same plane (etc.)
*-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,-1,-1,1);
MapIndex(index_temp, cdir, index);
rap_asw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,-1,1);
MapIndex(index_temp, cdir, index);
rap_ase = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,1,1);
MapIndex(index_temp, cdir, index);
rap_anw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,1,1);
MapIndex(index_temp, cdir, index);
rap_ane = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
/*-----------------------------------------------------------------
* Define offsets for fine grid stencil and interpolation
*
* In the BoxLoop below I assume iA and iP refer to data associated
* with the point which we are building the stencil for. The below
* Offsets are used in refering to data associated with other points.
*-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
zOffsetP = hypre_BoxOffsetDistance(P_dbox,index);
if ( constant_coefficient_A == 0 )
{
zOffsetA = hypre_BoxOffsetDistance(A_dbox,index);
}
else
{
zOffsetA_diag = hypre_BoxOffsetDistance(A_dbox,index);
zOffsetA_offd = hypre_CCBoxOffsetDistance(A_dbox,index);
}
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
yOffsetP = hypre_BoxOffsetDistance(P_dbox,index);
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
xOffsetP = hypre_BoxOffsetDistance(P_dbox,index);
/*-----------------------------------------------------------------
* Switch statement to direct control to apropriate BoxLoop depending
* on stencil size. Default is full 27-point.
*-----------------------------------------------------------------*/
/*--------------------------------------------------------------
* Loop for 19-point fine grid operator; produces upper triangular
* part of 27-point coarse grid operator. stencil entries:
* (above-northeast, above-north, above-northwest, above-east,
* above-center, above-west, above-southeast, above-south,
* above-southwest, center-northeast, center-north,
* center-northwest, and center-east).
*--------------------------------------------------------------*/
hypre_BoxGetSize(cgrid_box, loop_size);
if ( constant_coefficient_A == 0 )
{
hypre_BoxLoop4Begin(hypre_StructMatrixDim(A), loop_size,
P_dbox, cstart, stridec, iP,
R_dbox, cstart, stridec, iR,
A_dbox, fstart, stridef, iA,
RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop4For(iP, iR, iA, iAc)
{
iAm1 = iA - zOffsetA;
iAp1 = iA + zOffsetA;
iP1 = iP + zOffsetP + yOffsetP + xOffsetP;
rap_ane[iAc] = ra[iR] * a_cne[iAp1] * pb[iP1];
iP1 = iP + zOffsetP + yOffsetP;
rap_an[iAc] = ra[iR] * a_cn[iAp1] * pb[iP1]
+ ra[iR] * a_an[iAp1]
+ a_an[iA] * pb[iP1];
iP1 = iP + zOffsetP + yOffsetP - xOffsetP;
rap_anw[iAc] = ra[iR] * a_cnw[iAp1] * pb[iP1];
iP1 = iP + zOffsetP + xOffsetP;
rap_ae[iAc] = ra[iR] * a_ce[iAp1] * pb[iP1]
+ ra[iR] * a_ae[iAp1]
+ a_ae[iA] * pb[iP1];
iP1 = iP + zOffsetP;
rap_ac[iAc] = a_ac[iA] * pb[iP1]
+ ra[iR] * a_cc[iAp1] * pb[iP1]
+ ra[iR] * a_ac[iAp1];
iP1 = iP + zOffsetP - xOffsetP;
rap_aw[iAc] = ra[iR] * a_cw[iAp1] * pb[iP1]
+ ra[iR] * a_aw[iAp1]
+ a_aw[iA] * pb[iP1];
iP1 = iP + zOffsetP - yOffsetP + xOffsetP;
rap_ase[iAc] = ra[iR] * a_cse[iAp1] * pb[iP1];
iP1 = iP + zOffsetP - yOffsetP;
rap_as[iAc] = ra[iR] * a_cs[iAp1] * pb[iP1]
+ ra[iR] * a_as[iAp1]
+ a_as[iA] * pb[iP1];
iP1 = iP + zOffsetP - yOffsetP - xOffsetP;
rap_asw[iAc] = ra[iR] * a_csw[iAp1] * pb[iP1];
iP1 = iP + yOffsetP + xOffsetP;
rap_cne[iAc] = a_cne[iA]
+ rb[iR] * a_cne[iAm1] * pb[iP1]
+ ra[iR] * a_cne[iAp1] * pa[iP1];
iP1 = iP + yOffsetP;
rap_cn[iAc] = a_cn[iA]
+ rb[iR] * a_cn[iAm1] * pb[iP1]
+ ra[iR] * a_cn[iAp1] * pa[iP1]
+ a_bn[iA] * pb[iP1]
+ a_an[iA] * pa[iP1]
+ rb[iR] * a_an[iAm1]
+ ra[iR] * a_bn[iAp1];
iP1 = iP + yOffsetP - xOffsetP;
rap_cnw[iAc] = a_cnw[iA]
+ rb[iR] * a_cnw[iAm1] * pb[iP1]
+ ra[iR] * a_cnw[iAp1] * pa[iP1];
iP1 = iP + xOffsetP;
rap_ce[iAc] = a_ce[iA]
+ rb[iR] * a_ce[iAm1] * pb[iP1]
+ ra[iR] * a_ce[iAp1] * pa[iP1]
+ a_be[iA] * pb[iP1]
+ a_ae[iA] * pa[iP1]
+ rb[iR] * a_ae[iAm1]
+ ra[iR] * a_be[iAp1];
}
hypre_BoxLoop4End(iP, iR, iA, iAc);
}
else
{
iA_offd = hypre_CCBoxIndexRank(A_dbox,fstart);
iA_offdm1 = iA_offd - zOffsetA_offd;
iA_offdp1 = iA_offd + zOffsetA_offd;
a_cn_offd = a_cn[iA_offd];
a_cn_offdm1 = a_cn[iA_offdm1];
a_cn_offdp1 = a_cn[iA_offdp1];
a_cne_offd = a_cne[iA_offd];
a_cne_offdm1 = a_cne[iA_offdm1];
a_cne_offdp1 = a_cne[iA_offdp1];
a_cnw_offd = a_cnw[iA_offd];
a_cnw_offdm1 = a_cnw[iA_offdm1];
a_cnw_offdp1 = a_cnw[iA_offdp1];
a_ce_offd = a_ce[iA_offd];
a_ce_offdm1 = a_ce[iA_offdm1];
a_ce_offdp1 = a_ce[iA_offdp1];
a_cw_offdp1 = a_cw[iA_offdp1];
a_cs_offdp1 = a_cs[iA_offdp1];
a_cse_offdp1 = a_cse[iA_offdp1];
a_csw_offdp1 = a_csw[iA_offdp1];
a_ac_offd = a_ac[iA_offd];
a_ac_offdp1 = a_ac[iA_offdp1];
a_an_offd = a_an[iA_offd];
a_an_offdm1 = a_an[iA_offdm1];
a_an_offdp1 = a_an[iA_offdp1];
a_as_offd = a_as[iA_offd];
a_as_offdp1 = a_as[iA_offdp1];
a_aw_offd = a_aw[iA_offd];
a_aw_offdp1 = a_aw[iA_offdp1];
a_ae_offd = a_ae[iA_offd];
a_ae_offdm1 = a_ae[iA_offdm1];
a_ae_offdp1 = a_ae[iA_offdp1];
a_be_offd = a_be[iA_offd];
a_be_offdp1 = a_be[iA_offdp1];
a_bn_offd = a_bn[iA_offd];
a_bn_offdp1 = a_bn[iA_offdp1];
hypre_BoxLoop4Begin(hypre_StructMatrixDim(A), loop_size,
P_dbox, cstart, stridec, iP,
R_dbox, cstart, stridec, iR,
A_dbox, fstart, stridef, iA,
RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop4For(iP, iR, iA, iAc)
{
iAm1 = iA - zOffsetA_diag;
iAp1 = iA + zOffsetA_diag;
iP1 = iP + zOffsetP + yOffsetP + xOffsetP;
rap_ane[iAc] = ra[iR] * a_cne_offdp1 * pb[iP1];
iP1 = iP + zOffsetP + yOffsetP;
rap_an[iAc] = ra[iR] * a_cn_offdp1 * pb[iP1]
+ ra[iR] * a_an_offdp1
+ a_an_offd * pb[iP1];
iP1 = iP + zOffsetP + yOffsetP - xOffsetP;
rap_anw[iAc] = ra[iR] * a_cnw_offdp1 * pb[iP1];
iP1 = iP + zOffsetP + xOffsetP;
rap_ae[iAc] = ra[iR] * a_ce_offdp1 * pb[iP1]
+ ra[iR] * a_ae_offdp1
+ a_ae_offd * pb[iP1];
iP1 = iP + zOffsetP;
rap_ac[iAc] = a_ac_offd * pb[iP1]
+ ra[iR] * a_cc[iAp1] * pb[iP1]
+ ra[iR] * a_ac_offdp1;
iP1 = iP + zOffsetP - xOffsetP;
rap_aw[iAc] = ra[iR] * a_cw_offdp1 * pb[iP1]
+ ra[iR] * a_aw_offdp1
+ a_aw_offd * pb[iP1];
iP1 = iP + zOffsetP - yOffsetP + xOffsetP;
rap_ase[iAc] = ra[iR] * a_cse_offdp1 * pb[iP1];
iP1 = iP + zOffsetP - yOffsetP;
rap_as[iAc] = ra[iR] * a_cs_offdp1 * pb[iP1]
+ ra[iR] * a_as_offdp1
+ a_as_offd * pb[iP1];
iP1 = iP + zOffsetP - yOffsetP - xOffsetP;
rap_asw[iAc] = ra[iR] * a_csw_offdp1 * pb[iP1];
iP1 = iP + yOffsetP + xOffsetP;
rap_cne[iAc] = a_cne_offd
+ rb[iR] * a_cne_offdm1 * pb[iP1]
+ ra[iR] * a_cne_offdp1 * pa[iP1];
iP1 = iP + yOffsetP;
rap_cn[iAc] = a_cn_offd
+ rb[iR] * a_cn_offdm1 * pb[iP1]
+ ra[iR] * a_cn_offdp1 * pa[iP1]
+ a_bn_offd * pb[iP1]
+ a_an_offd * pa[iP1]
+ rb[iR] * a_an_offdm1
+ ra[iR] * a_bn_offdp1;
iP1 = iP + yOffsetP - xOffsetP;
rap_cnw[iAc] = a_cnw_offd
+ rb[iR] * a_cnw_offdm1 * pb[iP1]
+ ra[iR] * a_cnw_offdp1 * pa[iP1];
iP1 = iP + xOffsetP;
rap_ce[iAc] = a_ce_offd
+ rb[iR] * a_ce_offdm1 * pb[iP1]
+ ra[iR] * a_ce_offdp1 * pa[iP1]
+ a_be_offd * pb[iP1]
+ a_ae_offd * pa[iP1]
+ rb[iR] * a_ae_offdm1
+ ra[iR] * a_be_offdp1;
}
hypre_BoxLoop4End(iP, iR, iA, iAc);
}
/* }*/ /* end ForBoxI */
return hypre_error_flag;
}
/* Core part of hypre_PFMG3BuildRAPNoSym for ONE box, specialized to a
 * 19-point fine-grid stencil (FSS19) and constant_coefficient == 1 (CC1).
 *
 * Builds the strictly-upper-triangular ("NoSym") entries of the coarse-grid
 * operator RAP = R*A*P for the coarse box `ci` paired with fine box `fi`.
 * Because all coefficients are constant over the box in the CC1 case, each
 * coarse stencil entry is computed exactly once at the box origin instead of
 * inside a BoxLoop (contrast with the CC0 variants elsewhere in this file,
 * which loop over the box with hypre_BoxLoop4Begin/End).
 *
 * Parameters:
 *   ci, fi          - coarse / fine box indices into the data spaces of RAP
 *                     and of A/P/R respectively
 *   A               - fine-grid operator (19-point stencil assumed)
 *   P, R            - interpolation and restriction operators
 *   cdir            - coarsening direction; MapIndex permutes each stencil
 *                     index so the "z" entries below refer to cdir
 *   cindex, cstride - coarse-to-fine index-space mapping
 *   RAP             - coarse-grid operator being assembled (27-point)
 *
 * Returns hypre_error_flag. */
HYPRE_Int
hypre_PFMG3BuildRAPNoSym_onebox_FSS19_CC1(
HYPRE_Int ci,
HYPRE_Int fi,
hypre_StructMatrix *A,
hypre_StructMatrix *P,
hypre_StructMatrix *R,
HYPRE_Int cdir,
hypre_Index cindex,
hypre_Index cstride,
hypre_StructMatrix *RAP )
{
hypre_Index index;
hypre_Index index_temp;
hypre_StructGrid *cgrid;
hypre_BoxArray *cgrid_boxes;
hypre_Box *cgrid_box;
hypre_IndexRef cstart;
hypre_Index stridec;
hypre_Index fstart;
hypre_IndexRef stridef;
hypre_Box *A_dbox;
hypre_Box *P_dbox;
hypre_Box *R_dbox;
hypre_Box *RAP_dbox;
/* Interpolation (pa/pb) and restriction (ra/rb) weight arrays */
double *pa, *pb;
double *ra, *rb;
/* Fine-grid operator A: c* = same plane, a* = plane above, b* = plane below */
double *a_cc, *a_cw, *a_ce, *a_cs, *a_cn;
double *a_ac, *a_aw, *a_ae, *a_as, *a_an;
double *a_be, *a_bn;
double *a_csw, *a_cse, *a_cnw, *a_cne;
/* Coarse-grid operator RAP: upper-triangular entries only */
double *rap_ce, *rap_cn;
double *rap_ac, *rap_aw, *rap_ae, *rap_as, *rap_an;
double *rap_cnw, *rap_cne;
double *rap_asw, *rap_ase, *rap_anw, *rap_ane;
HYPRE_Int iA, iAm1, iAp1;
HYPRE_Int iAc;
HYPRE_Int iP, iP1;
HYPRE_Int iR;
HYPRE_Int zOffsetA;
HYPRE_Int xOffsetP;
HYPRE_Int yOffsetP;
HYPRE_Int zOffsetP;
stridef = cstride;
hypre_SetIndex(stridec, 1, 1, 1);
cgrid = hypre_StructMatrixGrid(RAP);
cgrid_boxes = hypre_StructGridBoxes(cgrid);
/* fi = 0;
hypre_ForBoxI(ci, cgrid_boxes)
{
while (fgrid_ids[fi] != cgrid_ids[ci])
{
fi++;
}
*/
cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);
cstart = hypre_BoxIMin(cgrid_box);
hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart);
A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi);
P_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(P), fi);
R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi);
RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);
/*-----------------------------------------------------------------
* Extract pointers for interpolation operator:
* pa is pointer for weight for f-point above c-point
* pb is pointer for weight for f-point below c-point
*-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,-1);
MapIndex(index_temp, cdir, index);
pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index);
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
/* pb is shifted back by one (constant-coefficient) offset so that
 * pb[iP1] indexes the below-weight of the intended f-point */
pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index) -
hypre_CCBoxOffsetDistance(P_dbox, index);
/*-----------------------------------------------------------------
* Extract pointers for restriction operator:
* ra is pointer for weight for f-point above c-point
* rb is pointer for weight for f-point below c-point
*-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,-1);
MapIndex(index_temp, cdir, index);
ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index);
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
/* rb shifted analogously to pb (see above) */
rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index) -
hypre_CCBoxOffsetDistance(R_dbox, index);
/*-----------------------------------------------------------------
* Extract pointers for 7-point fine grid operator:
*
* a_cc is pointer for center coefficient
* a_cw is pointer for west coefficient in same plane
* a_ce is pointer for east coefficient in same plane
* a_cs is pointer for south coefficient in same plane
* a_cn is pointer for north coefficient in same plane
* a_ac is pointer for center coefficient in plane above
* a_bc is pointer for center coefficient in plane below
*-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,0);
MapIndex(index_temp, cdir, index);
a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,0,0);
MapIndex(index_temp, cdir, index);
a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
a_cs = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
a_ac = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
/*-----------------------------------------------------------------
* Extract additional pointers for 19-point fine grid operator:
*
* a_aw is pointer for west coefficient in plane above
* a_ae is pointer for east coefficient in plane above
* a_as is pointer for south coefficient in plane above
* a_an is pointer for north coefficient in plane above
* a_bw is pointer for west coefficient in plane below
* a_be is pointer for east coefficient in plane below
* a_bs is pointer for south coefficient in plane below
* a_bn is pointer for north coefficient in plane below
* a_csw is pointer for southwest coefficient in same plane
* a_cse is pointer for southeast coefficient in same plane
* a_cnw is pointer for northwest coefficient in same plane
* a_cne is pointer for northeast coefficient in same plane
*-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,-1,0,1);
MapIndex(index_temp, cdir, index);
a_aw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,0,1);
MapIndex(index_temp, cdir, index);
a_ae = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,-1,1);
MapIndex(index_temp, cdir, index);
a_as = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,1,1);
MapIndex(index_temp, cdir, index);
a_an = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,0,-1);
MapIndex(index_temp, cdir, index);
a_be = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,1,-1);
MapIndex(index_temp, cdir, index);
a_bn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,-1,0);
MapIndex(index_temp, cdir, index);
a_csw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,-1,0);
MapIndex(index_temp, cdir, index);
a_cse = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,1,0);
MapIndex(index_temp, cdir, index);
a_cnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,1,0);
MapIndex(index_temp, cdir, index);
a_cne = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
/*-----------------------------------------------------------------
* Extract pointers for 19-point coarse grid operator:
*
* We build only the upper triangular part (excluding diagonal).
*
* rap_ce is pointer for east coefficient in same plane (etc.)
*-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
rap_ce = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
rap_cn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
rap_ac = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,0,1);
MapIndex(index_temp, cdir, index);
rap_aw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,0,1);
MapIndex(index_temp, cdir, index);
rap_ae = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,-1,1);
MapIndex(index_temp, cdir, index);
rap_as = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,1,1);
MapIndex(index_temp, cdir, index);
rap_an = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,1,0);
MapIndex(index_temp, cdir, index);
rap_cnw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,1,0);
MapIndex(index_temp, cdir, index);
rap_cne = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
/*-----------------------------------------------------------------
* Extract additional pointers for 27-point coarse grid operator:
*
* A 27-point coarse grid operator is produced when the fine grid
* stencil is 19 or 27 point.
*
* We build only the upper triangular part.
*
* rap_cnw is pointer for northwest coefficient in same plane (etc.)
*-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,-1,-1,1);
MapIndex(index_temp, cdir, index);
rap_asw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,-1,1);
MapIndex(index_temp, cdir, index);
rap_ase = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,1,1);
MapIndex(index_temp, cdir, index);
rap_anw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,1,1);
MapIndex(index_temp, cdir, index);
rap_ane = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
/*-----------------------------------------------------------------
* Define offsets for fine grid stencil and interpolation
*
* In the computation below I assume iA and iP refer to data associated
* with the point which we are building the stencil for. The below
* Offsets are used in referring to data associated with other points.
*
* Constant-coefficient case: offsets come from
* hypre_CCBoxOffsetDistance rather than hypre_BoxOffsetDistance
* (compare the CC0 variants of this routine).
*-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
zOffsetA = hypre_CCBoxOffsetDistance(A_dbox,index);
zOffsetP = hypre_CCBoxOffsetDistance(P_dbox,index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
yOffsetP = hypre_CCBoxOffsetDistance(P_dbox,index);
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
xOffsetP = hypre_CCBoxOffsetDistance(P_dbox,index);
/*-----------------------------------------------------------------
* All coefficients are constant over the box, so each coarse stencil
* entry is computed once at the box origin; no BoxLoop is needed here.
*-----------------------------------------------------------------*/
/*--------------------------------------------------------------
* 19-point fine grid operator; produces upper triangular
* part of 27-point coarse grid operator. stencil entries:
* (above-northeast, above-north, above-northwest, above-east,
* above-center, above-west, above-southeast, above-south,
* above-southwest, center-northeast, center-north,
* center-northwest, and center-east).
*--------------------------------------------------------------*/
/* Base ranks at the box origin for each data space */
iP = hypre_CCBoxIndexRank(P_dbox,cstart);
iR = hypre_CCBoxIndexRank(R_dbox,cstart);
iA = hypre_CCBoxIndexRank(A_dbox,fstart);
iAc = hypre_CCBoxIndexRank(RAP_dbox,cstart);
/* Ranks of the fine points one plane below/above in direction cdir */
iAm1 = iA - zOffsetA;
iAp1 = iA + zOffsetA;
iP1 = iP + zOffsetP + yOffsetP + xOffsetP;
rap_ane[iAc] = ra[iR] * a_cne[iAp1] * pb[iP1];
iP1 = iP + zOffsetP + yOffsetP;
rap_an[iAc] = ra[iR] * a_cn[iAp1] * pb[iP1]
+ ra[iR] * a_an[iAp1]
+ a_an[iA] * pb[iP1];
iP1 = iP + zOffsetP + yOffsetP - xOffsetP;
rap_anw[iAc] = ra[iR] * a_cnw[iAp1] * pb[iP1];
iP1 = iP + zOffsetP + xOffsetP;
rap_ae[iAc] = ra[iR] * a_ce[iAp1] * pb[iP1]
+ ra[iR] * a_ae[iAp1]
+ a_ae[iA] * pb[iP1];
iP1 = iP + zOffsetP;
rap_ac[iAc] = a_ac[iA] * pb[iP1]
+ ra[iR] * a_cc[iAp1] * pb[iP1]
+ ra[iR] * a_ac[iAp1];
iP1 = iP + zOffsetP - xOffsetP;
rap_aw[iAc] = ra[iR] * a_cw[iAp1] * pb[iP1]
+ ra[iR] * a_aw[iAp1]
+ a_aw[iA] * pb[iP1];
iP1 = iP + zOffsetP - yOffsetP + xOffsetP;
rap_ase[iAc] = ra[iR] * a_cse[iAp1] * pb[iP1];
iP1 = iP + zOffsetP - yOffsetP;
rap_as[iAc] = ra[iR] * a_cs[iAp1] * pb[iP1]
+ ra[iR] * a_as[iAp1]
+ a_as[iA] * pb[iP1];
iP1 = iP + zOffsetP - yOffsetP - xOffsetP;
rap_asw[iAc] = ra[iR] * a_csw[iAp1] * pb[iP1];
iP1 = iP + yOffsetP + xOffsetP;
rap_cne[iAc] = a_cne[iA]
+ rb[iR] * a_cne[iAm1] * pb[iP1]
+ ra[iR] * a_cne[iAp1] * pa[iP1];
iP1 = iP + yOffsetP;
rap_cn[iAc] = a_cn[iA]
+ rb[iR] * a_cn[iAm1] * pb[iP1]
+ ra[iR] * a_cn[iAp1] * pa[iP1]
+ a_bn[iA] * pb[iP1]
+ a_an[iA] * pa[iP1]
+ rb[iR] * a_an[iAm1]
+ ra[iR] * a_bn[iAp1];
iP1 = iP + yOffsetP - xOffsetP;
rap_cnw[iAc] = a_cnw[iA]
+ rb[iR] * a_cnw[iAm1] * pb[iP1]
+ ra[iR] * a_cnw[iAp1] * pa[iP1];
iP1 = iP + xOffsetP;
rap_ce[iAc] = a_ce[iA]
+ rb[iR] * a_ce[iAm1] * pb[iP1]
+ ra[iR] * a_ce[iAp1] * pa[iP1]
+ a_be[iA] * pb[iP1]
+ a_ae[iA] * pa[iP1]
+ rb[iR] * a_ae[iAm1]
+ ra[iR] * a_be[iAp1];
/* }*/ /* end ForBoxI */
return hypre_error_flag;
}
/* core part of hypre_PFMG3BuildRAPNoSym, for one box, one value of fine_stencil_size
(27) and one value of constant_coefficient (0). */
HYPRE_Int
hypre_PFMG3BuildRAPNoSym_onebox_FSS27_CC0(
HYPRE_Int ci,
HYPRE_Int fi,
hypre_StructMatrix *A,
hypre_StructMatrix *P,
hypre_StructMatrix *R,
HYPRE_Int cdir,
hypre_Index cindex,
hypre_Index cstride,
hypre_StructMatrix *RAP )
{
hypre_Index index;
hypre_Index index_temp;
hypre_StructGrid *cgrid;
hypre_BoxArray *cgrid_boxes;
hypre_Box *cgrid_box;
hypre_IndexRef cstart;
hypre_Index stridec;
hypre_Index fstart;
hypre_IndexRef stridef;
hypre_Index loop_size;
HYPRE_Int constant_coefficient_A;
hypre_Box *A_dbox;
hypre_Box *P_dbox;
hypre_Box *R_dbox;
hypre_Box *RAP_dbox;
double *pa, *pb;
double *ra, *rb;
double *a_cc, *a_cw, *a_ce, *a_cs, *a_cn;
double *a_ac, *a_aw, *a_ae, *a_as, *a_an;
double *a_be, *a_bn;
double *a_csw, *a_cse, *a_cnw, *a_cne;
double *a_asw, *a_ase, *a_anw, *a_ane;
double *a_bnw, *a_bne;
double a_cn_offd, a_cn_offdm1, a_cn_offdp1;
double a_ce_offd, a_ce_offdm1, a_ce_offdp1;
double a_cs_offdp1, a_cw_offdp1, a_cse_offdp1, a_csw_offdp1;
double a_cne_offd, a_cne_offdm1, a_cne_offdp1;
double a_cnw_offd, a_cnw_offdm1, a_cnw_offdp1;
double a_ac_offd, a_ac_offdp1;
double a_an_offd, a_an_offdm1, a_an_offdp1;
double a_ane_offd, a_ane_offdm1, a_ane_offdp1;
double a_anw_offd, a_anw_offdm1, a_anw_offdp1;
double a_as_offd, a_as_offdp1;
double a_ase_offd, a_ase_offdp1, a_asw_offd, a_asw_offdp1;
double a_aw_offd, a_aw_offdp1;
double a_ae_offd, a_ae_offdm1, a_ae_offdp1;
double a_be_offd, a_be_offdp1;
double a_bn_offd, a_bn_offdp1;
double a_bne_offd, a_bne_offdp1, a_bnw_offd, a_bnw_offdp1;
double *rap_ce, *rap_cn;
double *rap_ac, *rap_aw, *rap_ae, *rap_as, *rap_an;
double *rap_cnw, *rap_cne;
double *rap_asw, *rap_ase, *rap_anw, *rap_ane;
HYPRE_Int iA, iAm1, iAp1, iA_offd, iA_offdm1, iA_offdp1;
HYPRE_Int iAc;
HYPRE_Int iP, iP1;
HYPRE_Int iR;
HYPRE_Int zOffsetA;
HYPRE_Int zOffsetA_diag;
HYPRE_Int zOffsetA_offd;
HYPRE_Int xOffsetP;
HYPRE_Int yOffsetP;
HYPRE_Int zOffsetP;
stridef = cstride;
hypre_SetIndex(stridec, 1, 1, 1);
cgrid = hypre_StructMatrixGrid(RAP);
cgrid_boxes = hypre_StructGridBoxes(cgrid);
constant_coefficient_A = hypre_StructMatrixConstantCoefficient(A);
/* fi = 0;
hypre_ForBoxI(ci, cgrid_boxes)
{
while (fgrid_ids[fi] != cgrid_ids[ci])
{
fi++;
}
*/
cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);
cstart = hypre_BoxIMin(cgrid_box);
hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart);
A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi);
P_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(P), fi);
R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi);
RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);
/*-----------------------------------------------------------------
* Extract pointers for interpolation operator:
* pa is pointer for weight for f-point above c-point
* pb is pointer for weight for f-point below c-point
*-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,-1);
MapIndex(index_temp, cdir, index);
pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index);
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index) -
hypre_BoxOffsetDistance(P_dbox, index);
/*-----------------------------------------------------------------
* Extract pointers for restriction operator:
* ra is pointer for weight for f-point above c-point
* rb is pointer for weight for f-point below c-point
*-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,-1);
MapIndex(index_temp, cdir, index);
ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index);
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index) -
hypre_BoxOffsetDistance(R_dbox, index);
/*-----------------------------------------------------------------
* Extract pointers for 7-point fine grid operator:
*
* a_cc is pointer for center coefficient
* a_cw is pointer for west coefficient in same plane
* a_ce is pointer for east coefficient in same plane
* a_cs is pointer for south coefficient in same plane
* a_cn is pointer for north coefficient in same plane
* a_ac is pointer for center coefficient in plane above
* a_bc is pointer for center coefficient in plane below
*-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,0);
MapIndex(index_temp, cdir, index);
a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,0,0);
MapIndex(index_temp, cdir, index);
a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
a_cs = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
a_ac = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
/*-----------------------------------------------------------------
* Extract additional pointers for 19-point fine grid operator:
*
* a_aw is pointer for west coefficient in plane above
* a_ae is pointer for east coefficient in plane above
* a_as is pointer for south coefficient in plane above
* a_an is pointer for north coefficient in plane above
* a_bw is pointer for west coefficient in plane below
* a_be is pointer for east coefficient in plane below
* a_bs is pointer for south coefficient in plane below
* a_bn is pointer for north coefficient in plane below
* a_csw is pointer for southwest coefficient in same plane
* a_cse is pointer for southeast coefficient in same plane
* a_cnw is pointer for northwest coefficient in same plane
* a_cne is pointer for northeast coefficient in same plane
*-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,-1,0,1);
MapIndex(index_temp, cdir, index);
a_aw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,0,1);
MapIndex(index_temp, cdir, index);
a_ae = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,-1,1);
MapIndex(index_temp, cdir, index);
a_as = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,1,1);
MapIndex(index_temp, cdir, index);
a_an = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,0,-1);
MapIndex(index_temp, cdir, index);
a_be = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,1,-1);
MapIndex(index_temp, cdir, index);
a_bn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,-1,0);
MapIndex(index_temp, cdir, index);
a_csw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,-1,0);
MapIndex(index_temp, cdir, index);
a_cse = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,1,0);
MapIndex(index_temp, cdir, index);
a_cnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,1,0);
MapIndex(index_temp, cdir, index);
a_cne = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
/*-----------------------------------------------------------------
* Extract additional pointers for 27-point fine grid operator:
*
* a_asw is pointer for southwest coefficient in plane above
* a_ase is pointer for southeast coefficient in plane above
* a_anw is pointer for northwest coefficient in plane above
* a_ane is pointer for northeast coefficient in plane above
* a_bsw is pointer for southwest coefficient in plane below
* a_bse is pointer for southeast coefficient in plane below
* a_bnw is pointer for northwest coefficient in plane below
* a_bne is pointer for northeast coefficient in plane below
*-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,-1,-1,1);
MapIndex(index_temp, cdir, index);
a_asw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,-1,1);
MapIndex(index_temp, cdir, index);
a_ase = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,1,1);
MapIndex(index_temp, cdir, index);
a_anw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,1,1);
MapIndex(index_temp, cdir, index);
a_ane = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,1,-1);
MapIndex(index_temp, cdir, index);
a_bnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,1,-1);
MapIndex(index_temp, cdir, index);
a_bne = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
/*-----------------------------------------------------------------
* Extract pointers for 19-point coarse grid operator:
*
* We build only the upper triangular part (excluding diagonal).
*
* rap_ce is pointer for east coefficient in same plane (etc.)
*-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
rap_ce = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
rap_cn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
rap_ac = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,0,1);
MapIndex(index_temp, cdir, index);
rap_aw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,0,1);
MapIndex(index_temp, cdir, index);
rap_ae = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,-1,1);
MapIndex(index_temp, cdir, index);
rap_as = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,1,1);
MapIndex(index_temp, cdir, index);
rap_an = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,1,0);
MapIndex(index_temp, cdir, index);
rap_cnw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,1,0);
MapIndex(index_temp, cdir, index);
rap_cne = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
/*-----------------------------------------------------------------
* Extract additional pointers for 27-point coarse grid operator:
*
* A 27-point coarse grid operator is produced when the fine grid
* stencil is 19 or 27 point.
*
* We build only the upper triangular part.
*
* rap_cnw is pointer for northwest coefficient in same plane (etc.)
*-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,-1,-1,1);
MapIndex(index_temp, cdir, index);
rap_asw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,-1,1);
MapIndex(index_temp, cdir, index);
rap_ase = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,1,1);
MapIndex(index_temp, cdir, index);
rap_anw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,1,1);
MapIndex(index_temp, cdir, index);
rap_ane = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
/*-----------------------------------------------------------------
* Define offsets for fine grid stencil and interpolation
*
* In the BoxLoop below I assume iA and iP refer to data associated
* with the point which we are building the stencil for. The below
* Offsets are used in refering to data associated with other points.
*-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
zOffsetP = hypre_BoxOffsetDistance(P_dbox,index);
if ( constant_coefficient_A == 0 )
{
zOffsetA = hypre_BoxOffsetDistance(A_dbox,index);
}
else
{
zOffsetA_diag = hypre_BoxOffsetDistance(A_dbox,index);
zOffsetA_offd = hypre_CCBoxOffsetDistance(A_dbox,index);
}
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
yOffsetP = hypre_BoxOffsetDistance(P_dbox,index);
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
xOffsetP = hypre_BoxOffsetDistance(P_dbox,index);
/*-----------------------------------------------------------------
* Switch statement to direct control to apropriate BoxLoop depending
* on stencil size. Default is full 27-point.
*-----------------------------------------------------------------*/
/*--------------------------------------------------------------
* Loop for 27-point fine grid operator; produces upper triangular
* part of 27-point coarse grid operator. stencil entries:
* (above-northeast, above-north, above-northwest, above-east,
* above-center, above-west, above-southeast, above-south,
* above-southwest, center-northeast, center-north,
* center-northwest, and center-east).
*--------------------------------------------------------------*/
hypre_BoxGetSize(cgrid_box, loop_size);
if ( constant_coefficient_A == 0 )
{
hypre_BoxLoop4Begin(hypre_StructMatrixDim(A), loop_size,
P_dbox, cstart, stridec, iP,
R_dbox, cstart, stridec, iR,
A_dbox, fstart, stridef, iA,
RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop4For(iP, iR, iA, iAc)
{
iAm1 = iA - zOffsetA;
iAp1 = iA + zOffsetA;
iP1 = iP + zOffsetP + yOffsetP + xOffsetP;
rap_ane[iAc] = ra[iR] * a_cne[iAp1] * pb[iP1]
+ ra[iR] * a_ane[iAp1]
+ a_ane[iA] * pb[iP1];
iP1 = iP + zOffsetP + yOffsetP;
rap_an[iAc] = ra[iR] * a_cn[iAp1] * pb[iP1]
+ ra[iR] * a_an[iAp1]
+ a_an[iA] * pb[iP1];
iP1 = iP + zOffsetP + yOffsetP - xOffsetP;
rap_anw[iAc] = ra[iR] * a_cnw[iAp1] * pb[iP1]
+ ra[iR] * a_anw[iAp1]
+ a_anw[iA] * pb[iP1];
iP1 = iP + zOffsetP + xOffsetP;
rap_ae[iAc] = ra[iR] * a_ce[iAp1] * pb[iP1]
+ ra[iR] * a_ae[iAp1]
+ a_ae[iA] * pb[iP1];
iP1 = iP + zOffsetP;
rap_ac[iAc] = a_ac[iA] * pb[iP1]
+ ra[iR] * a_cc[iAp1] * pb[iP1]
+ ra[iR] * a_ac[iAp1];
iP1 = iP + zOffsetP - xOffsetP;
rap_aw[iAc] = ra[iR] * a_cw[iAp1] * pb[iP1]
+ ra[iR] * a_aw[iAp1]
+ a_aw[iA] * pb[iP1];
iP1 = iP + zOffsetP - yOffsetP + xOffsetP;
rap_ase[iAc] = ra[iR] * a_cse[iAp1] * pb[iP1]
+ ra[iR] * a_ase[iAp1]
+ a_ase[iA] * pb[iP1];
iP1 = iP + zOffsetP - yOffsetP;
rap_as[iAc] = ra[iR] * a_cs[iAp1] * pb[iP1]
+ ra[iR] * a_as[iAp1]
+ a_as[iA] * pb[iP1];
iP1 = iP + zOffsetP - yOffsetP - xOffsetP;
rap_asw[iAc] = ra[iR] * a_csw[iAp1] * pb[iP1]
+ ra[iR] * a_asw[iAp1]
+ a_asw[iA] * pb[iP1];
iP1 = iP + yOffsetP + xOffsetP;
rap_cne[iAc] = a_cne[iA]
+ rb[iR] * a_cne[iAm1] * pb[iP1]
+ ra[iR] * a_cne[iAp1] * pa[iP1]
+ a_bne[iA] * pb[iP1]
+ a_ane[iA] * pa[iP1]
+ rb[iR] * a_ane[iAm1]
+ ra[iR] * a_bne[iAp1];
iP1 = iP + yOffsetP;
rap_cn[iAc] = a_cn[iA]
+ rb[iR] * a_cn[iAm1] * pb[iP1]
+ ra[iR] * a_cn[iAp1] * pa[iP1]
+ a_bn[iA] * pb[iP1]
+ a_an[iA] * pa[iP1]
+ rb[iR] * a_an[iAm1]
+ ra[iR] * a_bn[iAp1];
iP1 = iP + yOffsetP - xOffsetP;
rap_cnw[iAc] = a_cnw[iA]
+ rb[iR] * a_cnw[iAm1] * pb[iP1]
+ ra[iR] * a_cnw[iAp1] * pa[iP1]
+ a_bnw[iA] * pb[iP1]
+ a_anw[iA] * pa[iP1]
+ rb[iR] * a_anw[iAm1]
+ ra[iR] * a_bnw[iAp1];
iP1 = iP + xOffsetP;
rap_ce[iAc] = a_ce[iA]
+ rb[iR] * a_ce[iAm1] * pb[iP1]
+ ra[iR] * a_ce[iAp1] * pa[iP1]
+ a_be[iA] * pb[iP1]
+ a_ae[iA] * pa[iP1]
+ rb[iR] * a_ae[iAm1]
+ ra[iR] * a_be[iAp1];
}
hypre_BoxLoop4End(iP, iR, iA, iAc);
}
else
{
iA_offd = hypre_CCBoxIndexRank(A_dbox,fstart);
iA_offdm1 = iA_offd - zOffsetA_offd;
iA_offdp1 = iA_offd + zOffsetA_offd;
a_cn_offd = a_cn[iA_offd];
a_cn_offdm1 = a_cn[iA_offdm1];
a_cn_offdp1 = a_cn[iA_offdp1];
a_cne_offd = a_cne[iA_offd];
a_cne_offdm1 = a_cne[iA_offdm1];
a_cne_offdp1 = a_cne[iA_offdp1];
a_cnw_offd = a_cnw[iA_offd];
a_cnw_offdm1 = a_cnw[iA_offdm1];
a_cnw_offdp1 = a_cnw[iA_offdp1];
a_ce_offd = a_ce[iA_offd];
a_ce_offdm1 = a_ce[iA_offdm1];
a_ce_offdp1 = a_ce[iA_offdp1];
a_cs_offdp1 = a_cs[iA_offdp1];
a_cse_offdp1 = a_cse[iA_offdp1];
a_csw_offdp1 = a_csw[iA_offdp1];
a_cw_offdp1 = a_cw[iA_offdp1];
a_ac_offd = a_ac[iA_offd];
a_ac_offdp1 = a_ac[iA_offdp1];
a_an_offd = a_an[iA_offd];
a_an_offdm1 = a_an[iA_offdm1];
a_an_offdp1 = a_an[iA_offdp1];
a_ane_offd = a_ane[iA_offd];
a_ane_offdm1 = a_ane[iA_offdm1];
a_ane_offdp1 = a_ane[iA_offdp1];
a_anw_offd = a_anw[iA_offd];
a_anw_offdm1 = a_anw[iA_offdm1];
a_anw_offdp1 = a_anw[iA_offdp1];
a_ae_offd = a_ae[iA_offd];
a_ae_offdm1 = a_ae[iA_offdm1];
a_ae_offdp1 = a_ae[iA_offdp1];
a_aw_offd = a_aw[iA_offd];
a_aw_offdp1 = a_aw[iA_offdp1];
a_as_offd = a_as[iA_offd];
a_as_offdp1 = a_as[iA_offdp1];
a_ase_offd = a_ase[iA_offd];
a_ase_offdp1 = a_ase[iA_offdp1];
a_asw_offd = a_asw[iA_offd];
a_asw_offdp1 = a_asw[iA_offdp1];
a_bn_offd = a_bn[iA_offd];
a_bn_offdp1 = a_bn[iA_offdp1];
a_bne_offd = a_bne[iA_offd];
a_bne_offdp1 = a_bne[iA_offdp1];
a_bnw_offd = a_bnw[iA_offd];
a_bnw_offdp1 = a_bnw[iA_offdp1];
a_be_offd = a_be[iA_offd];
a_be_offdp1 = a_be[iA_offdp1];
hypre_BoxLoop4Begin(hypre_StructMatrixDim(A), loop_size,
P_dbox, cstart, stridec, iP,
R_dbox, cstart, stridec, iR,
A_dbox, fstart, stridef, iA,
RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop4For(iP, iR, iA, iAc)
{
iAm1 = iA - zOffsetA_diag;
iAp1 = iA + zOffsetA_diag;
iP1 = iP + zOffsetP + yOffsetP + xOffsetP;
rap_ane[iAc] = ra[iR] * a_cne_offdp1 * pb[iP1]
+ ra[iR] * a_ane_offdp1
+ a_ane_offd * pb[iP1];
iP1 = iP + zOffsetP + yOffsetP;
rap_an[iAc] = ra[iR] * a_cn_offdp1 * pb[iP1]
+ ra[iR] * a_an_offdp1
+ a_an_offd * pb[iP1];
iP1 = iP + zOffsetP + yOffsetP - xOffsetP;
rap_anw[iAc] = ra[iR] * a_cnw_offdp1 * pb[iP1]
+ ra[iR] * a_anw_offdp1
+ a_anw_offd * pb[iP1];
iP1 = iP + zOffsetP + xOffsetP;
rap_ae[iAc] = ra[iR] * a_ce_offdp1 * pb[iP1]
+ ra[iR] * a_ae_offdp1
+ a_ae_offd * pb[iP1];
iP1 = iP + zOffsetP;
rap_ac[iAc] = a_ac_offd * pb[iP1]
+ ra[iR] * a_cc[iAp1] * pb[iP1]
+ ra[iR] * a_ac_offdp1;
iP1 = iP + zOffsetP - xOffsetP;
rap_aw[iAc] = ra[iR] * a_cw_offdp1 * pb[iP1]
+ ra[iR] * a_aw_offdp1
+ a_aw_offd * pb[iP1];
iP1 = iP + zOffsetP - yOffsetP + xOffsetP;
rap_ase[iAc] = ra[iR] * a_cse_offdp1 * pb[iP1]
+ ra[iR] * a_ase_offdp1
+ a_ase_offd * pb[iP1];
iP1 = iP + zOffsetP - yOffsetP;
rap_as[iAc] = ra[iR] * a_cs_offdp1 * pb[iP1]
+ ra[iR] * a_as_offdp1
+ a_as_offd * pb[iP1];
iP1 = iP + zOffsetP - yOffsetP - xOffsetP;
rap_asw[iAc] = ra[iR] * a_csw_offdp1 * pb[iP1]
+ ra[iR] * a_asw_offdp1
+ a_asw_offd * pb[iP1];
iP1 = iP + yOffsetP + xOffsetP;
rap_cne[iAc] = a_cne_offd
+ rb[iR] * a_cne_offdm1 * pb[iP1]
+ ra[iR] * a_cne_offdp1 * pa[iP1]
+ a_bne_offd * pb[iP1]
+ a_ane_offd * pa[iP1]
+ rb[iR] * a_ane_offdm1
+ ra[iR] * a_bne_offdp1;
iP1 = iP + yOffsetP;
rap_cn[iAc] = a_cn_offd
+ rb[iR] * a_cn_offdm1 * pb[iP1]
+ ra[iR] * a_cn_offdp1 * pa[iP1]
+ a_bn_offd * pb[iP1]
+ a_an_offd * pa[iP1]
+ rb[iR] * a_an_offdm1
+ ra[iR] * a_bn_offdp1;
iP1 = iP + yOffsetP - xOffsetP;
rap_cnw[iAc] = a_cnw_offd
+ rb[iR] * a_cnw_offdm1 * pb[iP1]
+ ra[iR] * a_cnw_offdp1 * pa[iP1]
+ a_bnw_offd * pb[iP1]
+ a_anw_offd * pa[iP1]
+ rb[iR] * a_anw_offdm1
+ ra[iR] * a_bnw_offdp1;
iP1 = iP + xOffsetP;
rap_ce[iAc] = a_ce_offd
+ rb[iR] * a_ce_offdm1 * pb[iP1]
+ ra[iR] * a_ce_offdp1 * pa[iP1]
+ a_be_offd * pb[iP1]
+ a_ae_offd * pa[iP1]
+ rb[iR] * a_ae_offdm1
+ ra[iR] * a_be_offdp1;
}
hypre_BoxLoop4End(iP, iR, iA, iAc);
}
/* }*/ /* end ForBoxI */
return hypre_error_flag;
}
/* core part of hypre_PFMG3BuildRAPNoSym, for one box, one value of fine_stencil_size
(27) and one value of constant_coefficient (1). */
/*
 * Computes the upper-triangular ("non-symmetric") stencil entries of the
 * coarse-grid operator RAP = R*A*P for one coarse box (ci) / fine box (fi)
 * pair, for a 27-point fine-grid stencil with constant_coefficient == 1
 * (every stencil coefficient of A, P and R is constant over the box).
 *
 *   ci, fi    - coarse / fine box indices into the matrices' data spaces
 *   A, P, R   - fine-grid operator, interpolation, restriction
 *   cdir      - coarsening direction (MapIndex maps the canonical z-offsets
 *               onto this direction)
 *   cindex,
 *   cstride   - map coarse-grid indices back to the fine grid
 *   RAP       - coarse-grid operator being built (output)
 *
 * Returns hypre_error_flag.
 */
HYPRE_Int
hypre_PFMG3BuildRAPNoSym_onebox_FSS27_CC1(
HYPRE_Int ci,
HYPRE_Int fi,
hypre_StructMatrix *A,
hypre_StructMatrix *P,
hypre_StructMatrix *R,
HYPRE_Int cdir,
hypre_Index cindex,
hypre_Index cstride,
hypre_StructMatrix *RAP )
{
hypre_Index index;
hypre_Index index_temp;
hypre_StructGrid *cgrid;
hypre_BoxArray *cgrid_boxes;
hypre_Box *cgrid_box;
hypre_IndexRef cstart;
hypre_Index stridec;
hypre_Index fstart;
hypre_IndexRef stridef;
hypre_Box *A_dbox;
hypre_Box *P_dbox;
hypre_Box *R_dbox;
hypre_Box *RAP_dbox;
double *pa, *pb;
double *ra, *rb;
double *a_cc, *a_cw, *a_ce, *a_cs, *a_cn;
double *a_ac, *a_aw, *a_ae, *a_as, *a_an;
double *a_be, *a_bn;
double *a_csw, *a_cse, *a_cnw, *a_cne;
double *a_asw, *a_ase, *a_anw, *a_ane;
double *a_bnw, *a_bne;
double *rap_ce, *rap_cn;
double *rap_ac, *rap_aw, *rap_ae, *rap_as, *rap_an;
double *rap_cnw, *rap_cne;
double *rap_asw, *rap_ase, *rap_anw, *rap_ane;
HYPRE_Int iA, iAm1, iAp1;
HYPRE_Int iAc;
HYPRE_Int iP, iP1;
HYPRE_Int iR;
HYPRE_Int zOffsetA;
HYPRE_Int xOffsetP;
HYPRE_Int yOffsetP;
HYPRE_Int zOffsetP;
stridef = cstride;
hypre_SetIndex(stridec, 1, 1, 1);
cgrid = hypre_StructMatrixGrid(RAP);
cgrid_boxes = hypre_StructGridBoxes(cgrid);
/* fi = 0;
hypre_ForBoxI(ci, cgrid_boxes)
{
while (fgrid_ids[fi] != cgrid_ids[ci])
{
fi++;
}
*/
cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);
cstart = hypre_BoxIMin(cgrid_box);
hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart);
A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi);
P_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(P), fi);
R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi);
RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);
/*-----------------------------------------------------------------
 * Extract pointers for interpolation operator:
 * pa is pointer for weight for f-point above c-point
 * pb is pointer for weight for f-point below c-point
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,-1);
MapIndex(index_temp, cdir, index);
pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index);
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
/* pb is pre-shifted by one (constant-coefficient) z-offset so that
   pb[iP1] below addresses the "below" weight at the intended f-point */
pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index) -
hypre_CCBoxOffsetDistance(P_dbox, index);
/*-----------------------------------------------------------------
 * Extract pointers for restriction operator:
 * ra is pointer for weight for f-point above c-point
 * rb is pointer for weight for f-point below c-point
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,-1);
MapIndex(index_temp, cdir, index);
ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index);
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
/* rb is pre-shifted exactly like pb above */
rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index) -
hypre_CCBoxOffsetDistance(R_dbox, index);
/*-----------------------------------------------------------------
 * Extract pointers for 7-point fine grid operator:
 *
 * a_cc is pointer for center coefficient
 * a_cw is pointer for west coefficient in same plane
 * a_ce is pointer for east coefficient in same plane
 * a_cs is pointer for south coefficient in same plane
 * a_cn is pointer for north coefficient in same plane
 * a_ac is pointer for center coefficient in plane above
 * a_bc is pointer for center coefficient in plane below
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,0);
MapIndex(index_temp, cdir, index);
a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,0,0);
MapIndex(index_temp, cdir, index);
a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
a_cs = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
a_ac = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
/*-----------------------------------------------------------------
 * Extract additional pointers for 19-point fine grid operator:
 *
 * a_aw is pointer for west coefficient in plane above
 * a_ae is pointer for east coefficient in plane above
 * a_as is pointer for south coefficient in plane above
 * a_an is pointer for north coefficient in plane above
 * a_bw is pointer for west coefficient in plane below
 * a_be is pointer for east coefficient in plane below
 * a_bs is pointer for south coefficient in plane below
 * a_bn is pointer for north coefficient in plane below
 * a_csw is pointer for southwest coefficient in same plane
 * a_cse is pointer for southeast coefficient in same plane
 * a_cnw is pointer for northwest coefficient in same plane
 * a_cne is pointer for northeast coefficient in same plane
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,-1,0,1);
MapIndex(index_temp, cdir, index);
a_aw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,0,1);
MapIndex(index_temp, cdir, index);
a_ae = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,-1,1);
MapIndex(index_temp, cdir, index);
a_as = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,1,1);
MapIndex(index_temp, cdir, index);
a_an = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,0,-1);
MapIndex(index_temp, cdir, index);
a_be = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,1,-1);
MapIndex(index_temp, cdir, index);
a_bn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,-1,0);
MapIndex(index_temp, cdir, index);
a_csw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,-1,0);
MapIndex(index_temp, cdir, index);
a_cse = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,1,0);
MapIndex(index_temp, cdir, index);
a_cnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,1,0);
MapIndex(index_temp, cdir, index);
a_cne = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
/*-----------------------------------------------------------------
 * Extract additional pointers for 27-point fine grid operator:
 *
 * a_asw is pointer for southwest coefficient in plane above
 * a_ase is pointer for southeast coefficient in plane above
 * a_anw is pointer for northwest coefficient in plane above
 * a_ane is pointer for northeast coefficient in plane above
 * a_bsw is pointer for southwest coefficient in plane below
 * a_bse is pointer for southeast coefficient in plane below
 * a_bnw is pointer for northwest coefficient in plane below
 * a_bne is pointer for northeast coefficient in plane below
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,-1,-1,1);
MapIndex(index_temp, cdir, index);
a_asw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,-1,1);
MapIndex(index_temp, cdir, index);
a_ase = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,1,1);
MapIndex(index_temp, cdir, index);
a_anw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,1,1);
MapIndex(index_temp, cdir, index);
a_ane = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,1,-1);
MapIndex(index_temp, cdir, index);
a_bnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,1,-1);
MapIndex(index_temp, cdir, index);
a_bne = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
/*-----------------------------------------------------------------
 * Extract pointers for 19-point coarse grid operator:
 *
 * We build only the upper triangular part (excluding diagonal).
 *
 * rap_ce is pointer for east coefficient in same plane (etc.)
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
rap_ce = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
rap_cn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
rap_ac = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,0,1);
MapIndex(index_temp, cdir, index);
rap_aw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,0,1);
MapIndex(index_temp, cdir, index);
rap_ae = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,-1,1);
MapIndex(index_temp, cdir, index);
rap_as = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,1,1);
MapIndex(index_temp, cdir, index);
rap_an = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,1,0);
MapIndex(index_temp, cdir, index);
rap_cnw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,1,0);
MapIndex(index_temp, cdir, index);
rap_cne = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
/*-----------------------------------------------------------------
 * Extract additional pointers for 27-point coarse grid operator:
 *
 * A 27-point coarse grid operator is produced when the fine grid
 * stencil is 19 or 27 point.
 *
 * We build only the upper triangular part.
 *
 * rap_cnw is pointer for northwest coefficient in same plane (etc.)
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,-1,-1,1);
MapIndex(index_temp, cdir, index);
rap_asw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,-1,1);
MapIndex(index_temp, cdir, index);
rap_ase = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,1,1);
MapIndex(index_temp, cdir, index);
rap_anw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,1,1);
MapIndex(index_temp, cdir, index);
rap_ane = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
/*-----------------------------------------------------------------
 * Define offsets for fine grid stencil and interpolation
 *
 * In the BoxLoop below I assume iA and iP refer to data associated
 * with the point which we are building the stencil for. The below
 * Offsets are used in refering to data associated with other points.
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
zOffsetA = hypre_CCBoxOffsetDistance(A_dbox,index);
zOffsetP = hypre_CCBoxOffsetDistance(P_dbox,index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
yOffsetP = hypre_CCBoxOffsetDistance(P_dbox,index);
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
xOffsetP = hypre_CCBoxOffsetDistance(P_dbox,index);
/*-----------------------------------------------------------------
 * Switch statement to direct control to apropriate BoxLoop depending
 * on stencil size. Default is full 27-point.
 *-----------------------------------------------------------------*/
/*--------------------------------------------------------------
 * Loop for 27-point fine grid operator; produces upper triangular
 * part of 27-point coarse grid operator. stencil entries:
 * (above-northeast, above-north, above-northwest, above-east,
 * above-center, above-west, above-southeast, above-south,
 * above-southwest, center-northeast, center-north,
 * center-northwest, and center-east).
 *--------------------------------------------------------------*/
/*--------------------------------------------------------------
 * Constant-coefficient case: all coefficients are constant over the
 * box, so each RAP stencil entry is computed once at the box origin
 * (no BoxLoop).  All ranks/offsets use the hypre_CCBox* variants.
 *--------------------------------------------------------------*/
iP = hypre_CCBoxIndexRank(P_dbox,cstart);
iR = hypre_CCBoxIndexRank(R_dbox,cstart);
iA = hypre_CCBoxIndexRank(A_dbox,fstart);
iAc = hypre_CCBoxIndexRank(RAP_dbox,cstart);
iAm1 = iA - zOffsetA;
iAp1 = iA + zOffsetA;
iP1 = iP + zOffsetP + yOffsetP + xOffsetP;
rap_ane[iAc] = ra[iR] * a_cne[iAp1] * pb[iP1]
+ ra[iR] * a_ane[iAp1]
+ a_ane[iA] * pb[iP1];
iP1 = iP + zOffsetP + yOffsetP;
rap_an[iAc] = ra[iR] * a_cn[iAp1] * pb[iP1]
+ ra[iR] * a_an[iAp1]
+ a_an[iA] * pb[iP1];
iP1 = iP + zOffsetP + yOffsetP - xOffsetP;
rap_anw[iAc] = ra[iR] * a_cnw[iAp1] * pb[iP1]
+ ra[iR] * a_anw[iAp1]
+ a_anw[iA] * pb[iP1];
iP1 = iP + zOffsetP + xOffsetP;
rap_ae[iAc] = ra[iR] * a_ce[iAp1] * pb[iP1]
+ ra[iR] * a_ae[iAp1]
+ a_ae[iA] * pb[iP1];
iP1 = iP + zOffsetP;
rap_ac[iAc] = a_ac[iA] * pb[iP1]
+ ra[iR] * a_cc[iAp1] * pb[iP1]
+ ra[iR] * a_ac[iAp1];
iP1 = iP + zOffsetP - xOffsetP;
rap_aw[iAc] = ra[iR] * a_cw[iAp1] * pb[iP1]
+ ra[iR] * a_aw[iAp1]
+ a_aw[iA] * pb[iP1];
iP1 = iP + zOffsetP - yOffsetP + xOffsetP;
rap_ase[iAc] = ra[iR] * a_cse[iAp1] * pb[iP1]
+ ra[iR] * a_ase[iAp1]
+ a_ase[iA] * pb[iP1];
iP1 = iP + zOffsetP - yOffsetP;
rap_as[iAc] = ra[iR] * a_cs[iAp1] * pb[iP1]
+ ra[iR] * a_as[iAp1]
+ a_as[iA] * pb[iP1];
iP1 = iP + zOffsetP - yOffsetP - xOffsetP;
rap_asw[iAc] = ra[iR] * a_csw[iAp1] * pb[iP1]
+ ra[iR] * a_asw[iAp1]
+ a_asw[iA] * pb[iP1];
iP1 = iP + yOffsetP + xOffsetP;
rap_cne[iAc] = a_cne[iA]
+ rb[iR] * a_cne[iAm1] * pb[iP1]
+ ra[iR] * a_cne[iAp1] * pa[iP1]
+ a_bne[iA] * pb[iP1]
+ a_ane[iA] * pa[iP1]
+ rb[iR] * a_ane[iAm1]
+ ra[iR] * a_bne[iAp1];
iP1 = iP + yOffsetP;
rap_cn[iAc] = a_cn[iA]
+ rb[iR] * a_cn[iAm1] * pb[iP1]
+ ra[iR] * a_cn[iAp1] * pa[iP1]
+ a_bn[iA] * pb[iP1]
+ a_an[iA] * pa[iP1]
+ rb[iR] * a_an[iAm1]
+ ra[iR] * a_bn[iAp1];
iP1 = iP + yOffsetP - xOffsetP;
rap_cnw[iAc] = a_cnw[iA]
+ rb[iR] * a_cnw[iAm1] * pb[iP1]
+ ra[iR] * a_cnw[iAp1] * pa[iP1]
+ a_bnw[iA] * pb[iP1]
+ a_anw[iA] * pa[iP1]
+ rb[iR] * a_anw[iAm1]
+ ra[iR] * a_bnw[iAp1];
iP1 = iP + xOffsetP;
rap_ce[iAc] = a_ce[iA]
+ rb[iR] * a_ce[iAm1] * pb[iP1]
+ ra[iR] * a_ce[iAp1] * pa[iP1]
+ a_be[iA] * pb[iP1]
+ a_ae[iA] * pa[iP1]
+ rb[iR] * a_ae[iAm1]
+ ra[iR] * a_be[iAp1];
/* }*/ /* end ForBoxI */
return hypre_error_flag;
}
|
Normals.h | /* License Information
*
* Copyright (C) ONERA, The French Aerospace Lab
* Author: Alexandre BOULCH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of this
* software and associated documentation files (the "Software"), to deal in the Software
* without restriction, including without limitation the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
* to whom the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or
* substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
* INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
* PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
* OR OTHER DEALINGS IN THE SOFTWARE.
*
*
* Note that this library relies on external libraries subject to their own license.
* To use this software, you are subject to the dependencies' licenses; these licenses
* apply to the dependencies ONLY and NOT to this code.
* Please refer to the web sites below for license information:
* PCL, BOOST,NANOFLANN, EIGEN
*
* When using the software please acknowledge the corresponding publication:
* "Deep Learning for Robust Normal Estimation in Unstructured Point Clouds "
* by Alexandre Boulch and Renaud Marlet
* Symposium of Geometry Processing 2016, Computer Graphics Forum
*/
#ifndef NORMALS_HEADER
#define NORMALS_HEADER
#include <vector>
#include <iostream>
#include <ctime>
#include <math.h>
#include <string>
#include <sstream>
#include <algorithm>   // std::lower_bound
#include <functional>  // std::function (progressCallback member)
#include <utility>     // std::move
#include <Eigen/Dense>
#include <nanoflann.hpp>
#ifdef _OPENMP
#include <omp.h>
#define USE_OPENMP_FOR_NORMEST
#endif
/*!
 * Robust normal estimator for an unstructured point cloud.
 * Points and normals are Eigen matrices with one point per row; the
 * estimator holds references to both (no ownership, no copies).
 * Call estimate_normals() to fill the normal cloud.
 */
class Eigen_Normal_Estimator{
private:
const Eigen::MatrixX3d& pts;/*!< Point cloud (input, one point per row; referenced, not owned)*/
Eigen::MatrixX3d& nls;/*!< Normal cloud (output; resized by estimate_normals to match pts)*/
std::vector<double> densities; /*!< vector of the densities (mean distance to the k_density nearest neighbors, per point)*/
//// PARAMETERS ////
int n_planes; /*!< Plane number to draw (per rotation)*/
int n_phi;/*!< Accumulator discretization parameter*/
int n_rot;/*!< Rotation number*/
size_t neighborhood_size; /*!< size of the neighborhood*/
bool use_density; /*!< use a density estimation of triplets generation*/
double tol_angle_rad;/*!< Angle parameter for cluster normal selection*/
size_t k_density; /*!< size of the neighborhood for density estimation*/
std::function<void(int)> progressCallback; /*!< optional progress observer; called with a running counter, see maxProgressCounter()*/
public:
//accessors: the non-const overloads return mutable references so that
//parameters can be set as e.g. est.get_T() = 1000;
const Eigen::MatrixX3d& get_points()const {return pts;}
Eigen::MatrixX3d& get_normals(){return nls;}
int& get_T() { return n_planes; } // plane hypotheses per rotation
int& get_n_phi() { return n_phi; }
int& get_n_rot() { return n_rot; }
size_t& get_K() { return neighborhood_size; }
bool& density_sensitive() { return use_density; }
double& get_tol_angle_rad() { return tol_angle_rad; }
size_t& get_K_density() { return k_density; }
const Eigen::MatrixX3d& get_normals()const {return nls;}
const int& get_T() const {return n_planes;}
const int& get_n_phi() const {return n_phi;}
const int& get_n_rot() const {return n_rot;}
const size_t& get_K() const { return neighborhood_size; }
const bool& density_sensitive() const {return use_density;}
const double& get_tol_angle_rad() const {return tol_angle_rad;}
const size_t& get_K_density() const { return k_density; }
//// TYPE DEFINITIONS ////
typedef nanoflann::KDTreeEigenMatrixAdaptor< Eigen::MatrixX3d > kd_tree; //a row is a point
/*!
 * Constructor: binds the input point cloud and the output normal cloud,
 * and installs the default estimation parameters.
 * @param points : input cloud, one point per row (referenced, not copied)
 * @param normals : output cloud, resized/filled by estimate_normals()
 */
Eigen_Normal_Estimator(const Eigen::MatrixX3d& points, Eigen::MatrixX3d& normals)
	: pts(points)
	, nls(normals)
	, n_planes(700)
	, n_phi(15)
	, n_rot(5)
	, neighborhood_size(200)
	, use_density(false)
	, k_density(5)
{
	// tol_angle_rad is declared after use_density; initialized here to
	// keep the member-init list in declaration order
	tol_angle_rad = 0.79;
}
/*!
 * Registers a callback invoked with the running progress counter
 * (up to maxProgressCounter()) while estimate_normals() runs.
 * @param callback : progress observer; an empty function disables reporting
 */
void setProgressCallback(std::function<void(int)> callback)
{
	// taken by value, then moved: avoids copying the callable's state twice
	progressCallback = std::move(callback);
}
/*!
 * Upper bound of the progress counter reported to the callback:
 * each point is visited twice (density pass, then estimation pass).
 * @return 2 * number of points
 */
int maxProgressCounter() const
{
	// Eigen::Index (ptrdiff_t) -> int conversion made explicit
	return static_cast<int>(pts.rows()) * 2;
}
/*!
 * Runs the full estimation pipeline: a first pass computes per-point
 * densities (mean distance to the k_density nearest neighbors), a second
 * pass votes for a normal at every point from random plane hypotheses
 * drawn in its neighborhood, under several random rotations of the
 * neighborhood.  nls is resized to pts.rows() x 3 and filled; the
 * progress callback (if set) is invoked once per point per pass.
 */
void estimate_normals()
{
/*********************************
* INIT
********************************/
//initialize the random number generator
srand(static_cast<unsigned int>(time(NULL)));
//creating vector of random int
//(pre-drawn table shared by all threads; rand() itself is not called
//inside the parallel loops)
std::vector<size_t> vecInt(1000000);
for (size_t i = 0; i < vecInt.size(); i++)
{
vecInt[i] = static_cast<size_t>(rand());
}
//confidence intervals (2 intervals length)
std::vector<float> conf_interv(n_planes);
for (int i = 0; i < n_planes; i++)
{
conf_interv[i] = 2.f / std::sqrt(i + 1.f);
}
//random permutation of the points (avoid thread difficult block)
std::vector<int> permutation(pts.rows());
for (int i = 0; i < pts.rows(); i++)
{
permutation[i] = i;
}
for (int i = 0; i < pts.rows(); i++)
{
int j = rand() % pts.rows();
std::swap(permutation[i], permutation[j]);
}
//creation of the rotation matrices and their inverses
std::vector<Eigen::Matrix3d> rotMat;
std::vector<Eigen::Matrix3d> rotMatInv;
generate_rotation_matrix(rotMat,rotMatInv, n_rot*200);
//dimensions of the accumulator
int d1 = 2*n_phi;
int d2 = n_phi+1;
//progress
int progress = 0;
/*******************************
* ESTIMATION
******************************/
//resizing the normal point cloud
nls.resize(pts.rows(), 3);
//kd tree creation
//build the kd_tree
kd_tree tree(3, pts, 10 /* max leaf */ );
tree.index->buildIndex();
//create the density estimation for each point
densities.resize(pts.rows());
#if defined(USE_OPENMP_FOR_NORMEST)
#pragma omp parallel for schedule(guided)
#endif
for (int per = 0; per < pts.rows(); per++)
{
//index of the point
int n = permutation[per];
//getting the list of neighbors
const Eigen::Vector3d& pt_query = pts.row(n);
std::vector<Eigen::MatrixX3d::Index> pointIdxSearch(k_density + 1);
std::vector<double> pointSquaredDistance(k_density + 1);
//knn for k_density+1 because the point is itself include in the search tree
tree.index->knnSearch(&pt_query[0], k_density + 1, &pointIdxSearch[0], &pointSquaredDistance[0]);
double d = 0;
for (size_t i = 0; i < pointSquaredDistance.size(); i++)
{
d += std::sqrt(pointSquaredDistance[i]);
}
//mean over the k_density true neighbors (the query contributes 0)
d /= pointSquaredDistance.size() - 1;
densities[n] = d;
if (progressCallback)
{
// NOTE(review): ++progress is shared across OpenMP threads without
// synchronization (data race); consider an atomic counter — verify
progressCallback(++progress);
}
}
int rotations = std::max(n_rot,1);
//create the list of triplets in KNN case
//(uniform sampling: one shared triplet table for all points)
Eigen::MatrixX3i trip;
if (!use_density)
{
list_of_triplets(trip, neighborhood_size, rotations*n_planes, vecInt);
}
#if defined(USE_OPENMP_FOR_NORMEST)
#pragma omp parallel for schedule(guided)
#endif
for (int per = 0; per < pts.rows(); per++)
{
//index of the point
int n = permutation[per];
//getting the list of neighbors
std::vector<Eigen::MatrixX3d::Index> pointIdxSearch;
std::vector<double> pointSquaredDistance;
const Eigen::Vector3d& pt_query = pts.row(n);
pointIdxSearch.resize(neighborhood_size);
pointSquaredDistance.resize(neighborhood_size);
tree.index->knnSearch(&pt_query[0], neighborhood_size, &pointIdxSearch[0], &pointSquaredDistance[0]);
//density-weighted sampling draws a per-point triplet table
// NOTE(review): 'trip' is shared between threads; in the use_density
// branch each iteration overwrites it concurrently — verify
if (use_density)
list_of_triplets(trip, rotations*n_planes, pointIdxSearch, vecInt);
//get the points
size_t points_size = pointIdxSearch.size();
Eigen::MatrixX3d points(points_size, 3);
for (size_t pt = 0; pt<pointIdxSearch.size(); pt++)
{
points.row(pt) = pts.row(pointIdxSearch[pt]);
}
std::vector<Eigen::Vector3d> normals_vec(rotations);
std::vector<float> normals_conf(rotations);
for (int i = 0; i < rotations; i++)
{
//slice of n_planes triplets dedicated to this rotation
Eigen::MatrixX3i triplets = trip.block(i*n_planes, 0, n_planes, 3);
for (size_t pt = 0; pt < points_size; pt++)
{
// NOTE(review): assigns a product of the row into the same row;
// relies on Eigen evaluating the product into a temporary — verify
points.row(pt) = rotMat[(n + i) % rotMat.size()] * points.row(pt).transpose();
}
//normal_at_point appears to store its result in nls.row(n) — verify
normals_conf[i] = normal_at_point(d1, d2, points, n, triplets, conf_interv);
//restore the un-rotated neighborhood for the next rotation
for (size_t pt = 0; pt < points_size; pt++)
{
points.row(pt) = pts.row(pointIdxSearch[pt]);
}
//bring the estimated normal back to the original frame
normals_vec[i] = rotMatInv[(n + i) % rotMat.size()] * nls.row(n).transpose();
}
//keep the best candidate among all rotations
nls.row(n) = normal_selection(rotations, normals_vec, normals_conf);
if (progressCallback)
{
// NOTE(review): same unsynchronized ++progress as in the first pass
progressCallback(++progress);
}
}
}
private:
// PRIVATE METHODS
/*!
 * fills a vector of random rotation matrices and their inverses
 * @param rotMat : table of matrices to fill with rotations
 * @param rotMatInv : table of matrices to fill with inverse rotations
 * @param rotations : number of rotations (0 yields a single identity pair)
 */
inline void generate_rotation_matrix(std::vector<Eigen::Matrix3d> &rotMat, std::vector<Eigen::Matrix3d> &rotMatInv, int rotations)
{
	rotMat.clear();
	rotMatInv.clear();
	if (rotations == 0)
	{
		// no rotation requested: a single identity pair keeps callers uniform
		const Eigen::Matrix3d identity = Eigen::Matrix3d::Identity();
		rotMat.push_back(identity);
		rotMatInv.push_back(identity);
		return;
	}
	rotMat.reserve(rotations);
	rotMatInv.reserve(rotations);
	for (int k = 0; k < rotations; k++)
	{
		// three random Euler angles in [0, 2*pi); rand() is consumed in the
		// same order (theta, phi, psi) as the original implementation
		const double theta = static_cast<double>(rand()) / RAND_MAX * 2 * M_PI;
		const double phi   = static_cast<double>(rand()) / RAND_MAX * 2 * M_PI;
		const double psi   = static_cast<double>(rand()) / RAND_MAX * 2 * M_PI;
		const double ct = cos(theta), st = sin(theta);
		const double cf = cos(phi),   sf = sin(phi);
		const double cp = cos(psi),   sp = sin(psi);
		// elementary rotations about x, y and z
		Eigen::Matrix3d Rx, Ry, Rz;
		Rx << 1, 0, 0, 0, ct, -st, 0, st, ct;
		Ry << cf, 0, sf, 0, 1, 0, -sf, 0, cf;
		Rz << cp, -sp, 0, sp, cp, 0, 0, 0, 1;
		// inverses are the transposes, spelled out explicitly
		Eigen::Matrix3d RxInv, RyInv, RzInv;
		RxInv << 1, 0, 0, 0, ct, st, 0, -st, ct;
		RyInv << cf, 0, -sf, 0, 1, 0, sf, 0, cf;
		RzInv << cp, sp, 0, -sp, cp, 0, 0, 0, 1;
		// (Rx*Ry*Rz)^-1 = RzInv*RyInv*RxInv
		rotMat.push_back(Rx * Ry * Rz);
		rotMatInv.push_back(RzInv * RyInv * RxInv);
	}
}
/*!
 * generates a list of triplets by uniform sampling of point indices
 * @param triplets : table of 3-vector to fill with the indexes of the points
 * @param number_of_points : number of points to consider; must be >= 3,
 *        otherwise the rejection loop below cannot produce three pairwise
 *        distinct indices and does not terminate
 * @param triplet_number : number of triplets to generate
 * @param vecRandInt : table of random int (must be non-empty)
 */
inline void list_of_triplets(Eigen::MatrixX3i &triplets,
size_t number_of_points,
size_t triplet_number,
const std::vector<size_t> &vecRandInt)
{
size_t S = vecRandInt.size();
triplets.resize(triplet_number, 3);
//cursor into the shared random table; advanced by a data-dependent
//stride each draw so consecutive triplets use different entries
size_t pos = vecRandInt[0] % S;
for (size_t i = 0; i < triplet_number; i++)
{
do
{
triplets(i, 0) = static_cast<int>(vecRandInt[pos % S] % number_of_points);
triplets(i, 1) = static_cast<int>(vecRandInt[(pos + vecRandInt[(pos + 1) % S]) % S] % number_of_points);
triplets(i, 2) = static_cast<int>(vecRandInt[(pos + vecRandInt[(pos + 1 + vecRandInt[(pos + 2) % S]) % S]) % S] % number_of_points);
pos += vecRandInt[(pos + 3) % S] % S;
}
//reject triplets with repeated indices (degenerate planes)
while (triplets(i, 0) == triplets(i, 1) || triplets(i, 1) == triplets(i, 2) || triplets(i, 2) == triplets(i, 0));
}
}
/*!
 * dichotomic search in sorted vector, find the nearest neighbor
 * @param elems : sorted (ascending) vector containing the elements for comparison
 * @param d : element to search for in elems
 * @return the index of the element of elems nearest to d (lower index wins ties);
 *         0 for an empty vector
 */
//return the index of the nearest element in the vector
int dichotomic_search_nearest(const std::vector<double>& elems, double d){
	// pass by const reference (the original copied the vector on every call)
	if (elems.empty())
	{
		// degenerate input: the original read out of bounds / returned an
		// uninitialized index here
		return 0;
	}
	// first element >= d; std::lower_bound always terminates, unlike the
	// original hand-rolled loop which could stall on a 2-element interval
	std::vector<double>::const_iterator it = std::lower_bound(elems.begin(), elems.end(), d);
	if (it == elems.end())
	{
		return static_cast<int>(elems.size()) - 1; // d above all elements
	}
	if (it == elems.begin())
	{
		return 0; // d below or equal to the first element
	}
	// d lies between *(it-1) and *it: keep the closer one
	const std::vector<double>::const_iterator prev = it - 1;
	if (d - *prev <= *it - d)
	{
		return static_cast<int>(prev - elems.begin());
	}
	return static_cast<int>(it - elems.begin());
}
/*!
 * generates a list of triplets, sampling point indices with probability
 * proportional to their estimated local density
 * @param triplets : table of 3-vector to fill with the indexes of the points
 * @param triplet_number : number of triplets to generate
 * @param pointIdxSearch : index of the points used for triplets (>= 3 distinct
 *        entries required, otherwise the rejection loop does not terminate)
 * @param vecRandInt : table of random int (entries in [0, RAND_MAX], non-empty)
 */
inline void list_of_triplets(Eigen::MatrixX3i &triplets,
size_t triplet_number,
const std::vector<Eigen::MatrixX3d::Index>& pointIdxSearch,
const std::vector<size_t> &vecRandInt)
{
	// cumulative density profile: dists[i] is the sum of the densities of
	// neighbors 0..i, so a uniform draw in [0, sum] mapped through a
	// nearest search selects index i with probability densities[i]/sum
	std::vector<double> dists;
	dists.reserve(pointIdxSearch.size());
	double sum = 0;
	for (size_t i = 0; i < pointIdxSearch.size(); i++)
	{
		sum += densities[pointIdxSearch[i]];
		dists.push_back(sum);
	}
	size_t S = vecRandInt.size();
	triplets.resize(triplet_number, 3);
	size_t pos = vecRandInt[0] % S;
	for (size_t i = 0; i < triplet_number; i++)
	{
		do
		{
			// all three draws are scaled by the total weight "sum" so they
			// span the whole cumulative profile (the original scaled only
			// the first draw, collapsing entries 1 and 2 to low indices)
			double d = (vecRandInt[pos % S] + 0.) / RAND_MAX * sum;
			triplets(i, 0) = dichotomic_search_nearest(dists, d);
			d = (vecRandInt[(pos + vecRandInt[(pos + 1) % S]) % S] + 0.) / RAND_MAX * sum;
			triplets(i, 1) = dichotomic_search_nearest(dists, d);
			d = (vecRandInt[(pos + vecRandInt[(pos + 1 + vecRandInt[(pos + 2) % S]) % S]) % S] + 0.) / RAND_MAX * sum;
			triplets(i, 2) = dichotomic_search_nearest(dists, d);
			pos += vecRandInt[(pos + 3) % S] % S;
		}
		// reject triplets with repeated indices (degenerate planes)
		while (triplets(i, 0) == triplets(i, 1) || triplets(i, 1) == triplets(i, 2) || triplets(i, 2) == triplets(i, 0));
	}
}
/*!
 * Compute the normal at point n by a randomized voting scheme: each triplet
 * of neighbors defines a candidate plane whose unit normal is binned into a
 * (d1 x d2) spherical accumulator; the mean normal of the winning bin is
 * written into row n of the member matrix nls.
 * @param d1 - First dimension of the accumulator (theta bins)
 * @param d2 - Second dimension of the accumulator (phi bins)
 * @param points - table of neighbors (one 3D point per row)
 * @param n - index of the point where the normal is computed
 * @param triplets - table of triplets (row indices into points)
 * @param conf_interv - table of confidence intervals, indexed by the number
 *                      of triplets processed so far, used for early exit
 * @return the vote fraction (confidence) of the selected accumulator bin
 */
float normal_at_point(
        const int d1, const int d2,
        const Eigen::MatrixX3d& points,
        int n,
        Eigen::MatrixX3i &triplets,
        std::vector<float> &conf_interv)
{
    // NOTE(review): Eigen's size() counts all coefficients (rows * cols),
    // so for an Nx3 matrix this guard only fires when N == 0;
    // points.rows() < 3 was probably intended -- confirm.
    if (points.size() < 3)
    {
        nls.row(n).setZero();
        return 0;
    }
    //creation and initialization of the accumulators: vote counts and the
    //sum of normals that fell into each bin
    std::vector<double> votes(d1*d2);
    std::vector<Eigen::Vector3d> votesV(d1*d2);
    for (int i = 0; i < d1; i++)
    {
        for (int j = 0; j < d2; j++)
        {
            votes[i + j*d1] = 0;
            votesV[i + j*d1] = Eigen::Vector3d(0, 0, 0);
        }
    }
    // (i1, j1) tracks the best bin so far, (i2, j2) the runner-up
    float max1 = 0;
    int i1 = 0, i2 = 0;
    int j1 = 0, j2 = 0;
    for (int n_try = 0; n_try < n_planes; n_try++)
    {
        int p0 = triplets(n_try,0);
        int p1 = triplets(n_try,1);
        int p2 = triplets(n_try,2);
        // normal of the plane through the three sampled neighbors
        Eigen::Vector3d v1 = points.row(p1).transpose()-points.row(p0).transpose();
        Eigen::Vector3d v2 = points.row(p2).transpose()-points.row(p0).transpose();
        Eigen::Vector3d Pn = v1.cross(v2);
        Pn.normalize();
        // orient all candidate normals consistently (toward the origin side
        // of the plane)
        if(Pn.dot(points.row(p0).transpose())>0){
            Pn = -Pn;
        }
        // spherical binning: phi = inclination from +z, rounded to a bin
        double phi = acos(Pn[2]);
        double dphi = M_PI / n_phi;
        int posp = static_cast<int>(floor((phi + dphi / 2) * n_phi / M_PI));
        int post;
        if (posp == 0 || posp == n_phi)
        {
            // at the poles, all azimuths collapse into a single theta bin
            post = 0;
        }
        else
        {
            // azimuth theta in [0, 2*pi), recovered from the xy-projection
            double theta = acos(Pn[0] / sqrt(Pn[0] * Pn[0] + Pn[1] * Pn[1]));
            if (Pn[1] < 0)
            {
                theta *= -1;
                theta += 2 * M_PI;
            }
            // theta bins widen near the poles (1/sin factor) so bins stay
            // roughly equal-area on the sphere
            double dtheta = M_PI / (n_phi*sin(posp*dphi));
            post = static_cast<int>(floor((theta + dtheta / 2) / dtheta)) % (2 * n_phi);
        }
        // clamp bin indices to the accumulator bounds
        post = std::max(0, std::min(2 * n_phi - 1, post));
        posp = std::max(0, std::min(n_phi, posp));
        votes[post + posp*d1] += 1.;
        votesV[post + posp*d1] += Pn;
        // refresh the running best / second-best vote fractions
        max1 = votes[i1 + j1*d1] / (n_try + 1);
        float max2 = votes[i2 + j2*d1] / (n_try + 1);
        float votes_val = votes[post + posp*d1] / (n_try + 1);
        if (votes_val > max1)
        {
            max2 = max1;
            i2 = i1;
            j2 = j1;
            max1 = votes_val;
            i1 = post;
            j1 = posp;
        }
        // NOTE(review): requiring BOTH indices to differ excludes bins that
        // share one coordinate with the best bin from ever becoming the
        // runner-up; "!(post == i1 && posp == j1)" may have been intended.
        else if (votes_val > max2 && post != i1 && posp != j1)
        {
            max2 = votes_val;
            i2 = post;
            j2 = posp;
        }
        // early exit once the best bin leads the runner-up by more than the
        // confidence interval for this many samples
        if (max1 - conf_interv[n_try] > max2)
        {
            break;
        }
    }
    // result: normalized mean of the normals accumulated in the winning bin
    votesV[i1 + j1*d1].normalize();
    nls.row(n) = votesV[i1 + j1*d1];
    return max1;
}
/*!
* Compute the normal depending of the estimation choice (mean, best, cluster)
* @param rotations - number of rotations
* @param normals_vec - table of estimated normals for the point
* @param normals_conf - table of the confidence of normals
*/
inline Eigen::Vector3d normal_selection(int rotations,
        std::vector<Eigen::Vector3d> &normals_vec,
        const std::vector<float> &normals_conf){
    // Flip every estimate into the same half-space as the first one so that
    // opposite-sign duplicates of the same direction cluster together.
    for (int r = 1; r < rotations; r++)
    {
        if (normals_vec[0].dot(normals_vec[r]) < 0)
            normals_vec[r] *= -1;
    }
    // Greedy angular clustering: repeatedly seed a cluster with the most
    // confident unused normal, then absorb every unused normal within
    // tol_angle_rad of the seed, accumulating confidence-weighted sums.
    std::vector<bool> remaining(rotations, true);
    std::vector< std::pair<Eigen::Vector3d, float> > clusters;
    int left = rotations;
    while (left > 0)
    {
        // locate the most confident normal not yet assigned to a cluster
        int seed = 0;
        float best = 0;
        for (int r = 0; r < rotations; r++)
        {
            if (remaining[r] && normals_conf[r] > best)
            {
                best = normals_conf[r];
                seed = r;
            }
        }
        clusters.push_back(std::pair<Eigen::Vector3d, float>(
                normals_vec[seed] * normals_conf[seed], normals_conf[seed]));
        remaining[seed] = false;
        left--;
        // absorb every remaining normal close enough (in angle) to the seed
        for (int r = 0; r < rotations; r++)
        {
            if (remaining[r] && acos(normals_vec[seed].dot(normals_vec[r])) < tol_angle_rad)
            {
                remaining[r] = false;
                left--;
                clusters.back().first += normals_vec[r] * normals_conf[r];
                clusters.back().second += normals_conf[r];
            }
        }
    }
    // keep the cluster carrying the highest total confidence
    Eigen::Vector3d best_normal = clusters[0].first;
    float best_conf = clusters[0].second;
    for (size_t c = 1; c < clusters.size(); c++)
    {
        if (clusters[c].second > best_conf)
        {
            best_conf = clusters[c].second;
            best_normal = clusters[c].first;
        }
    }
    best_normal.normalize();
    return best_normal;
}
};
#endif
|
threshold.c | /* Copyright 2014. The Regents of the University of California.
* Copyright 2015-2017. Martin Uecker.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2013-2017 Martin Uecker <martin.uecker@med.uni-goettingen.de>
* 2015-2016 Jon Tamir <jtamir@eecs.berkeley.edu>
* 2015 Frank Ong <frankong@berkeley.edu>
*/
#include <stdbool.h>
#include <complex.h>
#include "num/flpmath.h"
#include "num/multind.h"
#include "num/init.h"
#include "num/ops_p.h"
#include "iter/prox.h"
#include "iter/thresh.h"
#include "misc/mmio.h"
#include "misc/misc.h"
#include "misc/debug.h"
#include "misc/opts.h"
#include "lowrank/lrthresh.h"
#include "linops/waveop.h"
#include "dfwavelet/prox_dfwavelet.h"
// FIXME: lowrank interface should not be coupled to mri.h -- it should take D as an input
#ifndef DIMS
#define DIMS 16
#endif
// FIXME: consider moving this to a more accessible location?
/*
 * Daubechies-wavelet soft-thresholding: transform, soft-threshold with
 * parameter lambda (jointly over the dimensions selected by flags via
 * prox_unithresh), and transform back.
 */
static void wthresh(unsigned int D, const long dims[D], float lambda, unsigned int flags, complex float* out, const complex float* in)
{
	long minsize[D];
	md_singleton_dims(D, minsize);

	// coarsest wavelet scale is 16 in each of the first three dims
	// (renamed from "course_scale" for consistency with coarse_scale
	// in dfthresh below)
	long coarse_scale[3] = MD_INIT_ARRAY(3, 16);
	md_copy_dims(3, minsize, coarse_scale);

	unsigned int wflags = 7; // FIXME: hard-coded to the first three dims
	// skip the wavelet transform along dimensions too small for one scale
	for (unsigned int i = 0; i < 3; i++)
		if (dims[i] < minsize[i])
			wflags = MD_CLEAR(wflags, i);

	long strs[D];
	md_calc_strides(D, strs, dims, CFL_SIZE);

	const struct linop_s* w = linop_wavelet_create(D, wflags, dims, strs, minsize, false);
	const struct operator_p_s* p = prox_unithresh_create(D, w, lambda, flags);

	operator_p_apply(p, 1., D, dims, out, D, dims, in);
	operator_p_free(p);
}
/*
 * Locally-low-rank soft-thresholding of in with parameter lambda,
 * blocked in llrblk-sized blocks over the non-joint dimensions.
 */
static void lrthresh(unsigned int D, const long dims[D], int llrblk, float lambda, unsigned int flags, complex float* out, const complex float* in)
{
	long blkdims[MAX_LEV][D];

	// block decomposition over the dimensions NOT selected by flags
	int levels = llr_blkdims(blkdims, ~flags, dims, llrblk);
	UNUSED(levels);

	const struct operator_p_s* thresh_op = lrthresh_create(dims, false, ~flags, (const long (*)[])blkdims, lambda, false, false, false);

	operator_p_apply(thresh_op, 1., D, dims, out, D, dims, in);
	operator_p_free(thresh_op);
}
/*
 * Divergence-free wavelet soft-thresholding; the three flow components
 * are expected along TE_DIM.
 */
static void dfthresh(unsigned int D, const long dims[D], float lambda, complex float* out, const complex float* in)
{
	// coarsest wavelet scale: at most 16 per spatial dimension
	long coarse_scale[3] = MD_INIT_ARRAY(3, 16);

	long minsize[3];
	md_singleton_dims(3, minsize);
	md_min_dims(3, ~0u, minsize, dims, coarse_scale);

	// unit voxel size in all three spatial directions
	complex float res[3] = { 1., 1., 1. };

	// the vector field must carry exactly three components along TE_DIM
	assert(3 == dims[TE_DIM]);

	const struct operator_p_s* prox = prox_dfwavelet_create(dims, minsize, res, TE_DIM, lambda, false);

	operator_p_apply(prox, 1., D, dims, out, D, dims, in);
	operator_p_free(prox);
}
/*
 * Hard thresholding applied componentwise to the interleaved real and
 * imaginary parts: components <= lambda are zeroed.
 *
 * NOTE(review): the comparison is on the SIGNED value, so every negative
 * component is zeroed regardless of magnitude; classic hard thresholding
 * would compare fabsf(inf[i]) > lambda -- confirm intent.
 */
static void hard_thresh(unsigned int D, const long dims[D], float lambda, complex float* out, const complex float* in)
{
	// 2 floats per complex element
	// (was md_calc_size(DIMS, dims): harmless while all callers pass
	// D == DIMS, but wrong for any smaller D -- use the parameter)
	long size = md_calc_size(D, dims) * 2;

	const float* inf = (const float*)in;
	float* outf = (float*)out;

#pragma omp parallel for
	for (long i = 0; i < size; i++)
		outf[i] = inf[i] > lambda ? inf[i] : 0.;
}
// command-line usage and help text for the "threshold" tool
static const char usage_str[] = "lambda <input> <output>";
static const char help_str[] = "Perform (soft) thresholding with parameter lambda.";

/*
 * Entry point: parse the command line, load the input CFL file, apply the
 * selected thresholding operator (wavelet, locally-low-rank,
 * divergence-free wavelet, hard, or plain soft-thresholding by default)
 * and write the result to the output CFL file.
 */
int main_threshold(int argc, char* argv[argc])
{
	// bitmask of dimensions to threshold jointly (-j)
	unsigned int flags = 0;
	// which thresholding variant to apply; NONE selects md_zsoftthresh
	enum th_type { NONE, WAV, LLR, DFW, MPDFW, HARD } th_type = NONE;
	// locally-low-rank block size (-b)
	int llrblk = 8;

	const struct opt_s opts[] = {
		OPT_SELECT('H', enum th_type, &th_type, HARD, "hard thresholding"),
		OPT_SELECT('W', enum th_type, &th_type, WAV, "daubechies wavelet soft-thresholding"),
		OPT_SELECT('L', enum th_type, &th_type, LLR, "locally low rank soft-thresholding"),
		OPT_SELECT('D', enum th_type, &th_type, DFW, "divergence-free wavelet soft-thresholding"),
		OPT_UINT('j', &flags, "bitmask", "joint soft-thresholding"),
		OPT_INT('b', &llrblk, "blocksize", "locally low rank block size"),
	};

	// exactly 3 positional arguments: lambda, input, output
	cmdline(&argc, argv, 3, 3, usage_str, help_str, ARRAY_SIZE(opts), opts);

	num_init();

	const int N = DIMS;
	long dims[N];

	// memory-mapped input and output arrays (same dimensions)
	complex float* idata = load_cfl(argv[2], N, dims);
	complex float* odata = create_cfl(argv[3], N, dims);

	float lambda = atof(argv[1]);

	switch (th_type) {

		case WAV:
			wthresh(N, dims, lambda, flags, odata, idata);
			break;

		case LLR:
			lrthresh(N, dims, llrblk, lambda, flags, odata, idata);
			break;

		case DFW:
			dfthresh(N, dims, lambda, odata, idata);
			break;

		case HARD:
			hard_thresh(N, dims, lambda, odata, idata);
			break;

		default:
			// plain (joint) complex soft-thresholding
			md_zsoftthresh(N, dims, lambda, flags, odata, idata);
	}

	unmap_cfl(N, dims, idata);
	unmap_cfl(N, dims, odata);

	return 0;
}
|
bs_omp.c |
#include <assert.h>
#include <getopt.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include "shared.hpp"
#include "timer.h"
#define DTYPE uint64_t
#ifndef DTYPE
#define DTYPE uint64_t
#endif

/*
 * @brief creates a "test file" by filling a buffer with values
 *
 * input is filled with a strictly increasing sequence (random gaps of
 * 1..10 starting at 1); each query is copied from a random input element,
 * so every query is guaranteed to exist in the array.
 * (The unused "max"/"min" locals of the original were removed.)
 */
void create_test_file(DTYPE *input, uint64_t nr_elements, DTYPE *querys, uint64_t n_querys) {
    srand(time(NULL));

    input[0] = 1;
    for (uint64_t i = 1; i < nr_elements; i++) {
        input[i] = input[i - 1] + (rand() % 10) + 1;
    }

    // queries are drawn from input[0 .. nr_elements - 3]
    // NOTE(review): the "- 2" excludes the last two elements; presumably
    // deliberate (keeps queries away from the array end) -- confirm.
    for (uint64_t i = 0; i < n_querys; i++) {
        querys[i] = input[rand() % (nr_elements - 2)];
    }
}
#ifndef DTYPE
#define DTYPE uint64_t
#endif

/**
 * @brief compute output in the host: binary-search every query in the
 * sorted array input[0 .. input_size] (inclusive upper bound)
 *
 * @return (uint64_t)-1 plus the sum of the matching indices (the caller
 *         treats a nonzero result as success)
 *
 * Fixes over the original:
 *  - the shared "found" accumulator was updated by all threads without
 *    synchronization (data race); an OpenMP reduction now combines the
 *    per-thread partial sums into the same total;
 *  - when a query was smaller than input[0], "r = m - 1" with m == 0
 *    wrapped the unsigned index to UINT64_MAX and the loop read far out
 *    of bounds; the m == 0 case now terminates the search.
 */
uint64_t binarySearch(DTYPE *input, uint64_t input_size, DTYPE *querys, unsigned n_querys) {
    uint64_t found = -1;

    #pragma omp parallel for reduction(+ : found)
    for (uint64_t q = 0; q < n_querys; q++) {
        uint64_t l = 0;
        uint64_t r = input_size;
        while (l <= r) {
            uint64_t m = l + (r - l) / 2;

            // Check if x is present at mid
            if (input[m] == querys[q]) {
                found += m;
                break;
            }

            // If x greater, ignore left half
            if (input[m] < querys[q]) {
                l = m + 1;
            } else {
                // If x is smaller, ignore right half; stop instead of
                // wrapping the unsigned bound below zero
                if (m == 0)
                    break;
                r = m - 1;
            }
        }
    }
    return found;
}
/**
 * @brief Main of the Host Application: generate a sorted array and a set
 * of queries, time the parallel binary search, and report the result.
 *
 * Fixes: argv is validated before use, allocations are checked, and the
 * previously leaked "querys" buffer is freed.
 */
int main(int argc, char **argv) {
    Timer timer;

    // validate the command line before touching argv[1]/argv[2]
    if (argc < 3) {
        fprintf(stderr, "usage: %s <vector size> <num searches>\n", argv[0]);
        return 1;
    }

    uint64_t input_size = atol(argv[1]);
    uint64_t n_querys = atol(argv[2]);

    printf("Vector size: %lu, num searches: %lu\n", input_size, n_querys);

    DTYPE *input = (DTYPE *)malloc((input_size) * sizeof(DTYPE));
    DTYPE *querys = (DTYPE *)malloc((n_querys) * sizeof(DTYPE));
    if (!input || !querys) {
        fprintf(stderr, "allocation failure\n");
        free(input);
        free(querys);
        return 1;
    }

    DTYPE result_host = -1;

    // Create an input file with arbitrary data.
    create_test_file(input, input_size, querys, n_querys);

    start(&timer, 0, 0);
    start_region();
    result_host = binarySearch(input, input_size - 1, querys, n_querys);
    end_region();
    stop(&timer, 0);

    // nonzero accumulated result is treated as success (see binarySearch)
    int status = (result_host);
    if (status) {
        printf("[OK] Execution time: ");
        print(&timer, 0, 1);
        printf("ms.\n");
    } else {
        printf("[ERROR]\n");
    }

    free(input);
    free(querys);  // was leaked in the original
    return status ? 0 : 1;
}
|
NLmean_propag2dirs_sspacing3_tspacing6_sim12_acc12_neighbor5_tau0100.c | /*
* compile: gcc -O3 -std=c99 -o [filename_out] -fopenmp [filename].c -lm -I/usr/include/netcdf-3/ -L/usr/lib64/ -lnetcdf -lnetcdf_c++
* in the terminal: export OMP_NUM_THREADS=3
*/
#include<stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <netcdf.h>
#include <omp.h>
/* This is the name of the data file we will read. */
#define FILENAME_RD "/data/PhDworks/isotropic/NLM/Udiff_spacespacing3.nc"
#define FILENAME_WR "/data/PhDworks/isotropic/NLM/NLmean_propag2dirs_sspacing3_tspacing6_sim12_acc12_neighbor5_tau0100.nc"
/* all constants */
#define N_HR 96
#define SCALE_FACTOR_SPACE 3
#define SCALE_FACTOR_TIME 6
#define SIM_HAFTSIZE 12
#define ACC_HAFTSIZE 12
#define NEIGHBOR_HAFTSIZE 5
#define SIM_FULLSIZE (2 * SIM_HAFTSIZE + 1)
#define ACC_FULLSIZE (2 * ACC_HAFTSIZE + 1)
#define NEIGHBOR_FULLSIZE (2 * NEIGHBOR_HAFTSIZE + 1)
#define TAU 0.1
#define NUM_VARS 1
#define NUM_SCALES 2
#define NUM_3DSNAPS 37 /* #3D snapshots */
#define NUM_BLOCKS N_HR/SCALE_FACTOR_TIME - 1 /* #(1:SCALE_FACTOR_TIME:N_HR) - 1*/
#define NUM_2DSNAPS (SCALE_FACTOR_TIME * NUM_BLOCKS + 1) /* #2D snapshots in each 3D block */
#define NDIMS 4
/* Handle errors by printing an error message and exiting with a non-zero status. */
#define ERRCODE 2
#define ERR(e) {printf("Error: %s\n", nc_strerror(e)); exit(ERRCODE);}
/* **********************************************************************************/
/* ****************************** USEFUL FUNCTIONS **********************************/
/* **********************************************************************************/
/*
 * get_onesnap: copy the slice arr1[id_start .. id_end] (inclusive bounds)
 * into arr2[0 .. id_end - id_start]
 */
void get_onesnap(double *arr1, double *arr2, int id_start, int id_end)
{
	int n = id_end - id_start + 1;
	for (int k = 0; k < n; k++)
		arr2[k] = arr1[id_start + k];
}
/*
 * put_onesnap: write arr2[0 .. id_end - id_start] into the slice
 * arr1[id_start .. id_end] (inclusive bounds)
 */
void put_onesnap(double *arr1, double *arr2, int id_start, int id_end)
{
	int n = id_end - id_start + 1;
	for (int k = 0; k < n; k++)
		arr1[id_start + k] = arr2[k];
}
/*
 * norm_by_weight: divide x[k] by the accumulated weight W[k],
 * for k = 0 .. dim - 1
 */
void norm_by_weight(int dim, double *x, double *W)
{
	for (int k = dim; k-- > 0; )
		x[k] /= W[k];
}
/* add_mat: elementwise sum of two arrays, sum[k] = x1[k] + x2[k] */
void add_mat(int dim, double *sum, double *x1, double *x2)
{
	for (int i = 0; i < dim; ++i)
		sum[i] = x1[i] + x2[i];
}
/* initialize: fill x[0 .. dim - 1] with the constant val */
void initialize(int dim, double *x, double val)
{
	for (int i = dim; --i >= 0; )
		x[i] = val;
}
/* **********************************************************************************/
/* ****************************** NETCDF UTILS **************************************/
/* **********************************************************************************/
/*
 * creat_netcdf: create the netcdf file [filename] containing [num_vars]
 * float variables named [varname], each dimensioned Nt x Nx x Ny x Nz
 * (Nt is the unlimited record dimension).
 */
void create_netcdf(char *filename, int num_vars, char *varname[num_vars])
{
	int ncid_wr, retval_wr;
	int vel_varid_wr;
	int Nt, Nx, Ny, Nz;
	int dimids[NDIMS];

	/* Create the file. */
	if ((retval_wr = nc_create(filename, NC_CLOBBER, &ncid_wr)))
		ERR(retval_wr);

	/* Define the dimensions. The record dimension is defined to have
	 * unlimited length - it can grow as needed.*/
	/* BUG FIX: "Nx" was never defined, so dimids[1] below was used
	 * uninitialized. It is the per-block 2D-snapshot axis, matching the
	 * count_ids used when writing x_rec_all in main(). */
	if ((retval_wr = nc_def_dim(ncid_wr, "Nx", NUM_2DSNAPS, &Nx)))
		ERR(retval_wr);
	if ((retval_wr = nc_def_dim(ncid_wr, "Ny", N_HR, &Ny)))
		ERR(retval_wr);
	if ((retval_wr = nc_def_dim(ncid_wr, "Nz", N_HR, &Nz)))
		ERR(retval_wr);
	if ((retval_wr = nc_def_dim(ncid_wr, "Nt", NC_UNLIMITED, &Nt)))
		ERR(retval_wr);

	/* Define the netCDF variables for the data. */
	dimids[0] = Nt;
	dimids[1] = Nx;
	dimids[2] = Ny;
	dimids[3] = Nz;
	for (int i = 0; i<num_vars; i++)
	{
		if ((retval_wr = nc_def_var(ncid_wr, varname[i], NC_FLOAT, NDIMS, dimids, &vel_varid_wr)))
			ERR(retval_wr);
	}

	/* End define mode (SHOULD NOT FORGET THIS!). */
	if ((retval_wr = nc_enddef(ncid_wr)))
		ERR(retval_wr);

	/* Close the file. */
	if ((retval_wr = nc_close(ncid_wr)))
		ERR(retval_wr);

	printf("\n *** SUCCESS creating file: %s!\n", filename);
}
/*
 * write_netcdf:
 * write the hyperslab described by [start]/[count] of variable [varname]
 * in [filename] from the buffer [snaps]
 */
void write_netcdf(char *filename, char *varname, size_t *start, size_t *count, double *snaps)
{
	int ncid_wr, retval_wr;
	int vel_varid_wr;

	/* Open the file. NC_WRITE tells netCDF we want write access.
	 * (The original comment wrongly said "read-only".) */
	if ((retval_wr = nc_open(filename, NC_WRITE, &ncid_wr)))
		ERR(retval_wr);

	/* Get the variable id. (Stray ";;" removed.) */
	if ((retval_wr = nc_inq_varid(ncid_wr, varname, &vel_varid_wr)))
		ERR(retval_wr);

	/* Put variable*/
	if ((retval_wr = nc_put_vara_double(ncid_wr, vel_varid_wr, start, count, &snaps[0])))
		ERR(retval_wr);

	/* Close the file. */
	if ((retval_wr = nc_close(ncid_wr)))
		ERR(retval_wr);

	printf("\n *** SUCCESS writing variables \"%s\" to \"%s\"!\n", varname, filename);
}
/*
 * read_netcdf: read the hyperslab described by [start]/[count] of
 * variable [varname] from [filename] into the buffer [snaps]
 */
void read_netcdf(char *filename, char *varname, size_t *start, size_t *count, double *snaps)
{
	int ncid_rd, status;
	int varid;

	/* Open the file. NC_NOWRITE tells netCDF we want read-only access. */
	if ((status = nc_open(filename, NC_NOWRITE, &ncid_rd)))
		ERR(status);

	/* Look up the id of the requested velocity variable. */
	if ((status = nc_inq_varid(ncid_rd, varname, &varid)))
		ERR(status);

	/* Read the requested hyperslab. */
	if ((status = nc_get_vara_double(ncid_rd, varid, start, count, &snaps[0])))
		ERR(status);

	/* Close the file, freeing all resources. */
	if ((status = nc_close(ncid_rd)))
		ERR(status);

	printf("\n *** SUCCESS reading variables \"%s\" from \"%s\" \n", varname, filename);
}
/* **********************************************************************************/
/* ****************************** ESTIMATE_DISTANCE *********************************/
/* **********************************************************************************/
/*
 * generate_grids: precompute, for every pixel (i, j) of an N_HR x N_HR
 * plane, every neighbor offset and every similarity-patch offset, the
 * wrapped y- and z-indices of the corresponding grid point.
 * (The original comment here described estimate_distance and did not
 * match this function.)
 *
 * gridpatches_y/z : flat tables of size
 *                   N_HR * N_HR * NEIGHBOR_FULLSIZE^2 * SIM_FULLSIZE^2
 * acc_ids         : for each cell of the ACC_FULLSIZE^2 accumulation
 *                   sub-patch, its flat index inside the SIM_FULLSIZE^2
 *                   similarity patch
 *
 * NOTE(review): out-of-range indices are wrapped with period N_HR - 1
 * (e.g. -1 -> N_HR - 2), which maps indices 0 and N_HR - 1 onto the same
 * physical point -- presumably the field is periodic with coincident
 * first/last samples; confirm.
 */
void generate_grids(int *gridpatches_y, int *gridpatches_z, int * acc_ids)
{
	/* offsets of the neighbor search window around a pixel
	 * (the original's unused file-scope "int neighbor_id, sim_id;"
	 * declaration, shadowed by the loop variables below, was removed) */
	int gridyoffset_neighbor[NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE], gridzoffset_neighbor[NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE];
	for (int m = 0; m < NEIGHBOR_FULLSIZE; m++)
	{
		for (int n = 0; n < NEIGHBOR_FULLSIZE; n++)
		{
			gridyoffset_neighbor[m * NEIGHBOR_FULLSIZE + n] = m - NEIGHBOR_HAFTSIZE;
			gridzoffset_neighbor[m * NEIGHBOR_FULLSIZE + n] = n - NEIGHBOR_HAFTSIZE;
		}
	}

	/* offsets of the similarity patch around a center */
	int gridyoffset_sim[SIM_FULLSIZE * SIM_FULLSIZE], gridzoffset_sim[SIM_FULLSIZE * SIM_FULLSIZE];
	for (int p = 0; p < SIM_FULLSIZE; p++)
	{
		for (int q = 0; q < SIM_FULLSIZE; q++)
		{
			gridyoffset_sim[p * SIM_FULLSIZE + q] = p - SIM_HAFTSIZE;
			gridzoffset_sim[p * SIM_FULLSIZE + q] = q - SIM_HAFTSIZE;
		}
	}

	/* acc_ids: flat indices of the centered ACC sub-patch inside the
	 * similarity patch */
	int grid_sim[SIM_FULLSIZE][SIM_FULLSIZE];
	for (int p = 0; p < SIM_FULLSIZE; p++)
		for (int q = 0; q < SIM_FULLSIZE; q++)
			grid_sim[p][q] = p * SIM_FULLSIZE + q;
	for (int p = 0; p < ACC_FULLSIZE; p++)
		for (int q = 0; q < ACC_FULLSIZE; q++)
			acc_ids[p * ACC_FULLSIZE + q] = grid_sim[SIM_HAFTSIZE - ACC_HAFTSIZE + p][SIM_HAFTSIZE - ACC_HAFTSIZE + q];

	/* fill the full lookup tables, wrapping indices that fall outside
	 * the plane */
	int valy, valz;
	long int grid_id;
	for (int i = 0; i < N_HR; i++)
	{
		for (int j = 0; j < N_HR; j++)
		{
			for (int neighbor_id = 0; neighbor_id < NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE; neighbor_id++)
			{
				for (int sim_id = 0; sim_id < SIM_FULLSIZE * SIM_FULLSIZE; sim_id++)
				{
					grid_id = i * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE
							+ j * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE
							+ neighbor_id * SIM_FULLSIZE * SIM_FULLSIZE + sim_id;
					valy = i + gridyoffset_neighbor[neighbor_id] + gridyoffset_sim[sim_id];
					valz = j + gridzoffset_neighbor[neighbor_id] + gridzoffset_sim[sim_id];
					if (valy < 0)
						gridpatches_y[grid_id] = (N_HR - 1) + valy;
					else if (valy > (N_HR - 1))
						gridpatches_y[grid_id] = valy - (N_HR - 1);
					else
						gridpatches_y[grid_id] = valy;
					if (valz < 0)
						gridpatches_z[grid_id] = (N_HR - 1) + valz;
					else if (valz > (N_HR - 1))
						gridpatches_z[grid_id] = valz - (N_HR - 1);
					else
						gridpatches_z[grid_id] = valz;
				}
			}
		}
	}
}
/* **********************************************************************************/
/* ****************************** NLMEAN *********************************/
/* **********************************************************************************/
/*
* estimate_distance: estimate the distances between ref patch and moving patches (prev and after)
* patches are of fixed size (2*SIM_HAFTSIZE+1) x (2*SIM_HAFTSIZE+1)
* reference patch are centered at [center_ref_idy, center_ref_idz]
* moving patches are centered at [center_moving_idy, center_moving_idz]
* dist_all contain 2 elements: distances to moving patches in the prev and after plane
* x_ref: reference plane
* x_prev: previous plane
* x_after: plane after
* ref_ids_y(z): indices of points in reference patch
* moving_ids_y(z): indices of points in moving patch
*/
/*void fusion(double *x_NLM, double *weight_NLM, double *x_ref, double *x_moving, double *x_fusion,
int gridpatches_y[N_HR][N_HR][NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE][SIM_FULLSIZE * SIM_FULLSIZE],
int gridpatches_z[N_HR][N_HR][NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE][SIM_FULLSIZE * SIM_FULLSIZE],
int acc_ids[NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE], int est_idy, int est_idz)*/
/*
 * NLmean: one non-local-means propagation pass.
 * For every pixel (est_idy, est_idz) of the reference plane and every
 * neighbor offset ni, the low-frequency similarity patch around the pixel
 * in x_ref is compared with the shifted patch in x_moving; the mean squared
 * patch difference d yields a Gaussian weight w = exp(-d / (2 tau^2)),
 * which is used to accumulate the corresponding values of x_fusion into
 * x_NLM, with the per-pixel weight sums tracked in weight_NLM (the caller
 * normalizes via norm_by_weight).
 * gridy/gridz are the precomputed wrapped index tables from generate_grids;
 * accids selects the accumulation sub-patch inside the similarity patch.
 *
 * NOTE(review): the parallel loop is over est_idy, but the accumulation
 * below writes to x_NLM/weight_NLM at wrapped patch positions that can be
 * shared by different est_idy rows -- potential data race under OpenMP;
 * confirm.
 */
void NLmean(double *x_NLM, double *weight_NLM, double *x_ref, double *x_moving, double *x_fusion, int *gridy, int *gridz, int *accids)
{
	// 1 / patch cardinality: patch distances are mean squared differences
	double norm_fact = 1.0/((double) (SIM_FULLSIZE * SIM_FULLSIZE));
	// ri = flat index of the zero neighbor offset (patch centered on the pixel)
	int ri = NEIGHBOR_HAFTSIZE * NEIGHBOR_FULLSIZE + NEIGHBOR_HAFTSIZE;
	int est_idy;
#pragma omp parallel for private (est_idy)
	for (est_idy = 0; est_idy < N_HR; est_idy++)
		for (int est_idz = 0; est_idz < N_HR; est_idz++)
			for (int ni = 0; ni < NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE; ni++)
			{
				int ref_idy, ref_idz, moving_idy, moving_idz;
				double du;
				double d = 0.0;
				long int grid_rid, grid_nid;
				// mean squared difference between the reference patch
				// (offset ri) and the candidate patch (offset ni)
				for (int si = 0; si < SIM_FULLSIZE * SIM_FULLSIZE; si++)
				{
					grid_rid = est_idy * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE
							+ est_idz * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + ri * SIM_FULLSIZE * SIM_FULLSIZE + si ;
					grid_nid = est_idy * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE
							+ est_idz * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + ni * SIM_FULLSIZE * SIM_FULLSIZE + si;
					ref_idy = gridy[grid_rid];
					moving_idy = gridy[grid_nid];
					ref_idz = gridz[grid_rid];
					moving_idz = gridz[grid_nid];
					//compute distance btw reference patch and fusion patch
					du = x_ref[ref_idy * N_HR + ref_idz] - x_moving[moving_idy * N_HR + moving_idz];
					d = d + norm_fact*du*du;
				}
				// similarity weight from the patch distance
				double w = exp(-d/(2.0*TAU*TAU));
				// accumulate the weighted fusion values over the central
				// accumulation sub-patch
				for(int k = 0; k < ACC_FULLSIZE * ACC_FULLSIZE; k++)
				{
					int ai = accids[k];
					grid_rid = est_idy * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE
							+ est_idz * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + ri * SIM_FULLSIZE * SIM_FULLSIZE + ai ;
					grid_nid = est_idy * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE
							+ est_idz * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + ni * SIM_FULLSIZE * SIM_FULLSIZE + ai;
					ref_idy = gridy[grid_rid];
					moving_idy = gridy[grid_nid];
					ref_idz = gridz[grid_rid];
					moving_idz = gridz[grid_nid];
					x_NLM[ref_idy * N_HR + ref_idz] = x_NLM[ref_idy * N_HR + ref_idz] + w*x_fusion[moving_idy * N_HR + moving_idz];
					weight_NLM[ref_idy * N_HR + ref_idz] = weight_NLM[ref_idy * N_HR + ref_idz] + w;
				}
			}
}
/*
 * propag_forward: reconstruct high-frequency planes t_first+1 .. t_bound1
 * in increasing time order, each one estimated by NL-means from the plane
 * reconstructed just before it.
 */
void propag_forward(double *Xrec, double *Xlf, int *gridy, int *gridz, int *accids, int t_first, int t_bound1, int t_offset)
{
	const int plane = N_HR * N_HR;

	for (int t = t_first + 1; t <= t_bound1; t++)
	{
		const int t_src = t - 1;

		double ref_lf[N_HR * N_HR], ref_hf[N_HR * N_HR];
		double src_lf[N_HR * N_HR], src_hf[N_HR * N_HR];
		double w[N_HR * N_HR];

		get_onesnap(Xlf, ref_lf, t_offset + t * plane, t_offset + (t + 1) * plane - 1);
		get_onesnap(Xlf, src_lf, t_offset + t_src * plane, t_offset + (t_src + 1) * plane - 1);
		get_onesnap(Xrec, src_hf, t_offset + t_src * plane, t_offset + (t_src + 1) * plane - 1);

		/* empty accumulators */
		initialize(plane, ref_hf, 0.0);
		initialize(plane, w, 0.0);

		/* propagate the known high frequencies from the previous plane */
		NLmean(ref_hf, w, ref_lf, src_lf, src_hf, gridy, gridz, accids);

		/* normalize the weighted sum and store the estimated plane */
		norm_by_weight(plane, ref_hf, w);
		put_onesnap(Xrec, ref_hf, t_offset + t * plane, t_offset + (t + 1) * plane - 1);
	}
}
/*
 * propag_backward: reconstruct high-frequency planes t_last-1 .. t_bound2
 * in decreasing time order, each one estimated by NL-means from the plane
 * reconstructed just after it.
 */
void propag_backward(double *Xrec, double *Xlf, int *gridy, int *gridz, int *accids, int t_last, int t_bound2, int t_offset)
{
	const int plane = N_HR * N_HR;

	for (int t = t_last - 1; t >= t_bound2; --t)
	{
		const int t_src = t + 1;

		double ref_lf[N_HR * N_HR], ref_hf[N_HR * N_HR];
		double src_lf[N_HR * N_HR], src_hf[N_HR * N_HR];
		double w[N_HR * N_HR];

		get_onesnap(Xlf, ref_lf, t_offset + t * plane, t_offset + (t + 1) * plane - 1);
		get_onesnap(Xlf, src_lf, t_offset + t_src * plane, t_offset + (t_src + 1) * plane - 1);
		get_onesnap(Xrec, src_hf, t_offset + t_src * plane, t_offset + (t_src + 1) * plane - 1);

		/* empty accumulators */
		initialize(plane, ref_hf, 0.0);
		initialize(plane, w, 0.0);

		/* propagate the known high frequencies from the following plane */
		NLmean(ref_hf, w, ref_lf, src_lf, src_hf, gridy, gridz, accids);

		/* normalize the weighted sum and store the estimated plane */
		norm_by_weight(plane, ref_hf, w);
		put_onesnap(Xrec, ref_hf, t_offset + t * plane, t_offset + (t + 1) * plane - 1);
	}
}
/*
 * propag_2planes: estimate the single plane t_mid by NL-means from both of
 * its temporal neighbors (t_mid - 1 and t_mid + 1), which must already be
 * present in Xrec.
 */
void propag_2planes(double *Xrec, double *Xlf, int *gridy, int *gridz, int *accids, int t_mid, int t_offset)
{
	const int plane = N_HR * N_HR;

	double ref_lf[N_HR * N_HR], ref_hf[N_HR * N_HR];
	double src_lf[N_HR * N_HR], src_hf[N_HR * N_HR];
	double w[N_HR * N_HR];

	/* empty accumulators */
	initialize(plane, ref_hf, 0.0);
	initialize(plane, w, 0.0);

	get_onesnap(Xlf, ref_lf, t_offset + t_mid * plane, t_offset + (t_mid + 1) * plane - 1);

	/* contribution of the previous plane */
	const int t_prev = t_mid - 1;
	get_onesnap(Xlf, src_lf, t_offset + t_prev * plane, t_offset + (t_prev + 1) * plane - 1);
	get_onesnap(Xrec, src_hf, t_offset + t_prev * plane, t_offset + (t_prev + 1) * plane - 1);
	NLmean(ref_hf, w, ref_lf, src_lf, src_hf, gridy, gridz, accids);

	/* contribution of the following plane */
	const int t_after = t_mid + 1;
	get_onesnap(Xlf, src_lf, t_offset + t_after * plane, t_offset + (t_after + 1) * plane - 1);
	get_onesnap(Xrec, src_hf, t_offset + t_after * plane, t_offset + (t_after + 1) * plane - 1);
	NLmean(ref_hf, w, ref_lf, src_lf, src_hf, gridy, gridz, accids);

	/* normalize the combined weighted sum and store the estimated plane */
	norm_by_weight(plane, ref_hf, w);
	put_onesnap(Xrec, ref_hf, t_offset + t_mid * plane, t_offset + (t_mid + 1) * plane - 1);
}
/*
 * propag_towardcenter: reconstruct the SCALE_FACTOR_TIME - 1 unknown
 * planes of one temporal block from its two known boundary planes
 * (t_first and t_first + SCALE_FACTOR_TIME), sweeping symmetrically from
 * both ends toward the center. At each step the two candidate planes
 * (t1 from the left, t2 from the right) each receive NL-means
 * contributions from BOTH already-reconstructed sides.
 */
void propag_towardcenter(double *Xrec, double *Xlf, int *gridy, int *gridz, int *accids, int t_first, int t_offset)
{
	double xref1_lf[N_HR * N_HR], xref2_lf[N_HR * N_HR], xmov_lf[N_HR * N_HR], xmov_hf[N_HR * N_HR];
	double xref1_hf[N_HR * N_HR], w1[N_HR * N_HR], xref2_hf[N_HR * N_HR], w2[N_HR * N_HR];
	// number of sweep steps from each side; for an even scale factor one
	// center plane remains and is handled separately after the loop
	int tc = (int)SCALE_FACTOR_TIME/2;
	if (SCALE_FACTOR_TIME % 2) { tc = (int)SCALE_FACTOR_TIME/2 + 1; }
	for (int td = 1; td < tc; td++)
	{
		int t1 = t_first + td; // bound on left side
		int t2 = t_first + SCALE_FACTOR_TIME - td; // bound on right side
		// Initialize the accumulators of both candidate planes with zeros
		initialize(N_HR * N_HR, xref1_hf, 0.0);
		initialize(N_HR * N_HR, w1, 0.0);
		initialize(N_HR * N_HR, xref2_hf, 0.0);
		initialize(N_HR * N_HR, w2, 0.0);
		get_onesnap(Xlf, xref1_lf, t_offset + t1 * N_HR * N_HR, t_offset + (t1 + 1) * N_HR * N_HR - 1);
		get_onesnap(Xlf, xref2_lf, t_offset + t2 * N_HR * N_HR, t_offset + (t2 + 1) * N_HR * N_HR - 1);
		//Propagate from left bound (plane t1 - 1 is already reconstructed)
		get_onesnap(Xlf, xmov_lf, t_offset + (t1 - 1) * N_HR * N_HR, t_offset + t1 * N_HR * N_HR - 1);
		get_onesnap(Xrec, xmov_hf, t_offset + (t1 - 1) * N_HR * N_HR, t_offset + t1 * N_HR * N_HR - 1);
		NLmean(xref1_hf, w1, xref1_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
		NLmean(xref2_hf, w2, xref2_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
		//Propagate from right bound (plane t2 + 1 is already reconstructed)
		get_onesnap(Xlf, xmov_lf, t_offset + (t2 + 1) * N_HR * N_HR, t_offset + (t2 + 2) * N_HR * N_HR - 1);
		get_onesnap(Xrec, xmov_hf, t_offset + (t2 + 1) * N_HR * N_HR, t_offset + (t2 + 2) * N_HR * N_HR - 1);
		NLmean(xref1_hf, w1, xref1_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
		NLmean(xref2_hf, w2, xref2_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
		// Normalize each candidate plane and store it back
		norm_by_weight(N_HR*N_HR, xref1_hf, w1);
		put_onesnap(Xrec, xref1_hf, t_offset + t1 * N_HR * N_HR, t_offset + (t1 + 1) * N_HR * N_HR - 1);
		norm_by_weight(N_HR*N_HR, xref2_hf, w2);
		put_onesnap(Xrec, xref2_hf, t_offset + t2 * N_HR * N_HR, t_offset + (t2 + 1) * N_HR * N_HR - 1);
	}
	// Last plane in the center (even scale factor only): estimated from
	// both of its now-reconstructed neighbors
	if (SCALE_FACTOR_TIME % 2 == 0)
	{
		initialize(N_HR * N_HR, xref1_hf, 0.0);
		initialize(N_HR * N_HR, w1, 0.0);
		get_onesnap(Xlf, xref1_lf, t_offset + (t_first + tc) * N_HR * N_HR, t_offset + (t_first + tc + 1) * N_HR * N_HR - 1);
		get_onesnap(Xlf, xmov_lf, t_offset + (t_first + tc - 1) * N_HR * N_HR, t_offset + (t_first + tc) * N_HR * N_HR - 1);
		get_onesnap(Xrec, xmov_hf, t_offset + (t_first + tc - 1) * N_HR * N_HR, t_offset + (t_first + tc) * N_HR * N_HR - 1);
		NLmean(xref1_hf, w1, xref1_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
		get_onesnap(Xlf, xmov_lf, t_offset + (t_first + tc + 1) * N_HR * N_HR, t_offset + (t_first + tc + 2) * N_HR * N_HR - 1);
		get_onesnap(Xrec, xmov_hf, t_offset + (t_first + tc + 1) * N_HR * N_HR, t_offset + (t_first + tc + 2) * N_HR * N_HR - 1);
		NLmean(xref1_hf, w1, xref1_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
		norm_by_weight(N_HR*N_HR, xref1_hf, w1);
		put_onesnap(Xrec, xref1_hf, t_offset + (t_first + tc) * N_HR * N_HR, t_offset + (t_first + tc + 1) * N_HR * N_HR - 1);
	}
}
/* **********************************************************************************/
/* ********************************** MAIN FUNCTION *********************************/
/* **********************************************************************************/
/*
 * Entry point: read the low- and high-frequency snapshot stacks, propagate
 * the measured planes through every temporal block by NL-means, and write
 * the reconstruction to FILENAME_WR.
 *
 * Fixes: returns 0 on success (previously 1, which shells treat as
 * failure); progress totals are printed from the macros instead of the
 * stale hard-coded "23"/"37"; unused x_rec / x_current_lf allocations
 * removed; allocations checked.
 */
int main()
{
	/* Create the file to save results */
	char *varnames[NUM_VARS] = {"x_rec_all"};
	create_netcdf(FILENAME_WR, NUM_VARS, varnames);

	/* Allocate memory */
	double *x_fusion_lf_all = (double*)malloc(NUM_3DSNAPS * NUM_2DSNAPS * N_HR * N_HR * sizeof(double));
	double *x_fusion_hf_all = (double*)malloc(NUM_3DSNAPS * NUM_2DSNAPS * N_HR * N_HR * sizeof(double));
	double *x_rec_all = (double*)malloc(NUM_3DSNAPS * NUM_2DSNAPS * N_HR * N_HR * sizeof(double));
	double *x_current_hf = (double*)malloc(N_HR * N_HR * sizeof(double));

	/* precomputed index grids shared by all NL-means calls */
	long int grid_size = N_HR * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE;
	int *gridpatches_y = (int*)malloc(grid_size * sizeof(int));
	int *gridpatches_z = (int*)malloc(grid_size * sizeof(int));
	int *acc_ids = (int*)malloc(ACC_FULLSIZE * ACC_FULLSIZE * sizeof(int));

	if (!x_fusion_lf_all || !x_fusion_hf_all || !x_rec_all || !x_current_hf
			|| !gridpatches_y || !gridpatches_z || !acc_ids)
	{
		printf("\n *** ERROR: memory allocation failed\n");
		return ERRCODE;
	}

	/* read all snapshots */
	size_t start_ids[4] = {0, 0, 0, 0};
	size_t count_ids[4] = {NUM_3DSNAPS, NUM_2DSNAPS, N_HR, N_HR };
	read_netcdf(FILENAME_RD, "Uinterp_all", start_ids, count_ids, x_fusion_lf_all);
	read_netcdf(FILENAME_RD, "Udiff_all", start_ids, count_ids, x_fusion_hf_all);

	double time_all_start = omp_get_wtime();

	generate_grids(gridpatches_y, gridpatches_z, acc_ids);

	for(int snap3d_id = 0; snap3d_id < NUM_3DSNAPS; snap3d_id++)
	{
		int t_offset = snap3d_id * NUM_2DSNAPS * N_HR*N_HR;

		// the first measured (PIV) plane is copied directly into the result
		get_onesnap(x_fusion_hf_all, x_current_hf, t_offset + 0 * N_HR * N_HR, t_offset + 1 * N_HR * N_HR - 1);
		put_onesnap(x_rec_all, x_current_hf, t_offset + 0 * N_HR * N_HR, t_offset + 1 * N_HR * N_HR - 1);

		for(int block_id = 0; block_id < NUM_BLOCKS; block_id++)
		{
			double time_start = omp_get_wtime();
			int t_first = SCALE_FACTOR_TIME*block_id;
			int t_last = SCALE_FACTOR_TIME*(block_id+1);

			// copy the measured plane closing this block, then fill the
			// interior planes by symmetric propagation toward the center
			get_onesnap(x_fusion_hf_all, x_current_hf, t_offset + t_last * N_HR * N_HR, t_offset + (t_last + 1) * N_HR * N_HR - 1);
			put_onesnap(x_rec_all, x_current_hf, t_offset + t_last * N_HR * N_HR, t_offset + (t_last + 1) * N_HR * N_HR - 1);
			propag_towardcenter(x_rec_all, x_fusion_lf_all, gridpatches_y, gridpatches_z, acc_ids, t_first, t_offset);

			// totals taken from the macros (the original hard-coded stale
			// values "23" and "37")
			printf("\n Estimated block %i (total %i) in 3D snapshot %i (total %i) in %f seconds \n",
					block_id, NUM_BLOCKS, snap3d_id, NUM_3DSNAPS, (double)omp_get_wtime() - time_start);
		}
	}

	// Write to file
	write_netcdf(FILENAME_WR, "x_rec_all", start_ids, count_ids, x_rec_all);

	/* free memory */
	free(x_current_hf);
	free(x_rec_all); free(x_fusion_lf_all); free(x_fusion_hf_all);
	free(gridpatches_y); free(gridpatches_z); free(acc_ids);

	printf("\n FINISH ALL COMPUTATION IN %f SECONDS \n", (double)omp_get_wtime() - time_all_start);

	// success exit status (previously returned 1)
	return 0;
}
|
GraphBLAS.h | //------------------------------------------------------------------------------
// GraphBLAS.h: definitions for the GraphBLAS package
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS is a complete implementation of the GraphBLAS
// standard, which defines a set of sparse matrix operations on an extended
// algebra of semirings, using an almost unlimited variety of operators and
// types. When applied to sparse adjacency matrices, these algebraic
// operations are equivalent to computations on graphs. GraphBLAS provides a
// powerful and expressive framework for creating graph algorithms based on the
// elegant mathematics of sparse matrix operations on a semiring.
// This GraphBLAS.h file contains GraphBLAS definitions for user applications
// to #include. A few functions and variables with the prefix GB_ need to be
// defined in this file and are thus technically visible to the user, but they
// must not be accessed in user code. They are here only so that the ANSI C11
// _Generic feature can be used in the user-accessible polymorphic functions,
// or to implement a fast GxB_Iterator using macros.
// This implementation conforms to the GraphBLAS API Specification and also
// includes functions and features that are extensions to the spec, which are
// given names of the form GxB_* for functions, built-in objects, and macros,
// so it is clear which are in the spec and which are extensions. Extensions
// with the name GxB_* are user-accessible in SuiteSparse:GraphBLAS but cannot
// be guaranteed to appear in all GraphBLAS implementations.
// Regarding "historical" functions and symbols: when a GxB_* function or
// symbol is added to the C API Specification, the new GrB_* name should be
// used instead. The old GxB_* name will be kept for historical reasons,
// documented here and in working order; it might no longer be mentioned in the
// user guide. Historical functions and symbols would only be removed in the
// rare case that they cause a serious conflict with future methods.
#ifndef GRAPHBLAS_H
#define GRAPHBLAS_H
//==============================================================================
// include files required by GraphBLAS
//==============================================================================
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <stdbool.h>
#include <stdint.h>
#include <inttypes.h>
#include <stddef.h>
#include <limits.h>
#include <math.h>
#include <stdarg.h>
//==============================================================================
// renaming for use in R2021a or later
//==============================================================================
#define GB_CAT2(x,y) x ## y
#define GB_EVAL2(x,y) GB_CAT2 (x,y)
#ifdef GBRENAME
// All symbols must be renamed for the @GrB interface when using
// R2021a and following, since those versions include an earlier
// version of SuiteSparse:GraphBLAS.
#define GB(x) GB_EVAL2 (GM_, x)
#define GRB(x) GB_EVAL2 (GrM_, x)
#define GXB(x) GB_EVAL2 (GxM_, x)
#define GrB GrM
#define GxB GxM
#include "GB_rename.h"
#else
// Use the standard GraphBLAS prefix.
#define GB(x) GB_EVAL2 (GB_, x)
#define GRB(x) GB_EVAL2 (GrB_, x)
#define GXB(x) GB_EVAL2 (GxB_, x)
#endif
//==============================================================================
// compiler variations
//==============================================================================
// Exporting/importing symbols for Microsoft Visual Studio
#if ( _MSC_VER && !(__INTEL_COMPILER || __INTEL_CLANG_COMPILER) )
#ifdef GB_LIBRARY
// compiling SuiteSparse:GraphBLAS itself, exporting symbols to user apps
#define GB_PUBLIC extern __declspec ( dllexport )
#else
// compiling the user application, importing symbols from SuiteSparse:GraphBLAS
#define GB_PUBLIC extern __declspec ( dllimport )
#endif
#else
// for other compilers
#define GB_PUBLIC extern
#endif
// GraphBLAS requires an ANSI C11 compiler for its polymorphic functions (using
// the _Generic keyword), but it can be used in an C90 compiler if those
// functions are disabled.
// With ANSI C11 and later, _Generic keyword and polymorphic functions can be
// used. Earlier versions of the language do not have this feature.
#ifdef __STDC_VERSION__
// ANSI C17: 201710L
// ANSI C11: 201112L
// ANSI C99: 199901L
// ANSI C95: 199409L
#define GxB_STDC_VERSION __STDC_VERSION__
#else
// assume ANSI C90 / C89
#define GxB_STDC_VERSION 199001L
#endif
//------------------------------------------------------------------------------
// definitions for complex types, and restrict keyword
//------------------------------------------------------------------------------
#undef GB_restrict
// See:
// https://www.drdobbs.com/complex-arithmetic-in-the-intersection-o/184401628#
#if defined ( __cplusplus )
extern "C++"
{
// C++ complex types
#include <cmath>
#include <complex>
#undef I
typedef std::complex<float> GxB_FC32_t ;
typedef std::complex<double> GxB_FC64_t ;
}
#define GxB_CMPLXF(r,i) GxB_FC32_t(r,i)
#define GxB_CMPLX(r,i) GxB_FC64_t(r,i)
#define GB_restrict
#elif ( _MSC_VER && !(__INTEL_COMPILER || __INTEL_CLANG_COMPILER) )
// Microsoft Windows complex types
#include <complex.h>
#undef I
typedef _Fcomplex GxB_FC32_t ;
typedef _Dcomplex GxB_FC64_t ;
#define GxB_CMPLXF(r,i) (_FCbuild (r,i))
#define GxB_CMPLX(r,i) ( _Cbuild (r,i))
#define GB_restrict __restrict
#else
// ANSI C11 complex types
#include <complex.h>
#undef I
typedef float complex GxB_FC32_t ;
typedef double complex GxB_FC64_t ;
#ifndef CMPLX
// gcc 6.2 on the Mac doesn't #define CMPLX
#define GxB_CMPLX(r,i) \
((GxB_FC64_t)((double)(r)) + (GxB_FC64_t)((double)(i) * _Complex_I))
#else
// use the ANSI C11 CMPLX macro
#define GxB_CMPLX(r,i) CMPLX (r,i)
#endif
#ifndef CMPLXF
// gcc 6.2 on the Mac doesn't #define CMPLXF
#define GxB_CMPLXF(r,i) \
((GxB_FC32_t)((float)(r)) + (GxB_FC32_t)((float)(i) * _Complex_I))
#else
// use the ANSI C11 CMPLXF macro
#define GxB_CMPLXF(r,i) CMPLXF (r,i)
#endif
// restrict keyword
#if defined ( __NVCC__ )
// NVIDIA nvcc
#define GB_restrict __restrict__
#elif GxB_STDC_VERSION >= 199901L
// ANSI C99 or later
#define GB_restrict restrict
#else
// ANSI C95 and earlier: no restrict keyword
#define GB_restrict
#endif
#endif
//==============================================================================
// version control
//==============================================================================
// There are two version numbers that user codes can check against with
// compile-time #if tests: the version of this GraphBLAS implementation,
// and the version of the GraphBLAS specification it conforms to. User code
// can use tests like this:
//
// #if GxB_SPEC_VERSION >= GxB_VERSION (2,0,3)
// ... use features in GraphBLAS specification 2.0.3 ...
// #else
// ... only use features in early specifications
// #endif
//
// #if GxB_IMPLEMENTATION > GxB_VERSION (1,4,0)
// ... use features from version 1.4.0 of a GraphBLAS package
// #endif
// X_GRAPHBLAS: names this particular implementation:
#define GxB_SUITESPARSE_GRAPHBLAS
// GxB_VERSION: a single integer for comparing spec and version levels
#define GxB_VERSION(major,minor,sub) \
(((major)*1000ULL + (minor))*1000ULL + (sub))
// The version of this implementation, and the GraphBLAS API version:
#define GxB_IMPLEMENTATION_NAME "SuiteSparse:GraphBLAS"
#define GxB_IMPLEMENTATION_DATE "Apr 8, 2022"
#define GxB_IMPLEMENTATION_MAJOR 7
#define GxB_IMPLEMENTATION_MINOR 0
#define GxB_IMPLEMENTATION_SUB 3
#define GxB_SPEC_DATE "Nov 15, 2021"
#define GxB_SPEC_MAJOR 2
#define GxB_SPEC_MINOR 0
#define GxB_SPEC_SUB 0
// compile-time access to the C API Version number of this library.
#define GRB_VERSION GxB_SPEC_MAJOR
#define GRB_SUBVERSION GxB_SPEC_MINOR
#define GxB_IMPLEMENTATION \
GxB_VERSION (GxB_IMPLEMENTATION_MAJOR, \
GxB_IMPLEMENTATION_MINOR, \
GxB_IMPLEMENTATION_SUB)
// The 'about' string the describes this particular implementation of GraphBLAS:
#define GxB_IMPLEMENTATION_ABOUT \
"SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved." \
"\nhttp://suitesparse.com Dept of Computer Sci. & Eng, Texas A&M University.\n"
// The GraphBLAS license for this particular implementation of GraphBLAS:
#define GxB_IMPLEMENTATION_LICENSE \
"SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved." \
"\nLicensed under the Apache License, Version 2.0 (the \"License\"); you may\n"\
"not use SuiteSparse:GraphBLAS except in compliance with the License. You\n" \
"may obtain a copy of the License at\n\n" \
" http://www.apache.org/licenses/LICENSE-2.0\n\n" \
"Unless required by applicable law or agreed to in writing, software\n" \
"distributed under the License is distributed on an \"AS IS\" BASIS,\n" \
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n" \
"See the License for the specific language governing permissions and\n" \
"limitations under the License.\n"
//------------------------------------------------------------------------------
// GraphBLAS C API version
//------------------------------------------------------------------------------
#define GxB_SPEC_VERSION GxB_VERSION(GxB_SPEC_MAJOR,GxB_SPEC_MINOR,GxB_SPEC_SUB)
// The 'spec' string describes the GraphBLAS spec:
#define GxB_SPEC_ABOUT \
"GraphBLAS C API, by Aydin Buluc, Timothy Mattson, Scott McMillan,\n" \
"Jose' Moreira, Carl Yang, and Benjamin Brock. Based on 'GraphBLAS\n" \
"Mathematics by Jeremy Kepner. See also 'Graph Algorithms in the Language\n" \
"of Linear Algebra,' edited by J. Kepner and J. Gilbert, SIAM, 2011.\n"
//==============================================================================
// GrB_Index: the GraphBLAS integer
//==============================================================================
// GrB_Index: row or column index, or matrix dimension. This typedef is used
// for row and column indices, or matrix and vector dimensions.
typedef uint64_t GrB_Index ;
// GrB_INDEX_MAX is the largest permissible index value. The largest valid
// matrix or vector dimension is GrB_INDEX_MAX+1, or 2^60 in SuiteSparse:GrB.
#define GrB_INDEX_MAX ((GrB_Index) (1ULL << 60) - 1)
// GxB_INDEX_MAX is historical; use GrB_INDEX_MAX+1 instead. It differs by one
// from GrB_INDEX_MAX, since it defined the largest valid matrix or vector
// dimension.
#define GxB_INDEX_MAX ((GrB_Index) (1ULL << 60))
//==============================================================================
// GraphBLAS error and informational codes
//==============================================================================
// All GraphBLAS functions return a code that indicates if it was successful
// or not. If more information is required, the GrB_error function can be
// called, which returns a string that provides more information on the last
// return value from GraphBLAS.
// The v1.3 C API did not specify the enum values, but they appear in v2.0.
// Changing them will require SuiteSparse:GraphBLAS to bump to v6.x.
// Error codes GrB_NOT_IMPLEMENTED and GrB_EMPTY_OBJECT are new to v2.0.
typedef enum
{
GrB_SUCCESS = 0, // all is well
//--------------------------------------------------------------------------
// informational codes, not an error:
//--------------------------------------------------------------------------
GrB_NO_VALUE = 1, // A(i,j) requested but not there
GxB_EXHAUSTED = 2, // iterator is exhausted
//--------------------------------------------------------------------------
// errors:
//--------------------------------------------------------------------------
GrB_UNINITIALIZED_OBJECT = -1, // object has not been initialized
GrB_NULL_POINTER = -2, // input pointer is NULL
GrB_INVALID_VALUE = -3, // generic error; some value is bad
GrB_INVALID_INDEX = -4, // row or column index is out of bounds
GrB_DOMAIN_MISMATCH = -5, // object domains are not compatible
GrB_DIMENSION_MISMATCH = -6, // matrix dimensions do not match
GrB_OUTPUT_NOT_EMPTY = -7, // output matrix already has values
GrB_NOT_IMPLEMENTED = -8, // method not implemented
GrB_PANIC = -101, // unknown error
GrB_OUT_OF_MEMORY = -102, // out of memory
GrB_INSUFFICIENT_SPACE = -103, // output array not large enough
GrB_INVALID_OBJECT = -104, // object is corrupted
GrB_INDEX_OUT_OF_BOUNDS = -105, // row or col index out of bounds
GrB_EMPTY_OBJECT = -106 // an object does not contain a value
}
GrB_Info ;
//==============================================================================
// GrB_init / GrB_finalize
//==============================================================================
// GrB_init must be called before any other GraphBLAS operation. GrB_finalize
// must be called as the last GraphBLAS operation.
// GrB_init defines the mode that GraphBLAS will use: blocking or
// non-blocking. With blocking mode, all operations finish before returning to
// the user application. With non-blocking mode, operations can be left
// pending, and are computed only when needed.
// The extension GxB_init does the work of GrB_init, but it also defines the
// memory management functions that SuiteSparse:GraphBLAS will use internally.
typedef enum
{
GrB_NONBLOCKING = 0, // methods may return with pending computations
GrB_BLOCKING = 1 // no computations are ever left pending
}
GrB_Mode ;
GB_PUBLIC
GrB_Info GrB_init // start up GraphBLAS
(
GrB_Mode mode // blocking or non-blocking mode
) ;
GB_PUBLIC
GrB_Info GxB_init // start up GraphBLAS and also define malloc, etc
(
GrB_Mode mode, // blocking or non-blocking mode
// pointers to memory management functions
void * (* user_malloc_function ) (size_t),
void * (* user_calloc_function ) (size_t, size_t),
void * (* user_realloc_function ) (void *, size_t),
void (* user_free_function ) (void *)
) ;
GB_PUBLIC
GrB_Info GrB_finalize (void) ; // finish GraphBLAS
//==============================================================================
// GrB_getVersion: GraphBLAS C API version
//==============================================================================
// GrB_getVersion provides a runtime access of the C API Version.
GB_PUBLIC
GrB_Info GrB_getVersion // runtime access to C API version number
(
unsigned int *version, // returns GRB_VERSION
unsigned int *subversion // returns GRB_SUBVERSION
) ;
//==============================================================================
// GrB_Descriptor: the GraphBLAS descriptor
//==============================================================================
// The GrB_Descriptor is used to modify the behavior of GraphBLAS operations.
//
// GrB_OUTP: can be GxB_DEFAULT or GrB_REPLACE. If GrB_REPLACE, then C is
// cleared after taking part in the accum operation but before the mask.
// In other words, C<Mask> = accum (C,T) is split into Z = accum(C,T) ;
// C=0 ; C<Mask> = Z.
//
// GrB_MASK: can be GxB_DEFAULT, GrB_COMP, GrB_STRUCTURE, or set to both
// GrB_COMP and GrB_STRUCTURE. If GxB_DEFAULT, the mask is used
// normally, where Mask(i,j)=1 means C(i,j) can be modified by C<Mask>=Z,
// and Mask(i,j)=0 means it cannot be modified even if Z(i,j) has been
// computed and differs from C(i,j). If GrB_COMP, this is the same as
// taking the logical complement of the Mask. If GrB_STRUCTURE is set,
// the value of the mask is not considered, just its pattern. The
// GrB_COMP and GrB_STRUCTURE settings can be combined.
//
// GrB_INP0: can be GxB_DEFAULT or GrB_TRAN. If GxB_DEFAULT, the first input
// is used as-is. If GrB_TRAN, it is transposed. Only matrices are
// transposed this way. Vectors are never transposed via the
// GrB_Descriptor.
//
// GrB_INP1: the same as GrB_INP0 but for the second input
//
// GxB_NTHREADS: the maximum number of threads to use in the current method.
// If <= GxB_DEFAULT (which is zero), then the number of threads is
// determined automatically. This is the default value.
//
// GxB_CHUNK: an integer parameter that determines the number of threads to use
// for a small problem. If w is the work to be performed, and chunk is
// the value of this parameter, then the # of threads is limited to floor
// (w/chunk). The default chunk is currently 64K, but this may change in
// the future. If chunk is set to <= GxB_DEFAULT (that is, zero), the
// default is used.
//
// GxB_AxB_METHOD: this is a hint to SuiteSparse:GraphBLAS on which algorithm
// it should use to compute C=A*B, in GrB_mxm, GrB_mxv, and GrB_vxm.
// SuiteSparse:GraphBLAS has four different heuristics, and the default
// method (GxB_DEFAULT) selects between them automatically. The complete
// rule is in the User Guide. The brief discussion here assumes all
// matrices are stored by column. All methods compute the same result,
// except that floating-point roundoff may differ when working on
// floating-point data types.
//
// GxB_AxB_SAXPY: C(:,j)=A*B(:,j) is computed using a mix of Gustavson
// and Hash methods. Each task in the parallel computation makes its
// own decision between these two methods, via a heuristic.
//
// GxB_AxB_GUSTAVSON: This is the same as GxB_AxB_SAXPY, except that
// every task uses Gustavson's method, computing C(:,j)=A*B(:,j) via a
// gather/scatter workspace of size equal to the number of rows of A.
// Very good general-purpose method, but sometimes the workspace can
// be too large when many threads are used.
//
// GxB_AxB_HASH: This is the same as GxB_AxB_SAXPY, except that every
// task uses the Hash method. It is very good for hypersparse
// matrices and uses very little workspace, and so it scales well to
// many threads.
//
// GxB_AxB_DOT: computes C(i,j) = A(:,i)'*B(:,j), for each entry C(i,j).
// A very specialized method that works well only if the mask is
// present, very sparse, and not complemented, or when C is a dense
// vector or matrix, or when C is small.
//
// GxB_SORT: GrB_mxm and other methods may return a matrix in a 'jumbled'
// state, with indices out of order. The sort is left pending. Some
// methods can tolerate jumbled matrices on input, so this can be faster.
// However, in some cases, it can be faster for GrB_mxm to sort its output
// as it is computed. With GxB_SORT set to GxB_DEFAULT, the sort is left
// pending. With GxB_SORT set to a nonzero value, GrB_mxm typically sorts
// the resulting matrix C (but not always; this is just a hint). If
// GrB_init is called with GrB_BLOCKING mode, the sort will always be
// done, and this setting has no effect.
//
// GxB_COMPRESSION: compression method for GxB_Matrix_serialize and
// GxB_Vector_serialize. The default is LZ4.
//
// GxB_IMPORT: GxB_FAST_IMPORT (faster, for trusted input data) or
// GxB_SECURE_IMPORT (slower, for untrusted input data), for the
// GxB*_pack* methods.
// The following are enumerated values in both the GrB_Desc_Field and the
// GxB_Option_Field for global options. They are defined with the same integer
// value for both enums, so the user can use them for both.
#define GxB_NTHREADS 5
#define GxB_CHUNK 7
// GPU control (DRAFT: in progress, do not use)
#define GxB_GPU_CONTROL 21
#define GxB_GPU_CHUNK 22
typedef enum
{
GrB_OUTP = 0, // descriptor for output of a method
GrB_MASK = 1, // descriptor for the mask input of a method
GrB_INP0 = 2, // descriptor for the first input of a method
GrB_INP1 = 3, // descriptor for the second input of a method
GxB_DESCRIPTOR_NTHREADS = GxB_NTHREADS, // max number of threads to use.
// If <= GxB_DEFAULT, then GraphBLAS selects the number
// of threads automatically.
GxB_DESCRIPTOR_CHUNK = GxB_CHUNK, // chunk size for small problems.
// If <= GxB_DEFAULT, then the default is used.
// GPU control (DRAFT: in progress, do not use)
GxB_DESCRIPTOR_GPU_CONTROL = GxB_GPU_CONTROL,
GxB_DESCRIPTOR_GPU_CHUNK = GxB_GPU_CHUNK,
GxB_AxB_METHOD = 1000, // descriptor for selecting C=A*B algorithm
GxB_SORT = 35, // control sort in GrB_mxm
GxB_COMPRESSION = 36, // select compression for serialize
GxB_IMPORT = 37, // secure vs fast import
}
GrB_Desc_Field ;
typedef enum
{
// for all GrB_Descriptor fields:
GxB_DEFAULT = 0, // default behavior of the method
// for GrB_OUTP only:
GrB_REPLACE = 1, // clear the output before assigning new values to it
// for GrB_MASK only:
GrB_COMP = 2, // use the structural complement of the input
GrB_STRUCTURE = 4, // use the only pattern of the mask, not its values
// for GrB_INP0 and GrB_INP1 only:
GrB_TRAN = 3, // use the transpose of the input
// for GxB_GPU_CONTROL only (DRAFT: in progress, do not use)
GxB_GPU_ALWAYS = 2001,
GxB_GPU_NEVER = 2002,
// for GxB_AxB_METHOD only:
GxB_AxB_GUSTAVSON = 1001, // gather-scatter saxpy method
GxB_AxB_DOT = 1003, // dot product
GxB_AxB_HASH = 1004, // hash-based saxpy method
GxB_AxB_SAXPY = 1005, // saxpy method (any kind)
// for GxB_IMPORT only:
GxB_SECURE_IMPORT = 502 // GxB*_pack* methods trust their input data
}
GrB_Desc_Value ;
// default for GxB pack is to trust the input data
#define GxB_FAST_IMPORT GxB_DEFAULT
typedef struct GB_Descriptor_opaque *GrB_Descriptor ;
GB_PUBLIC
GrB_Info GrB_Descriptor_new // create a new descriptor
(
GrB_Descriptor *descriptor // handle of descriptor to create
) ;
GB_PUBLIC
GrB_Info GrB_Descriptor_set // set a parameter in a descriptor
(
GrB_Descriptor desc, // descriptor to modify
GrB_Desc_Field field, // parameter to change
GrB_Desc_Value val // value to change it to
) ;
GB_PUBLIC
GrB_Info GxB_Descriptor_get // get a parameter from a descriptor
(
GrB_Desc_Value *val, // value of the parameter
GrB_Descriptor desc, // descriptor to query; NULL means defaults
GrB_Desc_Field field // parameter to query
) ;
GB_PUBLIC
GrB_Info GxB_Desc_set // set a parameter in a descriptor
(
GrB_Descriptor desc, // descriptor to modify
GrB_Desc_Field field, // parameter to change
... // value to change it to
) ;
GB_PUBLIC
GrB_Info GxB_Desc_get // get a parameter from a descriptor
(
GrB_Descriptor desc, // descriptor to query; NULL means defaults
GrB_Desc_Field field, // parameter to query
... // value of the parameter
) ;
GB_PUBLIC
GrB_Info GrB_Descriptor_free // free a descriptor
(
GrB_Descriptor *descriptor // handle of descriptor to free
) ;
// Predefined descriptors and their values:
GB_PUBLIC
GrB_Descriptor // OUTP MASK MASK INP0 INP1
// structural complement
// =========== ============== ========== ======== ========
// GrB_NULL // - - - - -
GrB_DESC_T1 , // - - - - GrB_TRAN
GrB_DESC_T0 , // - - - GrB_TRAN -
GrB_DESC_T0T1 , // - - - GrB_TRAN GrB_TRAN
GrB_DESC_C , // - - GrB_COMP - -
GrB_DESC_CT1 , // - - GrB_COMP - GrB_TRAN
GrB_DESC_CT0 , // - - GrB_COMP GrB_TRAN -
GrB_DESC_CT0T1 , // - - GrB_COMP GrB_TRAN GrB_TRAN
GrB_DESC_S , // - GrB_STRUCTURE - - -
GrB_DESC_ST1 , // - GrB_STRUCTURE - - GrB_TRAN
GrB_DESC_ST0 , // - GrB_STRUCTURE - GrB_TRAN -
GrB_DESC_ST0T1 , // - GrB_STRUCTURE - GrB_TRAN GrB_TRAN
GrB_DESC_SC , // - GrB_STRUCTURE GrB_COMP - -
GrB_DESC_SCT1 , // - GrB_STRUCTURE GrB_COMP - GrB_TRAN
GrB_DESC_SCT0 , // - GrB_STRUCTURE GrB_COMP GrB_TRAN -
GrB_DESC_SCT0T1 , // - GrB_STRUCTURE GrB_COMP GrB_TRAN GrB_TRAN
GrB_DESC_R , // GrB_REPLACE - - - -
GrB_DESC_RT1 , // GrB_REPLACE - - - GrB_TRAN
GrB_DESC_RT0 , // GrB_REPLACE - - GrB_TRAN -
GrB_DESC_RT0T1 , // GrB_REPLACE - - GrB_TRAN GrB_TRAN
GrB_DESC_RC , // GrB_REPLACE - GrB_COMP - -
GrB_DESC_RCT1 , // GrB_REPLACE - GrB_COMP - GrB_TRAN
GrB_DESC_RCT0 , // GrB_REPLACE - GrB_COMP GrB_TRAN -
GrB_DESC_RCT0T1 , // GrB_REPLACE - GrB_COMP GrB_TRAN GrB_TRAN
GrB_DESC_RS , // GrB_REPLACE GrB_STRUCTURE - - -
GrB_DESC_RST1 , // GrB_REPLACE GrB_STRUCTURE - - GrB_TRAN
GrB_DESC_RST0 , // GrB_REPLACE GrB_STRUCTURE - GrB_TRAN -
GrB_DESC_RST0T1 , // GrB_REPLACE GrB_STRUCTURE - GrB_TRAN GrB_TRAN
GrB_DESC_RSC , // GrB_REPLACE GrB_STRUCTURE GrB_COMP - -
GrB_DESC_RSCT1 , // GrB_REPLACE GrB_STRUCTURE GrB_COMP - GrB_TRAN
GrB_DESC_RSCT0 , // GrB_REPLACE GrB_STRUCTURE GrB_COMP GrB_TRAN -
GrB_DESC_RSCT0T1 ; // GrB_REPLACE GrB_STRUCTURE GrB_COMP GrB_TRAN GrB_TRAN
// GrB_NULL is the default descriptor, with all settings at their defaults:
//
// OUTP: do not replace the output
// MASK: mask is valued and not complemented
// INP0: first input not transposed
// INP1: second input not transposed
// Predefined descriptors may not be modified or freed. Attempting to modify
// them results in an error (GrB_INVALID_VALUE). Attempts to free them are
// silently ignored.
//==============================================================================
// GrB_Type: data types
//==============================================================================
typedef struct GB_Type_opaque *GrB_Type ;
// GraphBLAS predefined types and their counterparts in pure C:
GB_PUBLIC GrB_Type
GrB_BOOL , // in C: bool
GrB_INT8 , // in C: int8_t
GrB_INT16 , // in C: int16_t
GrB_INT32 , // in C: int32_t
GrB_INT64 , // in C: int64_t
GrB_UINT8 , // in C: uint8_t
GrB_UINT16 , // in C: uint16_t
GrB_UINT32 , // in C: uint32_t
GrB_UINT64 , // in C: uint64_t
GrB_FP32 , // in C: float
GrB_FP64 , // in C: double
GxB_FC32 , // in C: float complex
GxB_FC64 ; // in C: double complex
//------------------------------------------------------------------------------
// helper macros for polymorphic functions
//------------------------------------------------------------------------------
#define GB_CAT(w,x,y,z) w ## x ## y ## z
#define GB_CONCAT(w,x,y,z) GB_CAT (w, x, y, z)
#if GxB_STDC_VERSION >= 201112L
#define GB_CASES(p,prefix,func) \
const bool p : GB_CONCAT ( prefix, _, func, _BOOL ), \
bool p : GB_CONCAT ( prefix, _, func, _BOOL ), \
const int8_t p : GB_CONCAT ( prefix, _, func, _INT8 ), \
int8_t p : GB_CONCAT ( prefix, _, func, _INT8 ), \
const int16_t p : GB_CONCAT ( prefix, _, func, _INT16 ), \
int16_t p : GB_CONCAT ( prefix, _, func, _INT16 ), \
const int32_t p : GB_CONCAT ( prefix, _, func, _INT32 ), \
int32_t p : GB_CONCAT ( prefix, _, func, _INT32 ), \
const int64_t p : GB_CONCAT ( prefix, _, func, _INT64 ), \
int64_t p : GB_CONCAT ( prefix, _, func, _INT64 ), \
const uint8_t p : GB_CONCAT ( prefix, _, func, _UINT8 ), \
uint8_t p : GB_CONCAT ( prefix, _, func, _UINT8 ), \
const uint16_t p : GB_CONCAT ( prefix, _, func, _UINT16 ), \
uint16_t p : GB_CONCAT ( prefix, _, func, _UINT16 ), \
const uint32_t p : GB_CONCAT ( prefix, _, func, _UINT32 ), \
uint32_t p : GB_CONCAT ( prefix, _, func, _UINT32 ), \
const uint64_t p : GB_CONCAT ( prefix, _, func, _UINT64 ), \
uint64_t p : GB_CONCAT ( prefix, _, func, _UINT64 ), \
const float p : GB_CONCAT ( prefix, _, func, _FP32 ), \
float p : GB_CONCAT ( prefix, _, func, _FP32 ), \
const double p : GB_CONCAT ( prefix, _, func, _FP64 ), \
double p : GB_CONCAT ( prefix, _, func, _FP64 ), \
const GxB_FC32_t p : GB_CONCAT ( GxB , _, func, _FC32 ), \
GxB_FC32_t p : GB_CONCAT ( GxB , _, func, _FC32 ), \
const GxB_FC64_t p : GB_CONCAT ( GxB , _, func, _FC64 ), \
GxB_FC64_t p : GB_CONCAT ( GxB , _, func, _FC64 ), \
const void * : GB_CONCAT ( prefix, _, func, _UDT ), \
void * : GB_CONCAT ( prefix, _, func, _UDT )
#endif
//------------------------------------------------------------------------------
// GrB_Type_new: create a new type
//------------------------------------------------------------------------------
// GrB_Type_new is implemented both as a macro and a function. Both are
// user-callable. The default is to use the macro, since this allows the name
// of the type to be saved as a string, for subsequent error reporting by
// GrB_error.
#undef GrB_Type_new
#undef GrM_Type_new
GB_PUBLIC
GrB_Info GRB (Type_new) // create a new GraphBLAS type
(
GrB_Type *type, // handle of user type to create
size_t sizeof_ctype // size = sizeof (ctype) of the C type
) ;
// user code should not directly use GB_STR or GB_XSTR
// GB_STR: convert the content of x into a string "x"
#define GB_XSTR(x) GB_STR(x)
#define GB_STR(x) #x
// GrB_Type_new as a user-callable macro, which allows the name of the ctype
// to be added to the new type. The type_defn is unknown.
#define GrB_Type_new(utype, sizeof_ctype) \
GxB_Type_new(utype, sizeof_ctype, GB_STR(sizeof_ctype), NULL)
#define GrM_Type_new(utype, sizeof_ctype) \
GxB_Type_new(utype, sizeof_ctype, GB_STR(sizeof_ctype), NULL)
// GxB_Type_new creates a type with a name and definition that are known to
// GraphBLAS, as strings. The type_name is any valid string (max length of 128
// characters, including the required null-terminating character) that may
// appear as the name of a C type created by a C "typedef" statement. It must
// not contain any white-space characters. Example, creating a type of size
// 16*4+4 = 68 bytes, with a 4-by-4 dense float array and a 32-bit integer:
//
// typedef struct { float x [4][4] ; int color ; } myquaternion ;
// GrB_Type MyQtype ;
// GxB_Type_new (&MyQtype, sizeof (myquaternion), "myquaternion",
// "typedef struct { float x [4][4] ; int color ; } myquaternion ;") ;
//
// The type_name and type_defn are both null-terminated strings. Currently,
// type_defn is unused, but it will be required for best performance when a JIT
// is implemented in SuiteSparse:GraphBLAS (both on the CPU and GPU). User
// defined types created by GrB_Type_new will not work with a JIT.
//
// At most GxB_MAX_NAME_LEN characters are accessed in type_name; characters
// beyond that limit are silently ignored.
#define GxB_MAX_NAME_LEN 128
GB_PUBLIC
GrB_Info GxB_Type_new // create a new named GraphBLAS type
(
GrB_Type *type, // handle of user type to create
size_t sizeof_ctype, // size = sizeof (ctype) of the C type
const char *type_name, // name of the type (max 128 characters)
const char *type_defn // typedef for the type (no max length)
) ;
// GB_Type_new is historical: use GxB_Type_new instead
GB_PUBLIC
GrB_Info GB_Type_new // not user-callable
(
GrB_Type *type, // handle of user type to create
size_t sizeof_ctype, // size of the user type
const char *type_name // name of the type, as "sizeof (ctype)"
) ;
GB_PUBLIC
GrB_Info GxB_Type_name // return the name of a GraphBLAS type
(
char *type_name, // name of the type (char array of size at least
// GxB_MAX_NAME_LEN, owned by the user application).
const GrB_Type type
) ;
GB_PUBLIC
GrB_Info GxB_Type_size // determine the size of the type
(
size_t *size, // the sizeof the type
const GrB_Type type // type to determine the sizeof
) ;
GB_PUBLIC
GrB_Info GxB_Type_from_name // return the built-in GrB_Type from a name
(
GrB_Type *type, // built-in type, or NULL if user-defined
const char *type_name // array of size at least GxB_MAX_NAME_LEN
) ;
GB_PUBLIC
GrB_Info GrB_Type_free // free a user-defined type
(
GrB_Type *type // handle of user-defined type to free
) ;
//==============================================================================
// GrB_UnaryOp: unary operators
//==============================================================================
// GrB_UnaryOp: a function z=f(x). The function f must have the signature:
// void f (void *z, const void *x) ;
// The pointers are void * but they are always of pointers to objects of type
// ztype and xtype, respectively. The function must typecast its arguments as
// needed from void* to ztype* and xtype*.
typedef struct GB_UnaryOp_opaque *GrB_UnaryOp ;
//------------------------------------------------------------------------------
// built-in unary operators, z = f(x)
//------------------------------------------------------------------------------
// Built-in unary operators: for each z=f(x) declared here, z and x have the
// same type, given by the suffix of the operator name.
GB_PUBLIC GrB_UnaryOp
// For these functions z=f(x), z and x have the same type.
// The suffix in the name is the type of x and z.
// z = x z = -x z = 1/x z = ! (x != 0)
// identity additive multiplicative logical
// inverse inverse negation
GrB_IDENTITY_BOOL, GrB_AINV_BOOL, GrB_MINV_BOOL, GxB_LNOT_BOOL,
GrB_IDENTITY_INT8, GrB_AINV_INT8, GrB_MINV_INT8, GxB_LNOT_INT8,
GrB_IDENTITY_INT16, GrB_AINV_INT16, GrB_MINV_INT16, GxB_LNOT_INT16,
GrB_IDENTITY_INT32, GrB_AINV_INT32, GrB_MINV_INT32, GxB_LNOT_INT32,
GrB_IDENTITY_INT64, GrB_AINV_INT64, GrB_MINV_INT64, GxB_LNOT_INT64,
GrB_IDENTITY_UINT8, GrB_AINV_UINT8, GrB_MINV_UINT8, GxB_LNOT_UINT8,
GrB_IDENTITY_UINT16, GrB_AINV_UINT16, GrB_MINV_UINT16, GxB_LNOT_UINT16,
GrB_IDENTITY_UINT32, GrB_AINV_UINT32, GrB_MINV_UINT32, GxB_LNOT_UINT32,
GrB_IDENTITY_UINT64, GrB_AINV_UINT64, GrB_MINV_UINT64, GxB_LNOT_UINT64,
GrB_IDENTITY_FP32, GrB_AINV_FP32, GrB_MINV_FP32, GxB_LNOT_FP32,
GrB_IDENTITY_FP64, GrB_AINV_FP64, GrB_MINV_FP64, GxB_LNOT_FP64,
// complex unary operators:
GxB_IDENTITY_FC32, GxB_AINV_FC32, GxB_MINV_FC32, // no LNOT
GxB_IDENTITY_FC64, GxB_AINV_FC64, GxB_MINV_FC64, // for complex
// z = 1 z = abs(x) z = bnot(x) z = signum
// one absolute value bitwise negation
// (no signum operators appear in this list: GxB_SIGNUM_* is defined only for
// floating-point types; see the floating-point unary operators below)
GxB_ONE_BOOL, GrB_ABS_BOOL,
GxB_ONE_INT8, GrB_ABS_INT8, GrB_BNOT_INT8,
GxB_ONE_INT16, GrB_ABS_INT16, GrB_BNOT_INT16,
GxB_ONE_INT32, GrB_ABS_INT32, GrB_BNOT_INT32,
GxB_ONE_INT64, GrB_ABS_INT64, GrB_BNOT_INT64,
GxB_ONE_UINT8, GrB_ABS_UINT8, GrB_BNOT_UINT8,
GxB_ONE_UINT16, GrB_ABS_UINT16, GrB_BNOT_UINT16,
GxB_ONE_UINT32, GrB_ABS_UINT32, GrB_BNOT_UINT32,
GxB_ONE_UINT64, GrB_ABS_UINT64, GrB_BNOT_UINT64,
GxB_ONE_FP32, GrB_ABS_FP32,
GxB_ONE_FP64, GrB_ABS_FP64,
// complex unary operators:
GxB_ONE_FC32, // for complex types, z = abs(x)
GxB_ONE_FC64, // is real; listed below.
// Boolean negation, z = !x, where both z and x are boolean. There is no
// suffix since z and x are only boolean. This operator is identical to
// GxB_LNOT_BOOL; it just has a different name.
GrB_LNOT ;
// GxB_ABS is now in the v1.3 spec, the following names are historical:
// (use the GrB_ABS_* operators above instead; these GxB_* names are kept
// only for backward compatibility)
GB_PUBLIC GrB_UnaryOp
// z = abs(x)
GxB_ABS_BOOL,
GxB_ABS_INT8,
GxB_ABS_INT16,
GxB_ABS_INT32,
GxB_ABS_INT64,
GxB_ABS_UINT8,
GxB_ABS_UINT16,
GxB_ABS_UINT32,
GxB_ABS_UINT64,
GxB_ABS_FP32,
GxB_ABS_FP64 ;
//------------------------------------------------------------------------------
// Unary operators for floating-point types only
//------------------------------------------------------------------------------
// The following floating-point unary operators, and their ANSI C11
// equivalents, are only defined for floating-point (real and complex) types.
// Floating-point unary operators.  Grouped below by the relation between the
// types of z and x (same type; real only; complex only; real from complex;
// bool from any floating-point type).
GB_PUBLIC GrB_UnaryOp
//--------------------------------------------------------------------------
// z = f(x) where z and x have the same type (all 4 floating-point types)
//--------------------------------------------------------------------------
// z = sqrt (x) z = log (x) z = exp (x) z = log2 (x)
GxB_SQRT_FP32, GxB_LOG_FP32, GxB_EXP_FP32, GxB_LOG2_FP32,
GxB_SQRT_FP64, GxB_LOG_FP64, GxB_EXP_FP64, GxB_LOG2_FP64,
GxB_SQRT_FC32, GxB_LOG_FC32, GxB_EXP_FC32, GxB_LOG2_FC32,
GxB_SQRT_FC64, GxB_LOG_FC64, GxB_EXP_FC64, GxB_LOG2_FC64,
// z = sin (x) z = cos (x) z = tan (x)
GxB_SIN_FP32, GxB_COS_FP32, GxB_TAN_FP32,
GxB_SIN_FP64, GxB_COS_FP64, GxB_TAN_FP64,
GxB_SIN_FC32, GxB_COS_FC32, GxB_TAN_FC32,
GxB_SIN_FC64, GxB_COS_FC64, GxB_TAN_FC64,
// z = acos (x) z = asin (x) z = atan (x)
GxB_ACOS_FP32, GxB_ASIN_FP32, GxB_ATAN_FP32,
GxB_ACOS_FP64, GxB_ASIN_FP64, GxB_ATAN_FP64,
GxB_ACOS_FC32, GxB_ASIN_FC32, GxB_ATAN_FC32,
GxB_ACOS_FC64, GxB_ASIN_FC64, GxB_ATAN_FC64,
// z = sinh (x) z = cosh (x) z = tanh (x)
GxB_SINH_FP32, GxB_COSH_FP32, GxB_TANH_FP32,
GxB_SINH_FP64, GxB_COSH_FP64, GxB_TANH_FP64,
GxB_SINH_FC32, GxB_COSH_FC32, GxB_TANH_FC32,
GxB_SINH_FC64, GxB_COSH_FC64, GxB_TANH_FC64,
// z = acosh (x) z = asinh (x) z = atanh (x) z = signum (x)
GxB_ACOSH_FP32, GxB_ASINH_FP32, GxB_ATANH_FP32, GxB_SIGNUM_FP32,
GxB_ACOSH_FP64, GxB_ASINH_FP64, GxB_ATANH_FP64, GxB_SIGNUM_FP64,
GxB_ACOSH_FC32, GxB_ASINH_FC32, GxB_ATANH_FC32, GxB_SIGNUM_FC32,
GxB_ACOSH_FC64, GxB_ASINH_FC64, GxB_ATANH_FC64, GxB_SIGNUM_FC64,
// z = ceil (x) z = floor (x) z = round (x) z = trunc (x)
GxB_CEIL_FP32, GxB_FLOOR_FP32, GxB_ROUND_FP32, GxB_TRUNC_FP32,
GxB_CEIL_FP64, GxB_FLOOR_FP64, GxB_ROUND_FP64, GxB_TRUNC_FP64,
GxB_CEIL_FC32, GxB_FLOOR_FC32, GxB_ROUND_FC32, GxB_TRUNC_FC32,
GxB_CEIL_FC64, GxB_FLOOR_FC64, GxB_ROUND_FC64, GxB_TRUNC_FC64,
// z = exp2 (x) z = expm1 (x) z = log10 (x) z = log1p (x)
GxB_EXP2_FP32, GxB_EXPM1_FP32, GxB_LOG10_FP32, GxB_LOG1P_FP32,
GxB_EXP2_FP64, GxB_EXPM1_FP64, GxB_LOG10_FP64, GxB_LOG1P_FP64,
GxB_EXP2_FC32, GxB_EXPM1_FC32, GxB_LOG10_FC32, GxB_LOG1P_FC32,
GxB_EXP2_FC64, GxB_EXPM1_FC64, GxB_LOG10_FC64, GxB_LOG1P_FC64,
//--------------------------------------------------------------------------
// z = f(x) where z and x are the same type (floating-point real only)
//--------------------------------------------------------------------------
// z = lgamma (x) z = tgamma (x) z = erf (x) z = erfc (x)
GxB_LGAMMA_FP32, GxB_TGAMMA_FP32, GxB_ERF_FP32, GxB_ERFC_FP32,
GxB_LGAMMA_FP64, GxB_TGAMMA_FP64, GxB_ERF_FP64, GxB_ERFC_FP64,
// frexpx and frexpe return the mantissa and exponent, respectively,
// from the ANSI C11 frexp function. The exponent is returned as a
// floating-point value, not an integer.
// z = frexpx (x) z = frexpe (x)
GxB_FREXPX_FP32, GxB_FREXPE_FP32,
GxB_FREXPX_FP64, GxB_FREXPE_FP64,
//--------------------------------------------------------------------------
// z = f(x) where z and x are the same type (complex only)
//--------------------------------------------------------------------------
// z = conj (x)
GxB_CONJ_FC32,
GxB_CONJ_FC64,
//--------------------------------------------------------------------------
// z = f(x) where z is real and x is complex:
//--------------------------------------------------------------------------
// z = creal (x) z = cimag (x) z = carg (x) z = abs (x)
GxB_CREAL_FC32, GxB_CIMAG_FC32, GxB_CARG_FC32, GxB_ABS_FC32,
GxB_CREAL_FC64, GxB_CIMAG_FC64, GxB_CARG_FC64, GxB_ABS_FC64,
//--------------------------------------------------------------------------
// z = f(x) where z is bool and x is any floating-point type
//--------------------------------------------------------------------------
// z = isinf (x)
GxB_ISINF_FP32,
GxB_ISINF_FP64,
GxB_ISINF_FC32, // isinf (creal (x)) || isinf (cimag (x))
GxB_ISINF_FC64, // isinf (creal (x)) || isinf (cimag (x))
// z = isnan (x)
GxB_ISNAN_FP32,
GxB_ISNAN_FP64,
GxB_ISNAN_FC32, // isnan (creal (x)) || isnan (cimag (x))
GxB_ISNAN_FC64, // isnan (creal (x)) || isnan (cimag (x))
// z = isfinite (x)
GxB_ISFINITE_FP32,
GxB_ISFINITE_FP64,
GxB_ISFINITE_FC32, // isfinite (real (x)) && isfinite (cimag (x))
GxB_ISFINITE_FC64 ; // isfinite (real (x)) && isfinite (cimag (x))
//------------------------------------------------------------------------------
// methods for unary operators
//------------------------------------------------------------------------------
// Signature of a user unary function: computes z = f(x).  The first argument
// points to the result z and the second to the (read-only) input x; see the
// GrB_UnaryOp description above for the required typecasting.
typedef void (*GxB_unary_function) (void *, const void *) ;
// GrB_UnaryOp_new creates a user-defined unary op, with an automatic
// detection of the operator name.
// The #undef's allow the function-like macros below to (re)define
// GrB_UnaryOp_new and GrM_UnaryOp_new without conflict.
#undef GrB_UnaryOp_new
#undef GrM_UnaryOp_new
GB_PUBLIC
GrB_Info GRB (UnaryOp_new) // create a new user-defined unary operator
(
GrB_UnaryOp *unaryop, // handle for the new unary operator
GxB_unary_function function, // pointer to the unary function
GrB_Type ztype, // type of output z
GrB_Type xtype // type of input x
) ;
// The macros forward to GxB_UnaryOp_new, passing GB_STR(f) so the operator
// automatically records the name of the user function f; the definition
// string is NULL.
#define GrB_UnaryOp_new(op,f,z,x) \
GxB_UnaryOp_new(op,f,z,x, GB_STR(f), NULL)
#define GrM_UnaryOp_new(op,f,z,x) \
GxM_UnaryOp_new(op,f,z,x, GB_STR(f), NULL)
// GxB_UnaryOp_new creates a named user-defined unary op.
// This is the target of the GrB_UnaryOp_new macro above, which supplies the
// name automatically and passes NULL for the definition.
GB_PUBLIC
GrB_Info GxB_UnaryOp_new // create a new user-defined unary operator
(
GrB_UnaryOp *unaryop, // handle for the new unary operator
GxB_unary_function function, // pointer to the unary function
GrB_Type ztype, // type of output z
GrB_Type xtype, // type of input x
const char *unop_name, // name of the user function
const char *unop_defn // definition of the user function
) ;
// GB_UnaryOp_new is historical: use GxB_UnaryOp_new instead
// NOTE(review): same as GxB_UnaryOp_new but without the unop_defn string.
GB_PUBLIC
GrB_Info GB_UnaryOp_new // not user-callable
(
GrB_UnaryOp *unaryop, // handle for the new unary operator
GxB_unary_function function, // pointer to the unary function
GrB_Type ztype, // type of output z
GrB_Type xtype, // type of input x
const char *unop_name // name of the user function
) ;
// GxB_UnaryOp_ztype is historical. Use GxB_UnaryOp_ztype_name instead.
GB_PUBLIC
GrB_Info GxB_UnaryOp_ztype // return the type of z
(
GrB_Type *ztype, // return type of output z
GrB_UnaryOp unaryop // unary operator
) ;
// GxB_UnaryOp_ztype_name: write the name of the operator's output type z
// into a caller-supplied buffer.
GB_PUBLIC
GrB_Info GxB_UnaryOp_ztype_name // return the type_name of z
(
char *type_name, // user array of size GxB_MAX_NAME_LEN
const GrB_UnaryOp unaryop // unary operator
) ;
// GxB_UnaryOp_xtype is historical. Use GxB_UnaryOp_xtype_name instead.
GB_PUBLIC
GrB_Info GxB_UnaryOp_xtype // return the type of x
(
GrB_Type *xtype, // return type of input x
GrB_UnaryOp unaryop // unary operator
) ;
// GxB_UnaryOp_xtype_name: write the name of the operator's input type x
// into a caller-supplied buffer.
GB_PUBLIC
GrB_Info GxB_UnaryOp_xtype_name // return the type_name of x
(
char *type_name, // user array of size GxB_MAX_NAME_LEN
const GrB_UnaryOp unaryop // unary operator
) ;
// GrB_UnaryOp_free: free a user-created unary operator.
// NOTE(review): presumably a no-op (or an error) for the built-in operators
// above -- confirm against the user guide.
GB_PUBLIC
GrB_Info GrB_UnaryOp_free // free a user-created unary operator
(
GrB_UnaryOp *unaryop // handle of unary operator to free
) ;
//==============================================================================
// GrB_BinaryOp: binary operators
//==============================================================================
// GrB_BinaryOp: a function z=f(x,y). The function f must have the signature:
// void f (void *z, const void *x, const void *y) ;
// The pointers are void * but they are always pointers to objects of type
// ztype, xtype, and ytype, respectively. See Demo/usercomplex.c for examples.
typedef struct GB_BinaryOp_opaque *GrB_BinaryOp ; // opaque handle; contents are private to GraphBLAS
//------------------------------------------------------------------------------
// built-in binary operators, z = f(x,y), where x,y,z all have the same type
//------------------------------------------------------------------------------
// Built-in binary operators z=f(x,y) where x, y, and z all have the same
// type, given by the suffix of the operator name.
GB_PUBLIC GrB_BinaryOp
// operators for all 13 types (including complex):
// GxB_PAIR_T and GrB_ONEB_T are identical; the latter was added to the
// v2.0 C API Specification.
// z = x z = y z = 1 z = pow (x,y)
GrB_FIRST_BOOL, GrB_SECOND_BOOL, GrB_ONEB_BOOL, GxB_POW_BOOL,
GrB_FIRST_INT8, GrB_SECOND_INT8, GrB_ONEB_INT8, GxB_POW_INT8,
GrB_FIRST_INT16, GrB_SECOND_INT16, GrB_ONEB_INT16, GxB_POW_INT16,
GrB_FIRST_INT32, GrB_SECOND_INT32, GrB_ONEB_INT32, GxB_POW_INT32,
GrB_FIRST_INT64, GrB_SECOND_INT64, GrB_ONEB_INT64, GxB_POW_INT64,
GrB_FIRST_UINT8, GrB_SECOND_UINT8, GrB_ONEB_UINT8, GxB_POW_UINT8,
GrB_FIRST_UINT16, GrB_SECOND_UINT16, GrB_ONEB_UINT16, GxB_POW_UINT16,
GrB_FIRST_UINT32, GrB_SECOND_UINT32, GrB_ONEB_UINT32, GxB_POW_UINT32,
GrB_FIRST_UINT64, GrB_SECOND_UINT64, GrB_ONEB_UINT64, GxB_POW_UINT64,
GrB_FIRST_FP32, GrB_SECOND_FP32, GrB_ONEB_FP32, GxB_POW_FP32,
GrB_FIRST_FP64, GrB_SECOND_FP64, GrB_ONEB_FP64, GxB_POW_FP64,
// complex:
GxB_FIRST_FC32, GxB_SECOND_FC32, GxB_ONEB_FC32, GxB_POW_FC32,
GxB_FIRST_FC64, GxB_SECOND_FC64, GxB_ONEB_FC64, GxB_POW_FC64,
// z = x+y z = x-y z = x*y z = x/y
GrB_PLUS_BOOL, GrB_MINUS_BOOL, GrB_TIMES_BOOL, GrB_DIV_BOOL,
GrB_PLUS_INT8, GrB_MINUS_INT8, GrB_TIMES_INT8, GrB_DIV_INT8,
GrB_PLUS_INT16, GrB_MINUS_INT16, GrB_TIMES_INT16, GrB_DIV_INT16,
GrB_PLUS_INT32, GrB_MINUS_INT32, GrB_TIMES_INT32, GrB_DIV_INT32,
GrB_PLUS_INT64, GrB_MINUS_INT64, GrB_TIMES_INT64, GrB_DIV_INT64,
GrB_PLUS_UINT8, GrB_MINUS_UINT8, GrB_TIMES_UINT8, GrB_DIV_UINT8,
GrB_PLUS_UINT16, GrB_MINUS_UINT16, GrB_TIMES_UINT16, GrB_DIV_UINT16,
GrB_PLUS_UINT32, GrB_MINUS_UINT32, GrB_TIMES_UINT32, GrB_DIV_UINT32,
GrB_PLUS_UINT64, GrB_MINUS_UINT64, GrB_TIMES_UINT64, GrB_DIV_UINT64,
GrB_PLUS_FP32, GrB_MINUS_FP32, GrB_TIMES_FP32, GrB_DIV_FP32,
GrB_PLUS_FP64, GrB_MINUS_FP64, GrB_TIMES_FP64, GrB_DIV_FP64,
// complex:
GxB_PLUS_FC32, GxB_MINUS_FC32, GxB_TIMES_FC32, GxB_DIV_FC32,
GxB_PLUS_FC64, GxB_MINUS_FC64, GxB_TIMES_FC64, GxB_DIV_FC64,
// z = y-x z = y/x z = 1 z = any(x,y)
GxB_RMINUS_BOOL, GxB_RDIV_BOOL, GxB_PAIR_BOOL, GxB_ANY_BOOL,
GxB_RMINUS_INT8, GxB_RDIV_INT8, GxB_PAIR_INT8, GxB_ANY_INT8,
GxB_RMINUS_INT16, GxB_RDIV_INT16, GxB_PAIR_INT16, GxB_ANY_INT16,
GxB_RMINUS_INT32, GxB_RDIV_INT32, GxB_PAIR_INT32, GxB_ANY_INT32,
GxB_RMINUS_INT64, GxB_RDIV_INT64, GxB_PAIR_INT64, GxB_ANY_INT64,
GxB_RMINUS_UINT8, GxB_RDIV_UINT8, GxB_PAIR_UINT8, GxB_ANY_UINT8,
GxB_RMINUS_UINT16, GxB_RDIV_UINT16, GxB_PAIR_UINT16, GxB_ANY_UINT16,
GxB_RMINUS_UINT32, GxB_RDIV_UINT32, GxB_PAIR_UINT32, GxB_ANY_UINT32,
GxB_RMINUS_UINT64, GxB_RDIV_UINT64, GxB_PAIR_UINT64, GxB_ANY_UINT64,
GxB_RMINUS_FP32, GxB_RDIV_FP32, GxB_PAIR_FP32, GxB_ANY_FP32,
GxB_RMINUS_FP64, GxB_RDIV_FP64, GxB_PAIR_FP64, GxB_ANY_FP64,
// complex:
GxB_RMINUS_FC32, GxB_RDIV_FC32, GxB_PAIR_FC32, GxB_ANY_FC32,
GxB_RMINUS_FC64, GxB_RDIV_FC64, GxB_PAIR_FC64, GxB_ANY_FC64,
// The GxB_IS* comparators z=f(x,y) return the same type as their
// inputs. Each of them computes z = (x OP y), where x, y, and z all have
// the same type. The value z is either 1 for true or 0 for false, but it
// is a value with the same type as x and y.
// z = (x == y) z = (x != y)
GxB_ISEQ_BOOL, GxB_ISNE_BOOL,
GxB_ISEQ_INT8, GxB_ISNE_INT8,
GxB_ISEQ_INT16, GxB_ISNE_INT16,
GxB_ISEQ_INT32, GxB_ISNE_INT32,
GxB_ISEQ_INT64, GxB_ISNE_INT64,
GxB_ISEQ_UINT8, GxB_ISNE_UINT8,
GxB_ISEQ_UINT16, GxB_ISNE_UINT16,
GxB_ISEQ_UINT32, GxB_ISNE_UINT32,
GxB_ISEQ_UINT64, GxB_ISNE_UINT64,
GxB_ISEQ_FP32, GxB_ISNE_FP32,
GxB_ISEQ_FP64, GxB_ISNE_FP64,
// complex:
GxB_ISEQ_FC32, GxB_ISNE_FC32,
GxB_ISEQ_FC64, GxB_ISNE_FC64,
// z = (x > y) z = (x < y) z = (x >= y) z = (x <= y)
GxB_ISGT_BOOL, GxB_ISLT_BOOL, GxB_ISGE_BOOL, GxB_ISLE_BOOL,
GxB_ISGT_INT8, GxB_ISLT_INT8, GxB_ISGE_INT8, GxB_ISLE_INT8,
GxB_ISGT_INT16, GxB_ISLT_INT16, GxB_ISGE_INT16, GxB_ISLE_INT16,
GxB_ISGT_INT32, GxB_ISLT_INT32, GxB_ISGE_INT32, GxB_ISLE_INT32,
GxB_ISGT_INT64, GxB_ISLT_INT64, GxB_ISGE_INT64, GxB_ISLE_INT64,
GxB_ISGT_UINT8, GxB_ISLT_UINT8, GxB_ISGE_UINT8, GxB_ISLE_UINT8,
GxB_ISGT_UINT16, GxB_ISLT_UINT16, GxB_ISGE_UINT16, GxB_ISLE_UINT16,
GxB_ISGT_UINT32, GxB_ISLT_UINT32, GxB_ISGE_UINT32, GxB_ISLE_UINT32,
GxB_ISGT_UINT64, GxB_ISLT_UINT64, GxB_ISGE_UINT64, GxB_ISLE_UINT64,
GxB_ISGT_FP32, GxB_ISLT_FP32, GxB_ISGE_FP32, GxB_ISLE_FP32,
GxB_ISGT_FP64, GxB_ISLT_FP64, GxB_ISGE_FP64, GxB_ISLE_FP64,
// z = min(x,y) z = max (x,y)
GrB_MIN_BOOL, GrB_MAX_BOOL,
GrB_MIN_INT8, GrB_MAX_INT8,
GrB_MIN_INT16, GrB_MAX_INT16,
GrB_MIN_INT32, GrB_MAX_INT32,
GrB_MIN_INT64, GrB_MAX_INT64,
GrB_MIN_UINT8, GrB_MAX_UINT8,
GrB_MIN_UINT16, GrB_MAX_UINT16,
GrB_MIN_UINT32, GrB_MAX_UINT32,
GrB_MIN_UINT64, GrB_MAX_UINT64,
GrB_MIN_FP32, GrB_MAX_FP32,
GrB_MIN_FP64, GrB_MAX_FP64,
// Binary operators for each of the 11 real types:
// The operators convert non-boolean types internally to boolean and return
// a value 1 or 0 in the same type, for true or false. Each computes z =
// ((x != 0) OP (y != 0)), where x, y, and z all have the same type. These
// operators are useful as multiplicative operators when combined with
// non-boolean monoids of the same type.
// z = (x || y) z = (x && y) z = (x != y)
GxB_LOR_BOOL, GxB_LAND_BOOL, GxB_LXOR_BOOL,
GxB_LOR_INT8, GxB_LAND_INT8, GxB_LXOR_INT8,
GxB_LOR_INT16, GxB_LAND_INT16, GxB_LXOR_INT16,
GxB_LOR_INT32, GxB_LAND_INT32, GxB_LXOR_INT32,
GxB_LOR_INT64, GxB_LAND_INT64, GxB_LXOR_INT64,
GxB_LOR_UINT8, GxB_LAND_UINT8, GxB_LXOR_UINT8,
GxB_LOR_UINT16, GxB_LAND_UINT16, GxB_LXOR_UINT16,
GxB_LOR_UINT32, GxB_LAND_UINT32, GxB_LXOR_UINT32,
GxB_LOR_UINT64, GxB_LAND_UINT64, GxB_LXOR_UINT64,
GxB_LOR_FP32, GxB_LAND_FP32, GxB_LXOR_FP32,
GxB_LOR_FP64, GxB_LAND_FP64, GxB_LXOR_FP64,
// Binary operators that operate only on boolean types: LOR, LAND, LXOR,
// and LXNOR. The naming convention differs (_BOOL is not appended to the
// name). They are the same as GxB_LOR_BOOL, GxB_LAND_BOOL,
// GxB_LXOR_BOOL, and GrB_EQ_BOOL, respectively.
// z = (x || y) z = (x && y) z = (x != y) z = (x == y)
GrB_LOR, GrB_LAND, GrB_LXOR, GrB_LXNOR,
// Operators for floating-point reals:
// z = atan2(x,y) z = hypot(x,y) z = fmod(x,y) z = remainder(x,y)
GxB_ATAN2_FP32, GxB_HYPOT_FP32, GxB_FMOD_FP32, GxB_REMAINDER_FP32,
GxB_ATAN2_FP64, GxB_HYPOT_FP64, GxB_FMOD_FP64, GxB_REMAINDER_FP64,
// z = ldexp(x,y) z = copysign (x,y)
GxB_LDEXP_FP32, GxB_COPYSIGN_FP32,
GxB_LDEXP_FP64, GxB_COPYSIGN_FP64,
// Bitwise operations on signed and unsigned integers: note that
// bitwise operations on signed integers can lead to different results,
// depending on your compiler; results are implementation-defined.
// z = (x | y) z = (x & y) z = (x ^ y) z = ~(x ^ y)
GrB_BOR_INT8, GrB_BAND_INT8, GrB_BXOR_INT8, GrB_BXNOR_INT8,
GrB_BOR_INT16, GrB_BAND_INT16, GrB_BXOR_INT16, GrB_BXNOR_INT16,
GrB_BOR_INT32, GrB_BAND_INT32, GrB_BXOR_INT32, GrB_BXNOR_INT32,
GrB_BOR_INT64, GrB_BAND_INT64, GrB_BXOR_INT64, GrB_BXNOR_INT64,
GrB_BOR_UINT8, GrB_BAND_UINT8, GrB_BXOR_UINT8, GrB_BXNOR_UINT8,
GrB_BOR_UINT16, GrB_BAND_UINT16, GrB_BXOR_UINT16, GrB_BXNOR_UINT16,
GrB_BOR_UINT32, GrB_BAND_UINT32, GrB_BXOR_UINT32, GrB_BXNOR_UINT32,
GrB_BOR_UINT64, GrB_BAND_UINT64, GrB_BXOR_UINT64, GrB_BXNOR_UINT64,
// z = bitget(x,y) z = bitset(x,y) z = bitclr(x,y)
GxB_BGET_INT8, GxB_BSET_INT8, GxB_BCLR_INT8,
GxB_BGET_INT16, GxB_BSET_INT16, GxB_BCLR_INT16,
GxB_BGET_INT32, GxB_BSET_INT32, GxB_BCLR_INT32,
GxB_BGET_INT64, GxB_BSET_INT64, GxB_BCLR_INT64,
GxB_BGET_UINT8, GxB_BSET_UINT8, GxB_BCLR_UINT8,
GxB_BGET_UINT16, GxB_BSET_UINT16, GxB_BCLR_UINT16,
GxB_BGET_UINT32, GxB_BSET_UINT32, GxB_BCLR_UINT32,
GxB_BGET_UINT64, GxB_BSET_UINT64, GxB_BCLR_UINT64 ;
//------------------------------------------------------------------------------
// z=f(x,y) where z and x have the same type, but y is GrB_INT8
//------------------------------------------------------------------------------
// z = bitshift (x,y) computes z = x left-shifted by y bits if y >= 0, or z
// = x right-shifted by (-y) bits if y < 0. z is equal to x if y is zero.
// z and x have the same type, as given by the suffix on the operator name.
// Since y must be signed, it cannot have the same type as x when x is
// unsigned; it is always GrB_INT8 for all 8 versions of this operator.
// The GxB_BSHIFT_* operators compute the arithmetic shift, and produce the
// same results as the bitshift.m function, for all possible inputs.
// Bit-shift operators: x and z have the type given by the suffix; y is
// always GrB_INT8 (see the description above).
GB_PUBLIC GrB_BinaryOp
// z = bitshift(x,y)
GxB_BSHIFT_INT8,
GxB_BSHIFT_INT16,
GxB_BSHIFT_INT32,
GxB_BSHIFT_INT64,
GxB_BSHIFT_UINT8,
GxB_BSHIFT_UINT16,
GxB_BSHIFT_UINT32,
GxB_BSHIFT_UINT64 ;
//------------------------------------------------------------------------------
// z=f(x,y) where z is BOOL and the type of x,y is given by the suffix
//------------------------------------------------------------------------------
GB_PUBLIC GrB_BinaryOp
// Six comparators z=f(x,y) return their result as boolean, but
// where x and y have the same type. The suffix in their names refers to
// the type of x and y since z is always boolean. If used as multiply
// operators in a semiring, they can only be combined with boolean monoids.
// The _BOOL versions of these operators give the same results as their
// IS*_BOOL counterparts. GrB_EQ_BOOL and GrB_LXNOR are identical.
// (Note: only EQ and NE are defined for the complex types; the ordering
// comparators GT, LT, GE, LE have no complex variants.)
// z = (x == y) z = (x != y) z = (x > y) z = (x < y)
GrB_EQ_BOOL, GrB_NE_BOOL, GrB_GT_BOOL, GrB_LT_BOOL,
GrB_EQ_INT8, GrB_NE_INT8, GrB_GT_INT8, GrB_LT_INT8,
GrB_EQ_INT16, GrB_NE_INT16, GrB_GT_INT16, GrB_LT_INT16,
GrB_EQ_INT32, GrB_NE_INT32, GrB_GT_INT32, GrB_LT_INT32,
GrB_EQ_INT64, GrB_NE_INT64, GrB_GT_INT64, GrB_LT_INT64,
GrB_EQ_UINT8, GrB_NE_UINT8, GrB_GT_UINT8, GrB_LT_UINT8,
GrB_EQ_UINT16, GrB_NE_UINT16, GrB_GT_UINT16, GrB_LT_UINT16,
GrB_EQ_UINT32, GrB_NE_UINT32, GrB_GT_UINT32, GrB_LT_UINT32,
GrB_EQ_UINT64, GrB_NE_UINT64, GrB_GT_UINT64, GrB_LT_UINT64,
GrB_EQ_FP32, GrB_NE_FP32, GrB_GT_FP32, GrB_LT_FP32,
GrB_EQ_FP64, GrB_NE_FP64, GrB_GT_FP64, GrB_LT_FP64,
// complex:
GxB_EQ_FC32, GxB_NE_FC32,
GxB_EQ_FC64, GxB_NE_FC64,
// z = (x >= y) z = (x <= y)
GrB_GE_BOOL, GrB_LE_BOOL,
GrB_GE_INT8, GrB_LE_INT8,
GrB_GE_INT16, GrB_LE_INT16,
GrB_GE_INT32, GrB_LE_INT32,
GrB_GE_INT64, GrB_LE_INT64,
GrB_GE_UINT8, GrB_LE_UINT8,
GrB_GE_UINT16, GrB_LE_UINT16,
GrB_GE_UINT32, GrB_LE_UINT32,
GrB_GE_UINT64, GrB_LE_UINT64,
GrB_GE_FP32, GrB_LE_FP32,
GrB_GE_FP64, GrB_LE_FP64 ;
//------------------------------------------------------------------------------
// z=f(x,y) where z is complex and the type of x,y is given by the suffix
//------------------------------------------------------------------------------
GB_PUBLIC GrB_BinaryOp
// z = cmplx (x,y)
// NOTE(review): presumably x is the real part and y the imaginary part of
// the complex result, matching the C11 CMPLX macro -- confirm with the
// user guide.
GxB_CMPLX_FP32,
GxB_CMPLX_FP64 ;
//==============================================================================
// positional GrB_UnaryOp and GrB_BinaryOp operators
//==============================================================================
// Positional operators do not depend on the value of an entry, but its row or
// column index in the matrix instead. For example, for an entry A(i,j),
// first_i(A(i,j),y) is equal to i. These operators are useful for returning
// node id's as the result of a semiring operation. If used as a mask, zero
// has a special value, and thus z=first_i1(A(i,j),j) returns i+1 instead of i.
// This can be useful when using a positional operator to construct a mask
// matrix or vector for another GraphBLAS operation. It is also essential for
// the @GrB interface, since the user view of matrix indices in @GrB is
// 1-based, not 0-based.
// When applied to a vector, j is always equal to 0. For a GxB_SCALAR,
// both i and j are always zero.
// GraphBLAS defines a GrB_Index as uint64_t, but these operators return a
// GrB_INT32 or GrB_INT64 type, which is more flexible to use because the
// result of this operator can be negated, to flag an entry for example. The
// value -1 can be used to denote "no node" or "no position". GrB_INT32 is
// useful for graphs smaller than 2^31 nodes. If the row or column index
// exceeds INT32_MAX, the result is determined by the typecast from the
// 64-bit index to the smaller 32-bit index.
// Positional operators cannot be used to construct monoids. They can be used
// as multiplicative operators in semirings, and as operators for GrB_eWise*,
// and GrB_apply (bind first or second). For the latter, the operator cannot
// depend on the bound scalar.
// When used as multiplicative operators in a semiring, FIRSTJ and SECONDI
// are identical. If C(i,j) += t is computed where t = A(i,k)*B(k,j), then
// t = k in both cases. Likewise, FIRSTJ1 and SECONDI1 are identical.
// Positional binary operators: z depends on the position (i,j) of the entry,
// not its value; the suffix gives the output type (see the notes above).
GB_PUBLIC GrB_BinaryOp
GxB_FIRSTI_INT32, GxB_FIRSTI_INT64, // z = first_i(A(i,j),y) == i
GxB_FIRSTI1_INT32, GxB_FIRSTI1_INT64, // z = first_i1(A(i,j),y) == i+1
GxB_FIRSTJ_INT32, GxB_FIRSTJ_INT64, // z = first_j(A(i,j),y) == j
GxB_FIRSTJ1_INT32, GxB_FIRSTJ1_INT64, // z = first_j1(A(i,j),y) == j+1
GxB_SECONDI_INT32, GxB_SECONDI_INT64, // z = second_i(x,B(i,j)) == i
GxB_SECONDI1_INT32, GxB_SECONDI1_INT64, // z = second_i1(x,B(i,j)) == i+1
GxB_SECONDJ_INT32, GxB_SECONDJ_INT64, // z = second_j(x,B(i,j)) == j
GxB_SECONDJ1_INT32, GxB_SECONDJ1_INT64 ; // z = second_j1(x,B(i,j)) == j+1
// Positional unary operators: same idea as the binary positional operators
// above, but with a single input A(i,j).
GB_PUBLIC GrB_UnaryOp
GxB_POSITIONI_INT32, GxB_POSITIONI_INT64, // z=position_i(A(i,j)) == i
GxB_POSITIONI1_INT32, GxB_POSITIONI1_INT64, // z=position_i1(A(i,j)) == i+1
GxB_POSITIONJ_INT32, GxB_POSITIONJ_INT64, // z=position_j(A(i,j)) == j
GxB_POSITIONJ1_INT32, GxB_POSITIONJ1_INT64 ;// z=position_j1(A(i,j)) == j+1
//==============================================================================
// special GrB_BinaryOp for build methods only
//==============================================================================
// In GrB*build* methods, passing dup as NULL means that no duplicates are
// tolerated. If duplicates appear, an error is returned. If dup is a binary
// operator, it is applied to reduce duplicates to a single value. The
// GxB_IGNORE_DUP is a special case. It is not an operator, but an indication
// that any duplicates are to be ignored.
GB_PUBLIC GrB_BinaryOp GxB_IGNORE_DUP ; // not a real operator: sentinel dup argument for GrB*build*, meaning "ignore duplicates" (see above)
//==============================================================================
// About boolean and bitwise binary operators
//==============================================================================
// Some of the boolean operators compute the same thing with different names.
// For example, x*y and x&&y give the same results for boolean x and y.
// Operations such as x < y when x and y are boolean are treated as if true=1
// and false=0. Below is the truth table for all binary operators with boolean
// inputs. This table is defined by how C typecasts boolean values for
// non-boolean operations. For example, if x, y, and z are boolean, x = true,
// and y = true, then z = x + y = true + true = true. DIV (x/y) is defined
// below. RDIV (y/x) is shown as \ in the table; it is the same as 2nd.
// x y 1st 2nd min max + - * / or and xor eq ne > < ge le \ pow pair
// 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 1 1 0 1 1
// 0 1 0 1 0 1 1 1 0 0 1 0 1 0 1 0 1 0 1 1 0 1
// 1 0 1 0 0 1 1 1 0 1 1 0 1 0 1 1 0 1 0 0 1 1
// 1 1 1 1 1 1 1 0 1 1 1 1 0 1 0 0 0 1 1 1 1 1
// GraphBLAS includes a GrB_DIV_BOOL operator in its specification, but does
// not define what boolean "division" means. SuiteSparse:GraphBLAS makes the
// following interpretation.
// GraphBLAS does not generate exceptions for divide-by-zero. Floating-point
// divide-by-zero follows the IEEE 754 standard: 1/0 is +Inf, -1/0 is -Inf, and
// 0/0 is NaN. For integer division by zero, if x is positive, x/0 is the
// largest integer, -x/0 is the integer minimum (zero for unsigned integers),
// and 0/0 is zero. For example, for int8, 1/0 is 127, and -1/0 is -128. For
// uint8, 1/0 is 255 and 0/0 is zero.
// Boolean division is treated as if it were an unsigned integer type with
// true=1 and false=0, and with the max and min value being 1 and 0. As a
// result, GrB_IDENTITY_BOOL, GrB_AINV_BOOL, and GrB_MINV_BOOL all give the
// same result (z = x).
// With this convention for boolean "division", there are 11 unique binary
// operators that are purely boolean. Other named *_BOOL operators are
// redundant but are included in GraphBLAS so that the name space of operators
// is complete. Below is a list of all operators and their equivalents.
// x: 0 0 1 1
// y: 0 1 0 1
// z: see below
//
// z = 0 0 0 0 0 (zero function, not predefined)
// z = (x && y) 0 0 0 1 AND, MIN, TIMES
// z = (x > y) 0 0 1 0 GT, ISGT, and set diff (x\y)
// z = x 0 0 1 1 FIRST, DIV
//
// z = (x < y) 0 1 0 0 LT, ISLT, and set diff (y\x)
// z = y 0 1 0 1 SECOND, RDIV
// z = (x != y) 0 1 1 0 XOR, MINUS, RMINUS, NE, ISNE
// z = (x || y) 0 1 1 1 OR, MAX, PLUS
//
// z = ~(x || y) 1 0 0 0 (nor(x,y) function, not predefined)
// z = (x == y) 1 0 0 1 LXNOR, EQ, ISEQ
// z = ~y 1 0 1 0 (not(y), not predefined)
// z = (x >= y) 1 0 1 1 GE, ISGE, POW, and "x implies y"
//
// z = ~x 1 1 0 0 (not(x), not predefined)
// z = (x <= y) 1 1 0 1 LE, ISLE, and "y implies x"
// z = ~(x && y) 1 1 1 0 (nand(x,y) function, not predefined)
// z = 1 1 1 1 1 PAIR, ONEB
//
// z = any(x,y) 0 . . 1 ANY (pick x or y arbitrarily)
// Four more that have no _BOOL suffix are also redundant with the operators
// of the form GxB_*_BOOL (GrB_LOR, GrB_LAND, GrB_LXOR, and GrB_LXNOR).
// Note that the boolean binary operator space is not complete. Five other
// boolean functions could be pre-defined as well: z = 0, nor(x,y),
// nand(x,y), not(x), and not(y).
// Four of the possible 16 bitwise operators are pre-defined: BOR, BAND,
// BXOR, and BXNOR. This assumes that the computations for each bit are
// entirely independent (so BSHIFT would not fit in the table above).
//------------------------------------------------------------------------------
// methods for binary operators
//------------------------------------------------------------------------------
// Signature of a user binary function: computes z = f(x,y).  The first
// argument points to the result z; the second and third point to the
// (read-only) inputs x and y; see the GrB_BinaryOp description above.
typedef void (*GxB_binary_function) (void *, const void *, const void *) ;
// GrB_BinaryOp_new creates a user-defined binary op, with an automatic
// detection of the operator name.
// The #undef's allow the function-like macros below to (re)define
// GrB_BinaryOp_new and GrM_BinaryOp_new without conflict.
#undef GrB_BinaryOp_new
#undef GrM_BinaryOp_new
GB_PUBLIC
GrB_Info GRB (BinaryOp_new)
(
GrB_BinaryOp *binaryop, // handle for the new binary operator
GxB_binary_function function, // pointer to the binary function
GrB_Type ztype, // type of output z
GrB_Type xtype, // type of input x
GrB_Type ytype // type of input y
) ;
// The macros forward to GxB_BinaryOp_new, passing GB_STR(f) so the operator
// automatically records the name of the user function f; the definition
// string is NULL.
#define GrB_BinaryOp_new(op,f,z,x,y) \
GxB_BinaryOp_new(op,f,z,x,y, GB_STR(f), NULL)
#define GrM_BinaryOp_new(op,f,z,x,y) \
GxM_BinaryOp_new(op,f,z,x,y, GB_STR(f), NULL)
// GxB_BinaryOp_new creates a named user-defined binary op.
// This is the target of the GrB_BinaryOp_new macro above, which supplies the
// name automatically and passes NULL for the definition.
GB_PUBLIC
GrB_Info GxB_BinaryOp_new
(
GrB_BinaryOp *op, // handle for the new binary operator
GxB_binary_function function, // pointer to the binary function
GrB_Type ztype, // type of output z
GrB_Type xtype, // type of input x
GrB_Type ytype, // type of input y
const char *binop_name, // name of the user function
const char *binop_defn // definition of the user function
) ;
// GB_BinaryOp_new is historical: use GxB_BinaryOp_new instead
// NOTE(review): same as GxB_BinaryOp_new but without the binop_defn string.
GB_PUBLIC
GrB_Info GB_BinaryOp_new // not user-callable
(
GrB_BinaryOp *binaryop, // handle for the new binary operator
GxB_binary_function function, // pointer to the binary function
GrB_Type ztype, // type of output z
GrB_Type xtype, // type of input x
GrB_Type ytype, // type of input y
const char *binop_name // name of the user function
) ;
// NOTE: GxB_BinaryOp_ztype is historical. Use GxB_BinaryOp_ztype_name instead.
GB_PUBLIC
GrB_Info GxB_BinaryOp_ztype // return the type of z
(
GrB_Type *ztype, // return type of output z
GrB_BinaryOp binaryop // binary operator to query
) ;
// GxB_BinaryOp_ztype_name: write the name of the operator's output type z
// into a caller-supplied buffer.
GB_PUBLIC
GrB_Info GxB_BinaryOp_ztype_name // return the type_name of z
(
char *type_name, // user array of size GxB_MAX_NAME_LEN
const GrB_BinaryOp binaryop // binary operator to query
) ;
// NOTE: GxB_BinaryOp_xtype is historical. Use GxB_BinaryOp_xtype_name instead.
GB_PUBLIC
GrB_Info GxB_BinaryOp_xtype // return the type of x
(
GrB_Type *xtype, // return type of input x
GrB_BinaryOp binaryop // binary operator to query
) ;
// GxB_BinaryOp_xtype_name: write the name of the operator's first input
// type x into a caller-supplied buffer.
GB_PUBLIC
GrB_Info GxB_BinaryOp_xtype_name // return the type_name of x
(
char *type_name, // user array of size GxB_MAX_NAME_LEN
const GrB_BinaryOp binaryop // binary operator to query
) ;
// NOTE: GxB_BinaryOp_ytype is historical. Use GxB_BinaryOp_ytype_name instead.
GB_PUBLIC
GrB_Info GxB_BinaryOp_ytype // return the type of y
(
GrB_Type *ytype, // return type of input y
GrB_BinaryOp binaryop // binary operator to query
) ;
// GxB_BinaryOp_ytype_name: write the name of the operator's second input
// type y into a caller-supplied buffer.
GB_PUBLIC
GrB_Info GxB_BinaryOp_ytype_name // return the type_name of y
(
char *type_name, // user array of size GxB_MAX_NAME_LEN
const GrB_BinaryOp binaryop // binary operator to query
) ;
// GrB_BinaryOp_free: free a user-created binary operator.
// NOTE(review): presumably a no-op (or an error) for the built-in operators
// above -- confirm against the user guide.
GB_PUBLIC
GrB_Info GrB_BinaryOp_free // free a user-created binary operator
(
GrB_BinaryOp *binaryop // handle of binary operator to free
) ;
//==============================================================================
// GxB_SelectOp: select operators (historical)
//==============================================================================
// GrB_IndexUnaryOp should be used instead of GxB_SelectOp.
// GxB_SelectOp is an operator used by GxB_select to select entries from an
// input matrix A that are kept in the output C. If an entry A(i,j) in the
// matrix A, of size nrows-by-ncols, has the value aij, then it calls the
// select function as result = f (i, j, aij, thunk). If the function returns
// true, the entry is kept in the output C. If f returns false, the entry is
// not kept in C. The type of x for the GxB_SelectOp operator may be any of
// the 11 built-in types, or any user-defined type. It may also be GrB_NULL,
// to indicate that the function is type-generic and does not depend at all on
// the value aij. In this case, x is passed to f as a NULL pointer.
// The optional Thunk parameter to GxB_select is a GrB_Scalar. For built-in
// select operators (TRIL, TRIU, DIAG, and OFFDIAG), Thunk must have any
// built-in type, and thunk = (int64_t) Thunk is used to specify the diagonal
// for these operators. Thunk may be NULL, in which case its value is treated
// as zero, if it has a built-in type. The value of Thunk (if present) is not
// modified by any built-in select operator.
// For user-defined select operators, Thunk is not typecasted at all. If
// the user operator is defined with a non-NULL Thunk input, then it must
// be non-NULL and of the same type, when calling GxB_select.
// GxB_SelectOp: a function z=f(i,j,x,thunk) for the GxB_Select operation.
// The function f must have the signature:
// bool f (GrB_Index i, GrB_Index j, const void *x, const void *thunk) ;
// The values of i and j are guaranteed to be in the range 0 to
// GrB_INDEX_MAX, and they can be safely typecasted to int64_t then negated,
// if desired, without any risk of integer overflow.
typedef struct GB_SelectOp_opaque *GxB_SelectOp ;
//------------------------------------------------------------------------------
// built-in select operators (historical)
//------------------------------------------------------------------------------
// GxB_select (C, Mask, accum, op, A, Thunk, desc) always returns a matrix C of
// the same size as A (or A' if GrB_TRAN is in the descriptor).
GB_PUBLIC GxB_SelectOp
GxB_TRIL, // C=tril(A,thunk): returns true if ((j-i) <= thunk)
GxB_TRIU, // C=triu(A,thunk): returns true if ((j-i) >= thunk)
GxB_DIAG, // C=diag(A,thunk): returns true if ((j-i) == thunk)
GxB_OFFDIAG, // C=A-diag(A,thunk): returns true if ((j-i) != thunk)
GxB_NONZERO, // C=A(A ~= 0)
GxB_EQ_ZERO, // C=A(A == 0)
GxB_GT_ZERO, // C=A(A > 0)
GxB_GE_ZERO, // C=A(A >= 0)
GxB_LT_ZERO, // C=A(A < 0)
GxB_LE_ZERO, // C=A(A <= 0)
GxB_NE_THUNK, // C=A(A ~= thunk)
GxB_EQ_THUNK, // C=A(A == thunk)
GxB_GT_THUNK, // C=A(A > thunk)
GxB_GE_THUNK, // C=A(A >= thunk)
GxB_LT_THUNK, // C=A(A < thunk)
GxB_LE_THUNK ; // C=A(A <= thunk)
// For GxB_TRIL, GxB_TRIU, GxB_DIAG, and GxB_OFFDIAG, the parameter Thunk is a
// GrB_Scalar of any built-in type. If GrB_NULL, or empty, Thunk is treated as
// zero. Otherwise, the single entry is typecasted as (int64_t) Thunk.
// These select operators do not depend on the values of A, but just their
// position, and they work on matrices of any type.
// For GxB_*ZERO, the result depends only on the value of A(i,j). The Thunk
// parameter to GxB_select is ignored and may be GrB_NULL.
// The operators GxB_TRIL, GxB_TRIU, GxB_DIAG, GxB_OFFDIAG, GxB_NONZERO,
// GxB_EQ_ZERO, GxB_NE_THUNK, and GxB_EQ_THUNK work on all built-in types and
// all user-defined types.
// GxB_GT_*, GxB_GE_*, GxB_LT_*, and GxB_LE_* only work on the 11 built-in
// types (not complex). They cannot be used for user-defined types.
//------------------------------------------------------------------------------
// select operators: (historical)
//------------------------------------------------------------------------------
// User-defined GxB_SelectOps are historical. New code should use
// GrB_IndexUnaryOp_new instead.
typedef bool (*GxB_select_function) // return true if A(i,j) is kept
(
GrB_Index i, // row index of A(i,j)
GrB_Index j, // column index of A(i,j)
const void *x, // value of A(i,j)
const void *thunk // optional input for select function
) ;
#undef GxB_SelectOp_new
#undef GxM_SelectOp_new
GB_PUBLIC
GrB_Info GXB (SelectOp_new) // create a new user-defined select operator
(
GxB_SelectOp *selectop, // handle for the new select operator
GxB_select_function function,// pointer to the select function
GrB_Type xtype, // type of input x, or NULL if type-generic
GrB_Type ttype // type of thunk, or NULL if not used
) ;
#define GxB_SelectOp_new(op,f,x,t) GB_SelectOp_new (op,f,x,t, GB_STR(f))
#define GxM_SelectOp_new(op,f,x,t) GM_SelectOp_new (op,f,x,t, GB_STR(f))
// GB_SelectOp_new should not be called directly, but only through the
// GxB_SelectOp_new macro (but use GrB_IndexUnaryOp_new instead).
GB_PUBLIC
GrB_Info GB_SelectOp_new // not user-callable
(
GxB_SelectOp *selectop, // handle for the new select operator
GxB_select_function function,// pointer to the select function
GrB_Type xtype, // type of input x
GrB_Type ttype, // type of thunk, or NULL if not used
const char *name // name of the underlying function
) ;
// GxB_SelectOp_xtype is historical. Use a GrB_IndexUnaryOp instead.
GB_PUBLIC
GrB_Info GxB_SelectOp_xtype // return the type of x
(
GrB_Type *xtype, // return type of input x
GxB_SelectOp selectop // select operator
) ;
// GxB_SelectOp_ttype is historical. Use a GrB_IndexUnaryOp instead.
GB_PUBLIC
GrB_Info GxB_SelectOp_ttype // return the type of thunk
(
GrB_Type *ttype, // return type of input thunk
GxB_SelectOp selectop // select operator
) ;
GB_PUBLIC
GrB_Info GxB_SelectOp_free // free a user-created select operator
(
GxB_SelectOp *selectop // handle of select operator to free
) ;
//==============================================================================
// GrB_IndexUnaryOp: a unary operator that depends on the row/col indices
//==============================================================================
// The indexop has the form z = f(aij, i, j, y) where aij is the numerical
// value of the A(i,j) entry, i and j are its row and column index, and y
// is a scalar. For vectors, it has the form z = f(vi, i, 0, y).
typedef struct GB_IndexUnaryOp_opaque *GrB_IndexUnaryOp ;
typedef void (*GxB_index_unary_function)
(
void *z, // output value z, of type ztype
const void *x, // input value x of type xtype; value of v(i) or A(i,j)
GrB_Index i, // row index of A(i,j)
GrB_Index j, // column index of A(i,j), or zero for v(i)
const void *y // input scalar y
) ;
// GrB_IndexUnaryOp_new creates a user-defined unary op, with an automatic
// detection of the operator name.
#undef GrB_IndexUnaryOp_new
#undef GrM_IndexUnaryOp_new
GB_PUBLIC
GrB_Info GRB (IndexUnaryOp_new) // create a new user-defined IndexUnary op
(
GrB_IndexUnaryOp *op, // handle for the new IndexUnary operator
GxB_index_unary_function function, // pointer to IndexUnary function
GrB_Type ztype, // type of output z
GrB_Type xtype, // type of input x (the A(i,j) entry)
GrB_Type ytype // type of input y (the scalar)
) ;
#define GrB_IndexUnaryOp_new(op,f,z,x,y) \
GxB_IndexUnaryOp_new(op,f,z,x,y, GB_STR(f), NULL)
#define GrM_IndexUnaryOp_new(op,f,z,x,y) \
GxM_IndexUnaryOp_new(op,f,z,x,y, GB_STR(f), NULL)
GB_PUBLIC
GrB_Info GxB_IndexUnaryOp_new // create a named user-created IndexUnaryOp
(
GrB_IndexUnaryOp *op, // handle for the new IndexUnary operator
GxB_index_unary_function function, // pointer to index_unary function
GrB_Type ztype, // type of output z
GrB_Type xtype, // type of input x
GrB_Type ytype, // type of input y
const char *idxop_name, // name of the user function
const char *idxop_defn // definition of the user function
) ;
GB_PUBLIC
GrB_Info GxB_IndexUnaryOp_ztype_name // return the type_name of z
(
char *type_name, // user array of size GxB_MAX_NAME_LEN
const GrB_IndexUnaryOp op // IndexUnary operator
) ;
// For TRIL, TRIU, DIAG, OFFDIAG, COLLE, COLGT, ROWLE, and ROWGT,
// the xtype_name is an empty string (""), since these functions do not depend
// on the type of the matrix input.
GB_PUBLIC
GrB_Info GxB_IndexUnaryOp_xtype_name // return the type_name of x
(
char *type_name, // user array of size GxB_MAX_NAME_LEN
const GrB_IndexUnaryOp op // IndexUnary operator to query
) ;
GB_PUBLIC
GrB_Info GxB_IndexUnaryOp_ytype_name // return the type_name of the scalar y
(
char *type_name, // user array of size GxB_MAX_NAME_LEN
const GrB_IndexUnaryOp op // IndexUnary operator to query
) ;
GB_PUBLIC
GrB_Info GrB_IndexUnaryOp_free // free a user-created IndexUnaryOp
(
GrB_IndexUnaryOp *op // handle of IndexUnary to free
) ;
//------------------------------------------------------------------------------
// built-in IndexUnaryOps
//------------------------------------------------------------------------------
// To facilitate computations with negative integers, the indices i and j are
// of type int64_t. The scalar y has the type corresponding to the suffix
// of the name of the operator.
GB_PUBLIC GrB_IndexUnaryOp
//--------------------------------------------------------------------------
// Result has the integer type INT32 or INT64, the same as the suffix
//--------------------------------------------------------------------------
// These operators work on any data type, including user-defined.
// ROWINDEX: (i+y): row index plus y
GrB_ROWINDEX_INT32, GrB_ROWINDEX_INT64,
// COLINDEX: (j+y): col index plus y
GrB_COLINDEX_INT32, GrB_COLINDEX_INT64,
// DIAGINDEX: (j-(i+y)): diagonal index plus y
GrB_DIAGINDEX_INT32, GrB_DIAGINDEX_INT64,
//--------------------------------------------------------------------------
// Result is bool, depending only on the indices i,j, and y
//--------------------------------------------------------------------------
// These operators work on any data type, including user-defined.
// The scalar y is int64.
// TRIL: (j <= (i+y)): lower triangular part
GrB_TRIL,
// TRIU: (j >= (i+y)): upper triangular part
GrB_TRIU,
// DIAG: (j == (i+y)): diagonal
GrB_DIAG,
// OFFDIAG: (j != (i+y)): offdiagonal
GrB_OFFDIAG,
// COLLE: (j <= y): columns 0:y
GrB_COLLE,
// COLGT: (j > y): columns y+1:ncols-1
GrB_COLGT,
// ROWLE: (i <= y): rows 0:y
GrB_ROWLE,
// ROWGT: (i > y): rows y+1:nrows-1
GrB_ROWGT,
//--------------------------------------------------------------------------
// Result is bool, depending only on the value aij
//--------------------------------------------------------------------------
// These operators work on matrices and vectors of any built-in type,
// including complex types. aij and the scalar y have the same type as the
// operator suffix.
// VALUEEQ: (aij == y)
GrB_VALUEEQ_INT8, GrB_VALUEEQ_UINT8, GrB_VALUEEQ_FP32, GrB_VALUEEQ_BOOL,
GrB_VALUEEQ_INT16, GrB_VALUEEQ_UINT16, GrB_VALUEEQ_FP64,
GrB_VALUEEQ_INT32, GrB_VALUEEQ_UINT32, GxB_VALUEEQ_FC32,
GrB_VALUEEQ_INT64, GrB_VALUEEQ_UINT64, GxB_VALUEEQ_FC64,
// VALUENE: (aij != y)
GrB_VALUENE_INT8, GrB_VALUENE_UINT8, GrB_VALUENE_FP32, GrB_VALUENE_BOOL,
GrB_VALUENE_INT16, GrB_VALUENE_UINT16, GrB_VALUENE_FP64,
GrB_VALUENE_INT32, GrB_VALUENE_UINT32, GxB_VALUENE_FC32,
GrB_VALUENE_INT64, GrB_VALUENE_UINT64, GxB_VALUENE_FC64,
// These operators work on matrices and vectors of any real (non-complex)
// built-in type.
// VALUELT: (aij < y)
GrB_VALUELT_INT8, GrB_VALUELT_UINT8, GrB_VALUELT_FP32, GrB_VALUELT_BOOL,
GrB_VALUELT_INT16, GrB_VALUELT_UINT16, GrB_VALUELT_FP64,
GrB_VALUELT_INT32, GrB_VALUELT_UINT32,
GrB_VALUELT_INT64, GrB_VALUELT_UINT64,
// VALUELE: (aij <= y)
GrB_VALUELE_INT8, GrB_VALUELE_UINT8, GrB_VALUELE_FP32, GrB_VALUELE_BOOL,
GrB_VALUELE_INT16, GrB_VALUELE_UINT16, GrB_VALUELE_FP64,
GrB_VALUELE_INT32, GrB_VALUELE_UINT32,
GrB_VALUELE_INT64, GrB_VALUELE_UINT64,
// VALUEGT: (aij > y)
GrB_VALUEGT_INT8, GrB_VALUEGT_UINT8, GrB_VALUEGT_FP32, GrB_VALUEGT_BOOL,
GrB_VALUEGT_INT16, GrB_VALUEGT_UINT16, GrB_VALUEGT_FP64,
GrB_VALUEGT_INT32, GrB_VALUEGT_UINT32,
GrB_VALUEGT_INT64, GrB_VALUEGT_UINT64,
// VALUEGE: (aij >= y)
GrB_VALUEGE_INT8, GrB_VALUEGE_UINT8, GrB_VALUEGE_FP32, GrB_VALUEGE_BOOL,
GrB_VALUEGE_INT16, GrB_VALUEGE_UINT16, GrB_VALUEGE_FP64,
GrB_VALUEGE_INT32, GrB_VALUEGE_UINT32,
GrB_VALUEGE_INT64, GrB_VALUEGE_UINT64 ;
//==============================================================================
// GrB_Monoid
//==============================================================================
// A monoid is an associative operator z=op(x,y) where all three types of z, x,
// and y are identical. The monoid also has an identity element, such that
// op(x,identity) = op(identity,x) = x.
typedef struct GB_Monoid_opaque *GrB_Monoid ;
GB_PUBLIC
GrB_Info GrB_Monoid_new_BOOL // create a new boolean monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
bool identity // identity value of the monoid
) ;
GB_PUBLIC
GrB_Info GrB_Monoid_new_INT8 // create a new int8 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
int8_t identity // identity value of the monoid
) ;
GB_PUBLIC
GrB_Info GrB_Monoid_new_UINT8 // create a new uint8 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
uint8_t identity // identity value of the monoid
) ;
GB_PUBLIC
GrB_Info GrB_Monoid_new_INT16 // create a new int16 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
int16_t identity // identity value of the monoid
) ;
GB_PUBLIC
GrB_Info GrB_Monoid_new_UINT16 // create a new uint16 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
uint16_t identity // identity value of the monoid
) ;
GB_PUBLIC
GrB_Info GrB_Monoid_new_INT32 // create a new int32 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
int32_t identity // identity value of the monoid
) ;
GB_PUBLIC
GrB_Info GrB_Monoid_new_UINT32 // create a new uint32 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
uint32_t identity // identity value of the monoid
) ;
GB_PUBLIC
GrB_Info GrB_Monoid_new_INT64 // create a new int64 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
int64_t identity // identity value of the monoid
) ;
GB_PUBLIC
GrB_Info GrB_Monoid_new_UINT64 // create a new uint64 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
uint64_t identity // identity value of the monoid
) ;
GB_PUBLIC
GrB_Info GrB_Monoid_new_FP32 // create a new float monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
float identity // identity value of the monoid
) ;
GB_PUBLIC
GrB_Info GrB_Monoid_new_FP64 // create a new double monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
double identity // identity value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_new_FC32 // create a new float complex monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
GxB_FC32_t identity // identity value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_new_FC64 // create a new double complex monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
GxB_FC64_t identity // identity value of the monoid
) ;
GB_PUBLIC
GrB_Info GrB_Monoid_new_UDT // create a monoid with a user-defined type
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
void *identity // identity value of the monoid
) ;
// Type-generic method for creating a new monoid:
/*
GB_PUBLIC
GrB_Info GrB_Monoid_new // create a monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
<type> identity // identity value of the monoid
) ;
*/
#if GxB_STDC_VERSION >= 201112L
#define GrB_Monoid_new(monoid,op,identity) \
_Generic \
( \
(identity), \
GB_CASES (, GrB, Monoid_new) \
) \
(monoid, op, identity)
#endif
// GxB_Monoid_terminal_new is identical to GrB_Monoid_new, except that a
// terminal value can be specified. The terminal may be NULL, which indicates
// no terminal value (and in this case, it is identical to GrB_Monoid_new).
// The terminal value, if not NULL, must have the same type as the identity.
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new_BOOL // create a new boolean monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
bool identity, // identity value of the monoid
bool terminal // terminal value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new_INT8 // create a new int8 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
int8_t identity, // identity value of the monoid
int8_t terminal // terminal value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new_UINT8 // create a new uint8 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
uint8_t identity, // identity value of the monoid
uint8_t terminal // terminal value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new_INT16 // create a new int16 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
int16_t identity, // identity value of the monoid
int16_t terminal // terminal value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new_UINT16 // create a new uint16 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
uint16_t identity, // identity value of the monoid
uint16_t terminal // terminal value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new_INT32 // create a new int32 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
int32_t identity, // identity value of the monoid
int32_t terminal // terminal value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new_UINT32 // create a new uint32 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
uint32_t identity, // identity value of the monoid
uint32_t terminal // terminal value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new_INT64 // create a new int64 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
int64_t identity, // identity value of the monoid
int64_t terminal // terminal value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new_UINT64 // create a new uint64 monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
uint64_t identity, // identity value of the monoid
uint64_t terminal // terminal value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new_FP32 // create a new float monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
float identity, // identity value of the monoid
float terminal // terminal value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new_FP64 // create a new double monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
double identity, // identity value of the monoid
double terminal // terminal value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new_FC32 // create a new float complex monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
GxB_FC32_t identity, // identity value of the monoid
GxB_FC32_t terminal // terminal value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new_FC64 // create a new double complex monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
GxB_FC64_t identity, // identity value of the monoid
GxB_FC64_t terminal // terminal value of the monoid
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new_UDT // create a monoid with a user type
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
void *identity, // identity value of the monoid
void *terminal // terminal value of the monoid
) ;
// Type-generic method for creating a new monoid with a terminal value:
/*
GB_PUBLIC
GrB_Info GxB_Monoid_terminal_new // create a monoid
(
GrB_Monoid *monoid, // handle of monoid to create
GrB_BinaryOp op, // binary operator of the monoid
<type> identity, // identity value of the monoid
<type> terminal // terminal value of the monoid
) ;
*/
#if GxB_STDC_VERSION >= 201112L
#define GxB_Monoid_terminal_new(monoid,op,identity,terminal) \
_Generic \
( \
(identity), \
GB_CASES (, GxB, Monoid_terminal_new) \
) \
(monoid, op, identity, terminal)
#endif
GB_PUBLIC
GrB_Info GxB_Monoid_operator // return the monoid operator
(
GrB_BinaryOp *op, // returns the binary op of the monoid
GrB_Monoid monoid // monoid to query
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_identity // return the monoid identity
(
void *identity, // returns the identity of the monoid
GrB_Monoid monoid // monoid to query
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_terminal // return the monoid terminal
(
bool *has_terminal, // true if the monoid has a terminal value
void *terminal, // returns the terminal of the monoid,
// unmodified if has_terminal is false
GrB_Monoid monoid // monoid to query
) ;
GB_PUBLIC
GrB_Info GrB_Monoid_free // free a user-created monoid
(
GrB_Monoid *monoid // handle of monoid to free
) ;
//==============================================================================
// GrB_Semiring
//==============================================================================
typedef struct GB_Semiring_opaque *GrB_Semiring ;
GB_PUBLIC
GrB_Info GrB_Semiring_new // create a semiring
(
GrB_Semiring *semiring, // handle of semiring to create
GrB_Monoid add, // add monoid of the semiring
GrB_BinaryOp multiply // multiply operator of the semiring
) ;
GB_PUBLIC
GrB_Info GxB_Semiring_add // return the add monoid of a semiring
(
GrB_Monoid *add, // returns add monoid of the semiring
GrB_Semiring semiring // semiring to query
) ;
GB_PUBLIC
GrB_Info GxB_Semiring_multiply // return multiply operator of a semiring
(
GrB_BinaryOp *multiply, // returns multiply operator of the semiring
GrB_Semiring semiring // semiring to query
) ;
GB_PUBLIC
GrB_Info GrB_Semiring_free // free a user-created semiring
(
GrB_Semiring *semiring // handle of semiring to free
) ;
//==============================================================================
// GrB_Scalar: a GraphBLAS scalar
//==============================================================================
// GxB_Scalar has become GrB_Scalar. The older name GxB_Scalar is kept as
// historical, but GrB_Scalar should be used instead.
typedef struct GB_Scalar_opaque *GxB_Scalar ; // historical: use GrB_Scalar
typedef struct GB_Scalar_opaque *GrB_Scalar ; // use this instead
// These methods create, free, copy, and clear a GrB_Scalar. The nvals,
// and type methods return basic information about a GrB_Scalar.
GB_PUBLIC
GrB_Info GrB_Scalar_new // create a new GrB_Scalar with no entry
(
GrB_Scalar *s, // handle of GrB_Scalar to create
GrB_Type type // type of GrB_Scalar to create
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_dup // make an exact copy of a GrB_Scalar
(
GrB_Scalar *s, // handle of output GrB_Scalar to create
const GrB_Scalar t // input GrB_Scalar to copy
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_clear // clear a GrB_Scalar of its entry
( // type remains unchanged.
GrB_Scalar s // GrB_Scalar to clear
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_nvals // get the number of entries in a GrB_Scalar
(
GrB_Index *nvals, // GrB_Scalar has nvals entries (0 or 1)
const GrB_Scalar s // GrB_Scalar to query
) ;
// NOTE: GxB_Scalar_type is historical. Use GxB_Scalar_type_name instead.
GB_PUBLIC
GrB_Info GxB_Scalar_type // get the type of a GrB_Scalar
(
GrB_Type *type, // returns the type of the GrB_Scalar
const GrB_Scalar s // GrB_Scalar to query
) ;
GB_PUBLIC
GrB_Info GxB_Scalar_type_name // return the name of the type of a scalar
(
char *type_name, // name of the type (char array of size at least
// GxB_MAX_NAME_LEN, owned by the user application).
const GrB_Scalar s // GrB_Scalar to query
) ;
GB_PUBLIC
GrB_Info GxB_Scalar_memoryUsage // return # of bytes used for a scalar
(
size_t *size, // # of bytes used by the scalar s
const GrB_Scalar s // GrB_Scalar to query
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_free // free a GrB_Scalar
(
GrB_Scalar *s // handle of GrB_Scalar to free
) ;
// historical names identical to GrB_Scalar_methods above:
GB_PUBLIC GrB_Info GxB_Scalar_new (GrB_Scalar *s, GrB_Type type) ;
GB_PUBLIC GrB_Info GxB_Scalar_dup (GrB_Scalar *s, const GrB_Scalar t) ;
GB_PUBLIC GrB_Info GxB_Scalar_clear (GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_nvals (GrB_Index *nvals, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_free (GrB_Scalar *s) ;
//------------------------------------------------------------------------------
// GrB_Scalar_setElement
//------------------------------------------------------------------------------
// Set a single GrB_Scalar s, from a user scalar x: s = x, typecasting from the
// type of x to the type of s as needed.
GB_PUBLIC
GrB_Info GrB_Scalar_setElement_BOOL // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
bool x // user scalar to assign to s
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_setElement_INT8 // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
int8_t x // user scalar to assign to s
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_setElement_UINT8 // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
uint8_t x // user scalar to assign to s
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_setElement_INT16 // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
int16_t x // user scalar to assign to s
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_setElement_UINT16 // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
uint16_t x // user scalar to assign to s
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_setElement_INT32 // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
int32_t x // user scalar to assign to s
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_setElement_UINT32 // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
uint32_t x // user scalar to assign to s
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_setElement_INT64 // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
int64_t x // user scalar to assign to s
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_setElement_UINT64 // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
uint64_t x // user scalar to assign to s
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_setElement_FP32 // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
float x // user scalar to assign to s
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_setElement_FP64 // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
double x // user scalar to assign to s
) ;
GB_PUBLIC
GrB_Info GxB_Scalar_setElement_FC32 // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
GxB_FC32_t x // user scalar to assign to s
) ;
GB_PUBLIC
GrB_Info GxB_Scalar_setElement_FC64 // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
GxB_FC64_t x // user scalar to assign to s
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_setElement_UDT // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
void *x // user scalar to assign to s
) ;
// historical names identical to GrB_Scalar_methods above:
GB_PUBLIC GrB_Info GxB_Scalar_setElement_BOOL (GrB_Scalar s, bool x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_INT8 (GrB_Scalar s, int8_t x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_INT16 (GrB_Scalar s, int16_t x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_INT32 (GrB_Scalar s, int32_t x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_INT64 (GrB_Scalar s, int64_t x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_UINT8 (GrB_Scalar s, uint8_t x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_UINT16 (GrB_Scalar s, uint16_t x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_UINT32 (GrB_Scalar s, uint32_t x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_UINT64 (GrB_Scalar s, uint64_t x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_FP32 (GrB_Scalar s, float x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_FP64 (GrB_Scalar s, double x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_UDT (GrB_Scalar s, void *x) ;
// Type-generic version: x can be any supported C type or void * for a
// user-defined type.
/*
GB_PUBLIC
GrB_Info GrB_Scalar_setElement // s = x
(
GrB_Scalar s, // GrB_Scalar to modify
<type> x // user scalar to assign to s
) ;
*/
#if GxB_STDC_VERSION >= 201112L
#define GrB_Scalar_setElement(s,x) \
_Generic \
( \
(x), \
GB_CASES (, GrB, Scalar_setElement) \
) \
(s, x)
#define GxB_Scalar_setElement(s,x) GrB_Scalar_setElement (s, x)
#endif
//------------------------------------------------------------------------------
// GrB_Scalar_extractElement
//------------------------------------------------------------------------------
// Extract a single entry from a GrB_Scalar, x = s, typecasting from the type
// of s to the type of x as needed.
GB_PUBLIC
GrB_Info GrB_Scalar_extractElement_BOOL // x = s
(
bool *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_extractElement_INT8 // x = s
(
int8_t *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_extractElement_UINT8 // x = s
(
uint8_t *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_extractElement_INT16 // x = s
(
int16_t *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_extractElement_UINT16 // x = s
(
uint16_t *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_extractElement_INT32 // x = s
(
int32_t *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_extractElement_UINT32 // x = s
(
uint32_t *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_extractElement_INT64 // x = s
(
int64_t *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_extractElement_UINT64 // x = s
(
uint64_t *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_extractElement_FP32 // x = s
(
float *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_extractElement_FP64 // x = s
(
double *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
GB_PUBLIC
GrB_Info GxB_Scalar_extractElement_FC32 // x = s
(
GxB_FC32_t *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
GB_PUBLIC
GrB_Info GxB_Scalar_extractElement_FC64 // x = s
(
GxB_FC64_t *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
GB_PUBLIC
GrB_Info GrB_Scalar_extractElement_UDT // x = s
(
void *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
// historical names identical to GrB_Scalar_methods above:
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_BOOL (bool *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_INT8 (int8_t *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_INT16 (int16_t *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_INT32 (int32_t *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_INT64 (int64_t *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_UINT8 (uint8_t *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_UINT16 (uint16_t *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_UINT32 (uint32_t *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_UINT64 (uint64_t *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_FP32 (float *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_FP64 (double *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_UDT (void *x, const GrB_Scalar s) ;
// Type-generic version: x can be a pointer to any supported C type or void *
// for a user-defined type.
/*
GB_PUBLIC
GrB_Info GrB_Scalar_extractElement // x = s
(
<type> *x, // user scalar extracted
const GrB_Scalar s // GrB_Scalar to extract an entry from
) ;
*/
#if GxB_STDC_VERSION >= 201112L
#define GrB_Scalar_extractElement(x,s) \
_Generic \
( \
(x), \
GB_CASES (*, GrB, Scalar_extractElement) \
) \
(x, s)
#define GxB_Scalar_extractElement(x,s) GrB_Scalar_extractElement (x, s)
#endif
//==============================================================================
// GrB_Vector: a GraphBLAS vector
//==============================================================================
typedef struct GB_Vector_opaque *GrB_Vector ;
// These methods create, free, copy, and clear a vector. The size, nvals,
// and type methods return basic information about a vector.
GB_PUBLIC
GrB_Info GrB_Vector_new // create a new vector with no entries
(
GrB_Vector *v, // handle of vector to create
GrB_Type type, // type of vector to create
GrB_Index n // vector dimension is n-by-1
// (n must be <= GrB_INDEX_MAX+1)
) ;
GB_PUBLIC
GrB_Info GrB_Vector_dup // make an exact copy of a vector
(
GrB_Vector *w, // handle of output vector to create
const GrB_Vector u // input vector to copy
) ;
GB_PUBLIC
GrB_Info GrB_Vector_clear // clear a vector of all entries;
( // type and dimension remain unchanged.
GrB_Vector v // vector to clear
) ;
GB_PUBLIC
GrB_Info GrB_Vector_size // get the dimension of a vector
(
GrB_Index *n, // vector dimension is n-by-1
const GrB_Vector v // vector to query
) ;
GB_PUBLIC
GrB_Info GrB_Vector_nvals // get the number of entries in a vector
(
GrB_Index *nvals, // vector has nvals entries
const GrB_Vector v // vector to query
) ;
// NOTE: GxB_Vector_type is historical. Use GxB_Vector_type_name instead.
GB_PUBLIC
GrB_Info GxB_Vector_type // get the type of a vector
(
GrB_Type *type, // returns the type of the vector
const GrB_Vector v // vector to query
) ;
GB_PUBLIC
GrB_Info GxB_Vector_type_name // return the name of the type of a vector
(
char *type_name, // name of the type (char array of size at least
// GxB_MAX_NAME_LEN, owned by the user application).
const GrB_Vector v // vector to query
) ;
GB_PUBLIC
GrB_Info GxB_Vector_memoryUsage // return # of bytes used for a vector
(
size_t *size, // # of bytes used by the vector v
const GrB_Vector v // vector to query
) ;
GB_PUBLIC
GrB_Info GxB_Vector_iso // return iso status of a vector
(
bool *iso, // true if the vector is iso-valued
const GrB_Vector v // vector to query
) ;
GB_PUBLIC
GrB_Info GrB_Vector_free // free a vector
(
GrB_Vector *v // handle of vector to free
) ;
//------------------------------------------------------------------------------
// GrB_Vector_build
//------------------------------------------------------------------------------
// GrB_Vector_build: w = sparse (I,1,X), but using any
// associative operator to assemble duplicate entries.
GB_PUBLIC
GrB_Info GrB_Vector_build_BOOL // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const bool *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Vector_build_INT8 // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const int8_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Vector_build_UINT8 // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const uint8_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Vector_build_INT16 // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const int16_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Vector_build_UINT16 // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const uint16_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Vector_build_INT32 // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const int32_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Vector_build_UINT32 // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const uint32_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Vector_build_INT64 // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const int64_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Vector_build_UINT64 // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const uint64_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Vector_build_FP32 // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const float *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Vector_build_FP64 // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const double *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GxB_Vector_build_FC32 // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const GxB_FC32_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GxB_Vector_build_FC64 // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const GxB_FC64_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Vector_build_UDT // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const void *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GxB_Vector_build_Scalar // build a vector from (i,scalar) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
GrB_Scalar scalar, // value for all tuples
GrB_Index nvals // number of tuples
) ;
// Type-generic version: X can be a pointer to any supported C type or void *
// for a user-defined type.
/*
GB_PUBLIC
GrB_Info GrB_Vector_build // build a vector from (I,X) tuples
(
GrB_Vector w, // vector to build
const GrB_Index *I, // array of row indices of tuples
const <type> *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
*/
#if GxB_STDC_VERSION >= 201112L
// Type-generic wrapper (C11 _Generic): dispatches on the pointer type of X to
// the corresponding GrB_Vector_build_<TYPE> function.  X is passed through a
// (const void *) cast so the one macro expansion is a valid argument for
// every typed variant (void* converts implicitly to any object pointer in C).
#define GrB_Vector_build(w,I,X,nvals,dup) \
_Generic \
( \
(X), \
GB_CASES (*, GrB, Vector_build) \
) \
(w, I, ((const void *) (X)), nvals, dup)
#endif
//------------------------------------------------------------------------------
// GrB_Vector_setElement
//------------------------------------------------------------------------------
// Set a single scalar in a vector, w(i) = x, typecasting from the type of x to
// the type of w as needed.
GB_PUBLIC
GrB_Info GrB_Vector_setElement_BOOL // w(i) = x
(
GrB_Vector w, // vector to modify
bool x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_setElement_INT8 // w(i) = x
(
GrB_Vector w, // vector to modify
int8_t x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_setElement_UINT8 // w(i) = x
(
GrB_Vector w, // vector to modify
uint8_t x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_setElement_INT16 // w(i) = x
(
GrB_Vector w, // vector to modify
int16_t x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_setElement_UINT16 // w(i) = x
(
GrB_Vector w, // vector to modify
uint16_t x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_setElement_INT32 // w(i) = x
(
GrB_Vector w, // vector to modify
int32_t x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_setElement_UINT32 // w(i) = x
(
GrB_Vector w, // vector to modify
uint32_t x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_setElement_INT64 // w(i) = x
(
GrB_Vector w, // vector to modify
int64_t x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_setElement_UINT64 // w(i) = x
(
GrB_Vector w, // vector to modify
uint64_t x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_setElement_FP32 // w(i) = x
(
GrB_Vector w, // vector to modify
float x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_setElement_FP64 // w(i) = x
(
GrB_Vector w, // vector to modify
double x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GxB_Vector_setElement_FC32 // w(i) = x
(
GrB_Vector w, // vector to modify
GxB_FC32_t x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GxB_Vector_setElement_FC64 // w(i) = x
(
GrB_Vector w, // vector to modify
GxB_FC64_t x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_setElement_UDT // w(i) = x
(
GrB_Vector w, // vector to modify
void *x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_setElement_Scalar // w(i) = x
(
GrB_Vector w, // vector to modify
GrB_Scalar x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
// Type-generic version: x can be any supported C type or void * for a
// user-defined type.
/*
GB_PUBLIC
GrB_Info GrB_Vector_setElement // w(i) = x
(
GrB_Vector w, // vector to modify
<type> x, // scalar to assign to w(i)
GrB_Index i // row index
) ;
*/
#if GxB_STDC_VERSION >= 201112L
// Type-generic wrapper (C11 _Generic): dispatches on the (value) type of x to
// GrB_Vector_setElement_<TYPE>.  GB_CASES with an empty first argument lists
// the non-pointer built-in types; any other type (in particular a GrB_Scalar)
// falls through to the default: GrB_Vector_setElement_Scalar.
#define GrB_Vector_setElement(w,x,i) \
_Generic \
( \
(x), \
GB_CASES (, GrB, Vector_setElement), \
default: GrB_Vector_setElement_Scalar \
) \
(w, x, i)
#endif
//------------------------------------------------------------------------------
// GrB_Vector_extractElement
//------------------------------------------------------------------------------
// Extract a single entry from a vector, x = v(i), typecasting from the type of
// v to the type of x as needed.
GB_PUBLIC
GrB_Info GrB_Vector_extractElement_BOOL // x = v(i)
(
bool *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractElement_INT8 // x = v(i)
(
int8_t *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractElement_UINT8 // x = v(i)
(
uint8_t *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractElement_INT16 // x = v(i)
(
int16_t *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractElement_UINT16 // x = v(i)
(
uint16_t *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractElement_INT32 // x = v(i)
(
int32_t *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractElement_UINT32 // x = v(i)
(
uint32_t *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractElement_INT64 // x = v(i)
(
int64_t *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractElement_UINT64 // x = v(i)
(
uint64_t *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractElement_FP32 // x = v(i)
(
float *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractElement_FP64 // x = v(i)
(
double *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GxB_Vector_extractElement_FC32 // x = v(i)
(
GxB_FC32_t *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GxB_Vector_extractElement_FC64 // x = v(i)
(
GxB_FC64_t *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractElement_UDT // x = v(i)
(
void *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractElement_Scalar // x = v(i)
(
GrB_Scalar x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
// Type-generic version: x can be a pointer to any supported C type or void *
// for a user-defined type.
/*
GB_PUBLIC
GrB_Info GrB_Vector_extractElement // x = v(i)
(
<type> *x, // scalar extracted
const GrB_Vector v, // vector to extract an entry from
GrB_Index i // row index
) ;
*/
#if GxB_STDC_VERSION >= 201112L
// Type-generic wrapper (C11 _Generic): dispatches on the pointer type of x to
// GrB_Vector_extractElement_<TYPE>.  A non-built-in x (in particular a
// GrB_Scalar) falls through to the default: _Scalar variant.
#define GrB_Vector_extractElement(x,v,i) \
_Generic \
( \
(x), \
GB_CASES (*, GrB, Vector_extractElement), \
default: GrB_Vector_extractElement_Scalar \
) \
(x, v, i)
#endif
//------------------------------------------------------------------------------
// GrB_Vector_removeElement
//------------------------------------------------------------------------------
// GrB_Vector_removeElement (v,i) removes the element v(i) from the vector v.
GB_PUBLIC
GrB_Info GrB_Vector_removeElement
(
GrB_Vector v, // vector to remove an element from
GrB_Index i // index
) ;
//------------------------------------------------------------------------------
// GrB_Vector_extractTuples
//------------------------------------------------------------------------------
// Extracts all tuples from a vector, like [I,~,X] = find (v). If
// any parameter I and/or X is NULL, then that component is not extracted. For
// example, to extract just the row indices, pass I as non-NULL, and X as NULL.
// This is like [I,~,~] = find (v).
GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_BOOL // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
bool *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_INT8 // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
int8_t *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_UINT8 // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
uint8_t *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_INT16 // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
int16_t *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_UINT16 // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
uint16_t *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_INT32 // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
int32_t *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_UINT32 // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
uint32_t *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_INT64 // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
int64_t *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_UINT64 // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
uint64_t *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_FP32 // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
float *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_FP64 // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
double *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
GB_PUBLIC
GrB_Info GxB_Vector_extractTuples_FC32 // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
GxB_FC32_t *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
GB_PUBLIC
GrB_Info GxB_Vector_extractTuples_FC64 // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
GxB_FC64_t *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Vector_extractTuples_UDT // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
void *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
// Type-generic version: X can be a pointer to any supported C type or void *
// for a user-defined type.
/*
GB_PUBLIC
GrB_Info GrB_Vector_extractTuples // [I,~,X] = find (v)
(
GrB_Index *I, // array for returning row indices of tuples
<type> *X, // array for returning values of tuples
GrB_Index *nvals, // I, X size on input; # tuples on output
const GrB_Vector v // vector to extract tuples from
) ;
*/
#if GxB_STDC_VERSION >= 201112L
// Type-generic wrapper (C11 _Generic): dispatches on the pointer type of X to
// GrB_Vector_extractTuples_<TYPE>.  No default clause: X must be a pointer to
// one of the supported built-in types (or void* for a user-defined type).
#define GrB_Vector_extractTuples(I,X,nvals,v) \
_Generic \
( \
(X), \
GB_CASES (*, GrB, Vector_extractTuples) \
) \
(I, X, nvals, v)
#endif
//==============================================================================
// GrB_Matrix: a GraphBLAS matrix
//==============================================================================
// A GrB_Matrix is an opaque handle: a pointer to a struct whose contents are
// private to the library and must only be accessed via the methods below.
typedef struct GB_Matrix_opaque *GrB_Matrix ;
// These methods create, free, copy, and clear a matrix. The nrows, ncols,
// nvals, and type methods return basic information about a matrix.
GB_PUBLIC
GrB_Info GrB_Matrix_new // create a new matrix with no entries
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create
GrB_Index nrows, // matrix dimension is nrows-by-ncols
GrB_Index ncols // (nrows and ncols must be <= GrB_INDEX_MAX+1)
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_dup // make an exact copy of a matrix
(
GrB_Matrix *C, // handle of output matrix to create
const GrB_Matrix A // input matrix to copy
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_clear // clear a matrix of all entries;
( // type and dimensions remain unchanged
GrB_Matrix A // matrix to clear
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_nrows // get the number of rows of a matrix
(
GrB_Index *nrows, // matrix has nrows rows
const GrB_Matrix A // matrix to query
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_ncols // get the number of columns of a matrix
(
GrB_Index *ncols, // matrix has ncols columns
const GrB_Matrix A // matrix to query
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_nvals // get the number of entries in a matrix
(
GrB_Index *nvals, // matrix has nvals entries
const GrB_Matrix A // matrix to query
) ;
// NOTE: GxB_Matrix_type is historical. Use GxB_Matrix_type_name instead.
GB_PUBLIC
GrB_Info GxB_Matrix_type // get the type of a matrix
(
GrB_Type *type, // returns the type of the matrix
const GrB_Matrix A // matrix to query
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_type_name // return the name of the type of a matrix
(
char *type_name, // name of the type (char array of size at least
// GxB_MAX_NAME_LEN, owned by the user application).
const GrB_Matrix A // matrix to query
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_memoryUsage // return # of bytes used for a matrix
(
size_t *size, // # of bytes used by the matrix A
const GrB_Matrix A // matrix to query
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_iso // return iso status of a matrix
(
bool *iso, // true if the matrix is iso-valued
const GrB_Matrix A // matrix to query
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_free // free a matrix
(
GrB_Matrix *A // handle of matrix to free
) ;
//------------------------------------------------------------------------------
// GrB_Matrix_build
//------------------------------------------------------------------------------
// GrB_Matrix_build: C = sparse (I,J,X), but using any
// associative operator to assemble duplicate entries.
GB_PUBLIC
GrB_Info GrB_Matrix_build_BOOL // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const bool *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_build_INT8 // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const int8_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_build_UINT8 // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const uint8_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_build_INT16 // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const int16_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_build_UINT16 // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const uint16_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_build_INT32 // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const int32_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_build_UINT32 // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const uint32_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_build_INT64 // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const int64_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_build_UINT64 // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const uint64_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_build_FP32 // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const float *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_build_FP64 // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const double *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_build_FC32 // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const GxB_FC32_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_build_FC64 // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const GxB_FC64_t *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_build_UDT // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const void *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_build_Scalar // build a matrix from (I,J,scalar) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
GrB_Scalar scalar, // value for all tuples
GrB_Index nvals // number of tuples
) ;
// Type-generic version: X can be a pointer to any supported C type or void *
// for a user-defined type.
/*
GB_PUBLIC
GrB_Info GrB_Matrix_build // build a matrix from (I,J,X) tuples
(
GrB_Matrix C, // matrix to build
const GrB_Index *I, // array of row indices of tuples
const GrB_Index *J, // array of column indices of tuples
const <type> *X, // array of values of tuples
GrB_Index nvals, // number of tuples
const GrB_BinaryOp dup // binary function to assemble duplicates
) ;
*/
#if GxB_STDC_VERSION >= 201112L
// Type-generic wrapper (C11 _Generic): dispatches on the pointer type of X to
// the corresponding GrB_Matrix_build_<TYPE> function.  X is passed through a
// (const void *) cast so the one macro expansion is a valid argument for
// every typed variant (void* converts implicitly to any object pointer in C).
#define GrB_Matrix_build(C,I,J,X,nvals,dup) \
_Generic \
( \
(X), \
GB_CASES (*, GrB, Matrix_build) \
) \
(C, I, J, ((const void *) (X)), nvals, dup)
#endif
//------------------------------------------------------------------------------
// GrB_Matrix_setElement
//------------------------------------------------------------------------------
// Set a single entry in a matrix, C(i,j) = x, typecasting
// from the type of x to the type of C, as needed.
GB_PUBLIC
GrB_Info GrB_Matrix_setElement_BOOL // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
bool x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_setElement_INT8 // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
int8_t x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_setElement_UINT8 // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
uint8_t x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_setElement_INT16 // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
int16_t x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_setElement_UINT16 // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
uint16_t x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_setElement_INT32 // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
int32_t x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_setElement_UINT32 // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
uint32_t x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_setElement_INT64 // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
int64_t x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_setElement_UINT64 // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
uint64_t x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_setElement_FP32 // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
float x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_setElement_FP64 // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
double x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_setElement_FC32 // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
GxB_FC32_t x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_setElement_FC64 // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
GxB_FC64_t x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_setElement_UDT // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
void *x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_setElement_Scalar // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
GrB_Scalar x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
// Type-generic version: x can be any supported C type or void * for a
// user-defined type.
/*
GB_PUBLIC
GrB_Info GrB_Matrix_setElement // C (i,j) = x
(
GrB_Matrix C, // matrix to modify
<type> x, // scalar to assign to C(i,j)
GrB_Index i, // row index
GrB_Index j // column index
) ;
*/
#if GxB_STDC_VERSION >= 201112L
// Type-generic wrapper (C11 _Generic): selects the type-specific
// GrB_Matrix_setElement_* function from the type of the scalar x.
// Any type not covered by GB_CASES (i.e., a GrB_Scalar) dispatches to
// GrB_Matrix_setElement_Scalar.  No comments may appear inside the macro
// body: a // comment would swallow the trailing backslash-newline.
#define GrB_Matrix_setElement(C,x,i,j) \
_Generic \
( \
(x), \
GB_CASES (, GrB, Matrix_setElement), \
default: GrB_Matrix_setElement_Scalar \
) \
(C, x, i, j)
#endif
//------------------------------------------------------------------------------
// GrB_Matrix_extractElement
//------------------------------------------------------------------------------
// Extract a single entry from a matrix, x = A(i,j), typecasting from the type
// of A to the type of x, as needed.
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_BOOL // x = A(i,j)
(
bool *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_INT8 // x = A(i,j)
(
int8_t *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_UINT8 // x = A(i,j)
(
uint8_t *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_INT16 // x = A(i,j)
(
int16_t *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_UINT16 // x = A(i,j)
(
uint16_t *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_INT32 // x = A(i,j)
(
int32_t *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_UINT32 // x = A(i,j)
(
uint32_t *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_INT64 // x = A(i,j)
(
int64_t *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_UINT64 // x = A(i,j)
(
uint64_t *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_FP32 // x = A(i,j)
(
float *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_FP64 // x = A(i,j)
(
double *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_extractElement_FC32 // x = A(i,j)
(
GxB_FC32_t *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_extractElement_FC64 // x = A(i,j)
(
GxB_FC64_t *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_UDT // x = A(i,j)
(
void *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_Scalar // x = A(i,j)
(
GrB_Scalar x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
// Type-generic version: x can be a pointer to any supported C type or void *
// for a user-defined type.
/*
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement // x = A(i,j)
(
<type> *x, // extracted scalar
const GrB_Matrix A, // matrix to extract a scalar from
GrB_Index i, // row index
GrB_Index j // column index
) ;
*/
#if GxB_STDC_VERSION >= 201112L
// Type-generic wrapper (C11 _Generic): selects the type-specific
// GrB_Matrix_extractElement_* function from the type of x.  Here x is a
// *pointer* to the output scalar, so GB_CASES is instantiated with "*" to
// match pointer types; a GrB_Scalar x (not matched by GB_CASES) dispatches
// to GrB_Matrix_extractElement_Scalar via the default case.
#define GrB_Matrix_extractElement(x,A,i,j) \
_Generic \
( \
(x), \
GB_CASES (*, GrB, Matrix_extractElement), \
default: GrB_Matrix_extractElement_Scalar \
) \
(x, A, i, j)
#endif
//------------------------------------------------------------------------------
// GrB_Matrix_removeElement
//------------------------------------------------------------------------------
// GrB_Matrix_removeElement (A,i,j) removes the entry A(i,j) from the matrix A.
GB_PUBLIC
GrB_Info GrB_Matrix_removeElement
(
GrB_Matrix C, // matrix to remove entry from
GrB_Index i, // row index
GrB_Index j // column index
) ;
//------------------------------------------------------------------------------
// GrB_Matrix_extractTuples
//------------------------------------------------------------------------------
// Extracts all tuples from a matrix, like [I,J,X] = find (A). If
// any parameter I, J and/or X is NULL, then that component is not extracted.
// For example, to extract just the row and col indices, pass I and J as
// non-NULL, and X as NULL. This is like [I,J,~] = find (A).
GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_BOOL // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
bool *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_INT8 // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
int8_t *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_UINT8 // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
uint8_t *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_INT16 // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
int16_t *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_UINT16 // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
uint16_t *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_INT32 // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
int32_t *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_UINT32 // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
uint32_t *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_INT64 // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
int64_t *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_UINT64 // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
uint64_t *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_FP32 // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
float *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_FP64 // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
double *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_extractTuples_FC32 // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
GxB_FC32_t *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_extractTuples_FC64 // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
GxB_FC64_t *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_UDT // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
void *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
// Type-generic version: X can be a pointer to any supported C type or void *
// for a user-defined type.
/*
GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples // [I,J,X] = find (A)
(
GrB_Index *I, // array for returning row indices of tuples
GrB_Index *J, // array for returning col indices of tuples
<type> *X, // array for returning values of tuples
GrB_Index *nvals, // I,J,X size on input; # tuples on output
const GrB_Matrix A // matrix to extract tuples from
) ;
*/
#if GxB_STDC_VERSION >= 201112L
// Type-generic wrapper (C11 _Generic): selects the type-specific
// GrB_Matrix_extractTuples_* function from the pointer type of X.
// NOTE: unlike setElement/extractElement above, there is no default: case,
// so X must have one of the pointer types enumerated by GB_CASES
// (presumably including void* for user-defined types — confirm in GB_CASES).
#define GrB_Matrix_extractTuples(I,J,X,nvals,A) \
_Generic \
( \
(X), \
GB_CASES (*, GrB, Matrix_extractTuples) \
) \
(I, J, X, nvals, A)
#endif
//------------------------------------------------------------------------------
// GxB_Matrix_concat and GxB_Matrix_split
//------------------------------------------------------------------------------
// GxB_Matrix_concat concatenates an array of matrices (Tiles) into a single
// GrB_Matrix C.
// Tiles is an m-by-n dense array of matrices held in row-major format, where
// Tiles [i*n+j] is the (i,j)th tile, and where m > 0 and n > 0 must hold. Let
// A{i,j} denote the (i,j)th tile. The matrix C is constructed by
// concatenating these tiles together, as:
// C = [ A{0,0} A{0,1} A{0,2} ... A{0,n-1}
// A{1,0} A{1,1} A{1,2} ... A{1,n-1}
// ...
// A{m-1,0} A{m-1,1} A{m-1,2} ... A{m-1,n-1} ]
// On input, the matrix C must already exist. Any existing entries in C are
// discarded. C must have dimensions nrows by ncols where nrows is the sum of
// # of rows in the matrices A{i,0} for all i, and ncols is the sum of the # of
// columns in the matrices A{0,j} for all j. All matrices in any given tile
// row i must have the same number of rows (that is, nrows(A{i,0}) must equal
// nrows(A{i,j}) for all j), and all matrices in any given tile column j must
// have the same number of columns (that is, ncols(A{0,j}) must equal
// ncols(A{i,j}) for all i).
// The type of C is unchanged, and all matrices A{i,j} are typecasted into the
// type of C. Any settings made to C by GxB_Matrix_Option_set (format by row
// or by column, bitmap switch, hyper switch, and sparsity control) are
// unchanged.
GB_PUBLIC
GrB_Info GxB_Matrix_concat // concatenate a 2D array of matrices
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix *Tiles, // 2D row-major array of size m-by-n
const GrB_Index m,
const GrB_Index n,
const GrB_Descriptor desc // unused, except threading control
) ;
// GxB_Matrix_split does the opposite of GxB_Matrix_concat. It splits a single
// input matrix A into a 2D array of tiles. On input, the Tiles array must be
// a non-NULL pointer to a previously allocated array of size at least m*n
// where both m and n must be > 0. The Tiles_nrows array has size m, and
// Tiles_ncols has size n. The (i,j)th tile has dimension
// Tiles_nrows[i]-by-Tiles_ncols[j]. The sum of Tiles_nrows [0:m-1] must equal
// the number of rows of A, and the sum of Tiles_ncols [0:n-1] must equal the
// number of columns of A. The type of each tile is the same as the type of A;
// no typecasting is done.
GB_PUBLIC
GrB_Info GxB_Matrix_split // split a matrix into 2D array of matrices
(
GrB_Matrix *Tiles, // 2D row-major array of size m-by-n
const GrB_Index m,
const GrB_Index n,
const GrB_Index *Tile_nrows, // array of size m
const GrB_Index *Tile_ncols, // array of size n
const GrB_Matrix A, // input matrix to split
const GrB_Descriptor desc // unused, except threading control
) ;
//------------------------------------------------------------------------------
// GxB_Matrix_diag, GxB_Vector_diag, GrB_Matrix_diag
//------------------------------------------------------------------------------
// GrB_Matrix_diag constructs a new matrix from a vector. Let n be the length
// of the v vector, from GrB_Vector_size (&n, v). If k = 0, then C is an
// n-by-n diagonal matrix with the entries from v along the main diagonal of C,
// with C(i,i) = v(i). If k is nonzero, C is square with dimension n+abs(k).
// If k is positive, it denotes diagonals above the main diagonal, with
// C(i,i+k) = v(i). If k is negative, it denotes diagonals below the main
// diagonal of C, with C(i-k,i) = v(i). C is constructed with the same type
// as v.
GB_PUBLIC
GrB_Info GrB_Matrix_diag // build a diagonal matrix from a vector
(
GrB_Matrix *C, // output matrix
const GrB_Vector v, // input vector
int64_t k
) ;
// GrB_Matrix_diag is like GxB_Matrix_diag (&C, v, k, NULL), except that C must
// already exist on input, of the correct size. Any existing entries in C are
// discarded. The type of C is preserved, so that if the type of C and v
// differ, the entries are typecasted into the type of C. Any settings made to
// C by GxB_Matrix_Option_set (format by row or by column, bitmap switch, hyper
// switch, and sparsity control) are unchanged.
GB_PUBLIC
GrB_Info GxB_Matrix_diag // construct a diagonal matrix from a vector
(
GrB_Matrix C, // output matrix
const GrB_Vector v, // input vector
int64_t k,
const GrB_Descriptor desc // to specify # of threads
) ;
// GxB_Vector_diag extracts a vector v from an input matrix A, which may be
// rectangular. If k = 0, the main diagonal of A is extracted; k > 0 denotes
// diagonals above the main diagonal of A, and k < 0 denotes diagonals below
// the main diagonal of A. Let A have dimension m-by-n. If k is in the range
// 0 to n-1, then v has length min(m,n-k). If k is negative and in the range
// -1 to -m+1, then v has length min(m+k,n). If k is outside these ranges,
// v has length 0 (this is not an error).
// v must already exist on input, of the correct length; that is
// GrB_Vector_size (&len,v) must return len = 0 if k >= n or k <= -m, len =
// min(m,n-k) if k is in the range 0 to n-1, and len = min(m+k,n) if k is in
// the range -1 to -m+1. Any existing entries in v are discarded. The type of
// v is preserved, so that if the type of A and v differ, the entries are
// typecasted into the type of v. Any settings made to v by
// GxB_Vector_Option_set (bitmap switch and sparsity control) are unchanged.
GB_PUBLIC
GrB_Info GxB_Vector_diag // extract a diagonal from a matrix, as a vector
(
GrB_Vector v, // output vector
const GrB_Matrix A, // input matrix
int64_t k,
const GrB_Descriptor desc // unused, except threading control
) ;
//==============================================================================
// SuiteSparse:GraphBLAS options
//==============================================================================
// The following options modify how SuiteSparse:GraphBLAS stores and operates
// on its matrices. The GxB_*Option* methods allow the user to suggest how the
// internal representation of a matrix, or all matrices, should be held. These
// options have no effect on the result (except for minor roundoff differences
// for floating-point types). They only affect the time and memory usage of the
// computations.
// GxB_Matrix_Option_set: sets an option for a specific matrix
// GxB_Matrix_Option_get: queries the current option of a specific matrix
// GxB_Vector_Option_set: sets an option for a specific vector
// GxB_Vector_Option_get: queries the current option of a specific vector
// GxB_Global_Option_set: sets an option for all future matrices
// GxB_Global_Option_get: queries current option for all future matrices
#define GxB_HYPER 0 // (historical, use GxB_HYPER_SWITCH)
// Field selector passed to the GxB_*_Option_get/set and GxB_Global_Option_*
// methods below.  The numeric values are part of the public ABI and are
// deliberately non-consecutive (values are grouped by the era in which they
// were added); do not renumber them.
typedef enum // for global options or matrix options
{
//------------------------------------------------------------
// for GxB_Matrix_Option_get/set and GxB_Global_Option_get/set:
//------------------------------------------------------------
GxB_HYPER_SWITCH = 0, // defines switch to hypersparse (a double value)
GxB_BITMAP_SWITCH = 34, // defines switch to bitmap (a double value)
GxB_FORMAT = 1, // defines CSR/CSC format: GxB_BY_ROW or GxB_BY_COL
//------------------------------------------------------------
// for GxB_Global_Option_get only:
//------------------------------------------------------------
GxB_MODE = 2, // mode passed to GrB_init (blocking or non-blocking)
GxB_LIBRARY_NAME = 8, // name of the library (char *)
GxB_LIBRARY_VERSION = 9, // library version (3 int's)
GxB_LIBRARY_DATE = 10, // date of the library (char *)
GxB_LIBRARY_ABOUT = 11, // about the library (char *)
GxB_LIBRARY_URL = 12, // URL for the library (char *)
GxB_LIBRARY_LICENSE = 13, // license of the library (char *)
GxB_LIBRARY_COMPILE_DATE = 14, // date library was compiled (char *)
GxB_LIBRARY_COMPILE_TIME = 15, // time library was compiled (char *)
GxB_API_VERSION = 16, // API version (3 int's)
GxB_API_DATE = 17, // date of the API (char *)
GxB_API_ABOUT = 18, // about the API (char *)
GxB_API_URL = 19, // URL for the API (char *)
GxB_COMPILER_VERSION = 23, // compiler version (3 int's)
GxB_COMPILER_NAME = 24, // compiler name (char *)
//------------------------------------------------------------
// for GxB_Global_Option_get/set only:
//------------------------------------------------------------
// these two aliases reuse the GxB_NTHREADS / GxB_CHUNK values also used
// as descriptor fields, so GxB_set/GxB_get can accept either form
GxB_GLOBAL_NTHREADS = GxB_NTHREADS, // max number of threads to use
// If <= GxB_DEFAULT, then GraphBLAS selects the number
// of threads automatically.
GxB_GLOBAL_CHUNK = GxB_CHUNK, // chunk size for small problems.
// If <= GxB_DEFAULT, then the default is used.
GxB_BURBLE = 99, // diagnostic output (bool *)
GxB_PRINTF = 101, // printf function diagnostic output
GxB_FLUSH = 102, // flush function diagnostic output
GxB_MEMORY_POOL = 103, // memory pool control
GxB_PRINT_1BASED = 104, // print matrices as 0-based or 1-based
//------------------------------------------------------------
// for GxB_Matrix_Option_get only:
//------------------------------------------------------------
GxB_SPARSITY_STATUS = 33, // hyper, sparse, bitmap or full (1,2,4,8)
GxB_IS_HYPER = 6, // historical; use GxB_SPARSITY_STATUS
//------------------------------------------------------------
// for GxB_Matrix_Option_get/set only:
//------------------------------------------------------------
GxB_SPARSITY_CONTROL = 32, // sparsity control: 0 to 15; see below
//------------------------------------------------------------
// GPU and options (DRAFT: do not use)
//------------------------------------------------------------
GxB_GLOBAL_GPU_CONTROL = GxB_GPU_CONTROL,
GxB_GLOBAL_GPU_CHUNK = GxB_GPU_CHUNK,
} GxB_Option_Field ;
// GxB_FORMAT can be by row or by column:
// Value of the GxB_FORMAT option: the orientation of a matrix.  These values
// are part of the public ABI; do not renumber them.
typedef enum
{
GxB_BY_ROW = 0, // CSR: compressed sparse row format
GxB_BY_COL = 1, // CSC: compressed sparse column format
GxB_NO_FORMAT = -1 // format not defined
}
GxB_Format_Value ;
// The default format is by row. These constants are defined as GB_PUBLIC
// const, so that if SuiteSparse:GraphBLAS is recompiled with a different
// default format, and the application is relinked but not recompiled, it will
// acquire the new default values.
GB_PUBLIC const GxB_Format_Value GxB_FORMAT_DEFAULT ;
// the default hyper_switch parameter
GB_PUBLIC const double GxB_HYPER_DEFAULT ;
// GxB_SPARSITY_CONTROL can be any sum or bitwise OR of these 4 values:
#define GxB_HYPERSPARSE 1 // store matrix in hypersparse form
#define GxB_SPARSE 2 // store matrix as sparse form (compressed vector)
#define GxB_BITMAP 4 // store matrix as a bitmap
#define GxB_FULL 8 // store matrix as full; all entries must be present
// size of b array for GxB_set/get (GxB_BITMAP_SWITCH, b)
#define GxB_NBITMAP_SWITCH 8 // size of bitmap_switch parameter array
// any sparsity value:
#define GxB_ANY_SPARSITY (GxB_HYPERSPARSE + GxB_SPARSE + GxB_BITMAP + GxB_FULL)
// the default sparsity control is any format:
#define GxB_AUTO_SPARSITY GxB_ANY_SPARSITY
// GxB_Matrix_Option_set (A, GxB_SPARSITY_CONTROL, scontrol) provides hints
// about which data structure GraphBLAS should use for the matrix A:
//
// GxB_AUTO_SPARSITY: GraphBLAS selects automatically.
// GxB_HYPERSPARSE: always hypersparse, taking O(nvals(A)) space.
// GxB_SPARSE: always in a sparse structure: compressed-sparse row/column,
// taking O(nrows+nvals(A)) space if stored by row, or
// O(ncols+nvals(A)) if stored by column.
// GxB_BITMAP: always in a bitmap structure, taking O(nrows*ncols) space.
// GxB_FULL: always in a full structure, taking O(nrows*ncols) space,
// unless not all entries are present, in which case the bitmap
// storage is used.
//
// These options can be summed. For example, to allow a matrix to be sparse
// or hypersparse, but not bitmap or full, use GxB_SPARSE + GxB_HYPERSPARSE.
// Since GxB_FULL can only be used when all entries are present, matrices with
// just the GxB_FULL control setting are stored in bitmap form if any entries
// are not present.
//
// Only the least 4 bits of the sparsity control are considered, so the
// formats can be bitwise negated. For example, to allow for any format
// except full, use ~GxB_FULL.
//
// GxB_Matrix_Option_get (A, GxB_SPARSITY_STATUS, &sparsity) returns the
// current data structure currently used for the matrix A (either hypersparse,
// sparse, bitmap, or full).
//
// GxB_Matrix_Option_get (A, GxB_SPARSITY_CONTROL, &scontrol) returns the hint
// for how A should be stored (hypersparse, sparse, bitmap, or full, or any
// combination).
// GxB_HYPER_SWITCH:
// If the matrix or vector structure can be sparse or hypersparse, the
// GxB_HYPER_SWITCH parameter controls when each of these structures are
// used. The parameter is not used if the matrix or vector is full or
// bitmap.
//
// Let k be the actual number of non-empty vectors (with at least one
// entry). This value k is not dependent on whether or not the matrix is
// stored in hypersparse structure. Let n be the number of vectors (the #
// of columns if CSC, or rows if CSR). Let h be the value of the
// GxB_HYPER_SWITCH setting of the matrix.
//
// If a matrix is currently hypersparse, it can be converted to
// non-hypersparse if (n <= 1 || k > 2*n*h). Otherwise it stays
// hypersparse. If (n <= 1) the matrix is always stored as
// non-hypersparse.
//
// If currently non-hypersparse, it can be converted to hypersparse if (n
// > 1 && k <= n*h). Otherwise, it stays non-hypersparse. If (n <= 1)
// the matrix always remains non-hypersparse.
//
// Setting GxB_HYPER_SWITCH to GxB_ALWAYS_HYPER or GxB_NEVER_HYPER ensures
// a matrix always stays hypersparse, or always stays non-hypersparse,
// respectively.
GB_PUBLIC const double GxB_ALWAYS_HYPER, GxB_NEVER_HYPER ;
GB_PUBLIC
GrB_Info GxB_Matrix_Option_set // set an option in a matrix
(
GrB_Matrix A, // matrix to modify
GxB_Option_Field field, // option to change
... // value to change it to
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_Option_get // gets the current option of a matrix
(
GrB_Matrix A, // matrix to query
GxB_Option_Field field, // option to query
... // return value of the matrix option
) ;
GB_PUBLIC
GrB_Info GxB_Vector_Option_set // set an option in a vector
(
GrB_Vector A, // vector to modify
GxB_Option_Field field, // option to change
... // value to change it to
) ;
GB_PUBLIC
GrB_Info GxB_Vector_Option_get // gets the current option of a vector
(
GrB_Vector A, // vector to query
GxB_Option_Field field, // option to query
... // return value of the vector option
) ;
// GxB_Global_Option_set controls the global defaults used when a new matrix is
// created. GrB_init defines the following initial settings:
//
// GxB_Global_Option_set (GxB_HYPER_SWITCH, GxB_HYPER_DEFAULT) ;
// GxB_Global_Option_set (GxB_BITMAP_SWITCH, NULL) ;
// GxB_Global_Option_set (GxB_FORMAT, GxB_FORMAT_DEFAULT) ;
//
// The compile-time constants GxB_HYPER_DEFAULT and GxB_FORMAT_DEFAULT are
// equal to 0.0625 and GxB_BY_ROW, by default. That is, by default, all new
// matrices are held by row in CSR format. If a matrix has fewer than n/16
// columns, it can be converted to hypersparse structure. If it has more than
// n/8 columns, it can be converted to a sparse structure. Modifying these
// global settings via GxB_Global_Option_set has no effect on matrices already
// created.
GB_PUBLIC
GrB_Info GxB_Global_Option_set // set a global default option
(
GxB_Option_Field field, // option to change
... // value to change it to
) ;
GB_PUBLIC
GrB_Info GxB_Global_Option_get // gets the current global default option
(
GxB_Option_Field field, // option to query
... // return value of the global option
) ;
//------------------------------------------------------------------------------
// GxB_set and GxB_get
//------------------------------------------------------------------------------
// The simplest way to set/get a value of a GrB_Descriptor is with
// the generic GxB_set and GxB_get functions:
// GxB_set (desc, field, value) ;
// GxB_get (desc, field, &value) ;
// GxB_set and GxB_get are generic methods that set or query the options in
// a GrB_Matrix, a GrB_Descriptor, or in the global options. They can be used
// with the following syntax. Note that GxB_NTHREADS can be used for both the
// global nthreads_max, and for the # of threads in the descriptor.
// To set/get the global options:
//
// GxB_set (GxB_HYPER_SWITCH, double h) ;
// GxB_set (GxB_HYPER_SWITCH, GxB_ALWAYS_HYPER) ;
// GxB_set (GxB_HYPER_SWITCH, GxB_NEVER_HYPER) ;
// GxB_get (GxB_HYPER_SWITCH, double *h) ;
//
// double b [GxB_NBITMAP_SWITCH] ;
// GxB_set (GxB_BITMAP_SWITCH, b) ;
// GxB_set (GxB_BITMAP_SWITCH, NULL) ; // set defaults
// GxB_get (GxB_BITMAP_SWITCH, b) ;
//
// GxB_set (GxB_FORMAT, GxB_BY_ROW) ;
// GxB_set (GxB_FORMAT, GxB_BY_COL) ;
// GxB_get (GxB_FORMAT, GxB_Format_Value *s) ;
//
// GxB_set (GxB_NTHREADS, nthreads_max) ;
// GxB_get (GxB_NTHREADS, int *nthreads_max) ;
//
// GxB_set (GxB_CHUNK, double chunk) ;
// GxB_get (GxB_CHUNK, double *chunk) ;
//
// GxB_set (GxB_BURBLE, bool burble) ;
// GxB_get (GxB_BURBLE, bool *burble) ;
//
// GxB_set (GxB_PRINTF, void *printf_function) ;
// GxB_get (GxB_PRINTF, void **printf_function) ;
//
// GxB_set (GxB_FLUSH, void *flush_function) ;
// GxB_get (GxB_FLUSH, void **flush_function) ;
//
// int64_t free_pool_limit [64] ;
// GxB_set (GxB_MEMORY_POOL, free_pool_limit) ;
// GxB_set (GxB_MEMORY_POOL, NULL) ; // set defaults
// GxB_get (GxB_MEMORY_POOL, free_pool_limit) ;
// To get global options that can be queried but not modified:
//
// GxB_get (GxB_MODE, GrB_Mode *mode) ;
// To set/get a matrix option:
//
// GxB_set (GrB_Matrix A, GxB_HYPER_SWITCH, double h) ;
// GxB_set (GrB_Matrix A, GxB_HYPER_SWITCH, GxB_ALWAYS_HYPER) ;
// GxB_set (GrB_Matrix A, GxB_HYPER_SWITCH, GxB_NEVER_HYPER) ;
// GxB_get (GrB_Matrix A, GxB_HYPER_SWITCH, double *h) ;
//
// GxB_set (GrB_Matrix A, GxB_BITMAP_SWITCH, double b) ;
// GxB_get (GrB_Matrix A, GxB_BITMAP_SWITCH, double *b) ;
//
// GxB_set (GrB_Matrix A, GxB_FORMAT, GxB_BY_ROW) ;
// GxB_set (GrB_Matrix A, GxB_FORMAT, GxB_BY_COL) ;
// GxB_get (GrB_Matrix A, GxB_FORMAT, GxB_Format_Value *s) ;
//
// GxB_set (GrB_Matrix A, GxB_SPARSITY_CONTROL, GxB_AUTO_SPARSITY) ;
// GxB_set (GrB_Matrix A, GxB_SPARSITY_CONTROL, scontrol) ;
// GxB_get (GrB_Matrix A, GxB_SPARSITY_CONTROL, int *scontrol) ;
//
// GxB_get (GrB_Matrix A, GxB_SPARSITY_STATUS, int *sparsity) ;
// To set/get a vector option or status:
//
// GxB_set (GrB_Vector v, GxB_BITMAP_SWITCH, double b) ;
// GxB_get (GrB_Vector v, GxB_BITMAP_SWITCH, double *b) ;
//
// GxB_set (GrB_Vector v, GxB_FORMAT, GxB_BY_ROW) ;
// GxB_set (GrB_Vector v, GxB_FORMAT, GxB_BY_COL) ;
// GxB_get (GrB_Vector v, GxB_FORMAT, GxB_Format_Value *s) ;
//
// GxB_set (GrB_Vector v, GxB_SPARSITY_CONTROL, GxB_AUTO_SPARSITY) ;
// GxB_set (GrB_Vector v, GxB_SPARSITY_CONTROL, scontrol) ;
// GxB_get (GrB_Vector v, GxB_SPARSITY_CONTROL, int *scontrol) ;
//
// GxB_get (GrB_Vector v, GxB_SPARSITY_STATUS, int *sparsity) ;
// To set/get a descriptor field:
//
// GxB_set (GrB_Descriptor d, GrB_OUTP, GxB_DEFAULT) ;
// GxB_set (GrB_Descriptor d, GrB_OUTP, GrB_REPLACE) ;
// GxB_get (GrB_Descriptor d, GrB_OUTP, GrB_Desc_Value *v) ;
//
// GxB_set (GrB_Descriptor d, GrB_MASK, GxB_DEFAULT) ;
// GxB_set (GrB_Descriptor d, GrB_MASK, GrB_COMP) ;
// GxB_set (GrB_Descriptor d, GrB_MASK, GrB_STRUCTURE) ;
// GxB_set (GrB_Descriptor d, GrB_MASK, GrB_COMP + GrB_STRUCTURE) ;
// GxB_get (GrB_Descriptor d, GrB_MASK, GrB_Desc_Value *v) ;
//
// GxB_set (GrB_Descriptor d, GrB_INP0, GxB_DEFAULT) ;
// GxB_set (GrB_Descriptor d, GrB_INP0, GrB_TRAN) ;
// GxB_get (GrB_Descriptor d, GrB_INP0, GrB_Desc_Value *v) ;
//
// GxB_set (GrB_Descriptor d, GrB_INP1, GxB_DEFAULT) ;
// GxB_set (GrB_Descriptor d, GrB_INP1, GrB_TRAN) ;
// GxB_get (GrB_Descriptor d, GrB_INP1, GrB_Desc_Value *v) ;
//
// GxB_set (GrB_Descriptor d, GxB_AxB_METHOD, GxB_DEFAULT) ;
// GxB_set (GrB_Descriptor d, GxB_AxB_METHOD, GxB_AxB_GUSTAVSON) ;
// GxB_set (GrB_Descriptor d, GxB_AxB_METHOD, GxB_AxB_HASH) ;
// GxB_set (GrB_Descriptor d, GxB_AxB_METHOD, GxB_AxB_SAXPY) ;
// GxB_set (GrB_Descriptor d, GxB_AxB_METHOD, GxB_AxB_DOT) ;
// GxB_get (GrB_Descriptor d, GxB_AxB_METHOD, GrB_Desc_Value *v) ;
//
// GxB_set (GrB_Descriptor d, GxB_NTHREADS, nthreads) ;
// GxB_get (GrB_Descriptor d, GxB_NTHREADS, int *nthreads) ;
//
// GxB_set (GrB_Descriptor d, GxB_CHUNK, double chunk) ;
// GxB_get (GrB_Descriptor d, GxB_CHUNK, double *chunk) ;
//
// GxB_set (GrB_Descriptor d, GxB_SORT, int sort) ;
// GxB_get (GrB_Descriptor d, GxB_SORT, int *sort) ;
//
// GxB_set (GrB_Descriptor d, GxB_COMPRESSION, int method) ;
// GxB_get (GrB_Descriptor d, GxB_COMPRESSION, int *method) ;
//
// GxB_set (GrB_Descriptor d, GxB_IMPORT, int method) ;
// GxB_get (GrB_Descriptor d, GxB_IMPORT, int *method) ;
#if GxB_STDC_VERSION >= 201112L
// Generic set: dispatches on the type of the first argument.  A bare
// GxB_Option_Field (or a plain int, since enum constants have type int in C)
// selects the global option setter; a GrB_Vector, GrB_Matrix, or
// GrB_Descriptor selects the corresponding object-specific setter.
#define GxB_set(arg1,...) \
_Generic \
( \
(arg1), \
int : GxB_Global_Option_set , \
GxB_Option_Field : GxB_Global_Option_set , \
GrB_Vector : GxB_Vector_Option_set , \
GrB_Matrix : GxB_Matrix_Option_set , \
GrB_Descriptor : GxB_Desc_set \
) \
(arg1, __VA_ARGS__)
// Generic get: same dispatch as GxB_set, but each type also appears in a
// const-qualified case, since _Generic distinguishes const-qualified
// controlling-expression types on some compilers.
#define GxB_get(arg1,...) \
_Generic \
( \
(arg1), \
const int : GxB_Global_Option_get , \
int : GxB_Global_Option_get , \
const GxB_Option_Field : GxB_Global_Option_get , \
GxB_Option_Field : GxB_Global_Option_get , \
const GrB_Vector : GxB_Vector_Option_get , \
GrB_Vector : GxB_Vector_Option_get , \
const GrB_Matrix : GxB_Matrix_Option_get , \
GrB_Matrix : GxB_Matrix_Option_get , \
const GrB_Descriptor : GxB_Desc_get , \
GrB_Descriptor : GxB_Desc_get \
) \
(arg1, __VA_ARGS__)
#endif
//==============================================================================
// GrB_free: free any GraphBLAS object
//==============================================================================
// for null and invalid objects
#define GrB_NULL NULL
#define GrB_INVALID_HANDLE NULL
#if GxB_STDC_VERSION >= 201112L
// GrB_free: polymorphic free. The argument is a pointer to an object handle
// (e.g. GrB_Matrix *); _Generic selects the type-specific *_free function.
#define GrB_free(object) \
_Generic \
( \
(object), \
GrB_Type *: GrB_Type_free , \
GrB_UnaryOp *: GrB_UnaryOp_free , \
GrB_BinaryOp *: GrB_BinaryOp_free , \
GxB_SelectOp *: GxB_SelectOp_free , \
GrB_IndexUnaryOp *: GrB_IndexUnaryOp_free , \
GrB_Monoid *: GrB_Monoid_free , \
GrB_Semiring *: GrB_Semiring_free , \
GrB_Scalar *: GrB_Scalar_free , \
GrB_Vector *: GrB_Vector_free , \
GrB_Matrix *: GrB_Matrix_free , \
GrB_Descriptor *: GrB_Descriptor_free , \
GxB_Iterator *: GxB_Iterator_free \
) \
(object)
#endif
//==============================================================================
// GrB_wait: finish computations
//==============================================================================
// GrB_WaitMode: the mode argument for the GrB_*_wait functions below.
typedef enum
{
GrB_COMPLETE = 0, // establishes a happens-before relation
GrB_MATERIALIZE = 1 // object is complete
}
GrB_WaitMode ;
// Finish all pending work in a specific object.
GB_PUBLIC GrB_Info GrB_Type_wait (GrB_Type type , GrB_WaitMode waitmode) ;
GB_PUBLIC GrB_Info GrB_UnaryOp_wait (GrB_UnaryOp op , GrB_WaitMode waitmode) ;
GB_PUBLIC GrB_Info GrB_BinaryOp_wait (GrB_BinaryOp op , GrB_WaitMode waitmode) ;
GB_PUBLIC GrB_Info GxB_SelectOp_wait (GxB_SelectOp op , GrB_WaitMode waitmode) ;
GB_PUBLIC GrB_Info GrB_IndexUnaryOp_wait (GrB_IndexUnaryOp op , GrB_WaitMode waitmode) ;
GB_PUBLIC GrB_Info GrB_Monoid_wait (GrB_Monoid monoid , GrB_WaitMode waitmode) ;
GB_PUBLIC GrB_Info GrB_Semiring_wait (GrB_Semiring semiring, GrB_WaitMode waitmode) ;
GB_PUBLIC GrB_Info GrB_Descriptor_wait (GrB_Descriptor desc , GrB_WaitMode waitmode) ;
GB_PUBLIC GrB_Info GrB_Scalar_wait (GrB_Scalar s , GrB_WaitMode waitmode) ;
GB_PUBLIC GrB_Info GrB_Vector_wait (GrB_Vector v , GrB_WaitMode waitmode) ;
GB_PUBLIC GrB_Info GrB_Matrix_wait (GrB_Matrix A , GrB_WaitMode waitmode) ;
// GrB_wait (object,waitmode) polymorphic function:
// dispatches on the object handle type to the *_wait functions above.
#if GxB_STDC_VERSION >= 201112L
#define GrB_wait(object,waitmode) \
_Generic \
( \
(object), \
GrB_Type : GrB_Type_wait , \
GrB_UnaryOp : GrB_UnaryOp_wait , \
GrB_BinaryOp : GrB_BinaryOp_wait , \
GxB_SelectOp : GxB_SelectOp_wait , \
GrB_IndexUnaryOp : GrB_IndexUnaryOp_wait , \
GrB_Monoid : GrB_Monoid_wait , \
GrB_Semiring : GrB_Semiring_wait , \
GrB_Scalar : GrB_Scalar_wait , \
GrB_Vector : GrB_Vector_wait , \
GrB_Matrix : GrB_Matrix_wait , \
GrB_Descriptor : GrB_Descriptor_wait \
) \
(object, waitmode)
#endif
// NOTE: GxB_Scalar_wait is historical; use GrB_Scalar_wait instead
// (note it takes a pointer-to-handle, unlike the GrB_*_wait functions).
GB_PUBLIC GrB_Info GxB_Scalar_wait (GrB_Scalar *s) ;
//==============================================================================
// GrB_error: error handling
//==============================================================================
// Each GraphBLAS method and operation returns a GrB_Info error code.
// GrB_error returns additional information on the error in a thread-safe
// null-terminated string. The string returned by GrB_error is owned by
// the GraphBLAS library and must not be free'd.
// Each *_error function returns (in *error) a pointer to a string describing
// the last error on the given object; the string is owned by the library.
GB_PUBLIC GrB_Info GrB_Type_error (const char **error, const GrB_Type type) ;
GB_PUBLIC GrB_Info GrB_UnaryOp_error (const char **error, const GrB_UnaryOp op) ;
GB_PUBLIC GrB_Info GrB_BinaryOp_error (const char **error, const GrB_BinaryOp op) ;
GB_PUBLIC GrB_Info GxB_SelectOp_error (const char **error, const GxB_SelectOp op) ;
GB_PUBLIC GrB_Info GrB_IndexUnaryOp_error (const char **error, const GrB_IndexUnaryOp op) ;
GB_PUBLIC GrB_Info GrB_Monoid_error (const char **error, const GrB_Monoid monoid) ;
GB_PUBLIC GrB_Info GrB_Semiring_error (const char **error, const GrB_Semiring semiring) ;
GB_PUBLIC GrB_Info GrB_Scalar_error (const char **error, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GrB_Vector_error (const char **error, const GrB_Vector v) ;
GB_PUBLIC GrB_Info GrB_Matrix_error (const char **error, const GrB_Matrix A) ;
GB_PUBLIC GrB_Info GrB_Descriptor_error (const char **error, const GrB_Descriptor d) ;
// GxB_Scalar_error is historical: use GrB_Scalar_error instead
GB_PUBLIC GrB_Info GxB_Scalar_error (const char **error, const GrB_Scalar s) ;
// GrB_error (error,object) polymorphic function:
// dispatches on the (possibly const) object type to the *_error functions.
#if GxB_STDC_VERSION >= 201112L
#define GrB_error(error,object) \
_Generic \
( \
(object), \
const GrB_Type : GrB_Type_error , \
GrB_Type : GrB_Type_error , \
const GrB_UnaryOp : GrB_UnaryOp_error , \
GrB_UnaryOp : GrB_UnaryOp_error , \
const GrB_BinaryOp : GrB_BinaryOp_error , \
GrB_BinaryOp : GrB_BinaryOp_error , \
const GxB_SelectOp : GxB_SelectOp_error , \
GxB_SelectOp : GxB_SelectOp_error , \
const GrB_IndexUnaryOp : GrB_IndexUnaryOp_error , \
GrB_IndexUnaryOp : GrB_IndexUnaryOp_error , \
const GrB_Monoid : GrB_Monoid_error , \
GrB_Monoid : GrB_Monoid_error , \
const GrB_Semiring : GrB_Semiring_error , \
GrB_Semiring : GrB_Semiring_error , \
const GrB_Scalar : GrB_Scalar_error , \
GrB_Scalar : GrB_Scalar_error , \
const GrB_Vector : GrB_Vector_error , \
GrB_Vector : GrB_Vector_error , \
const GrB_Matrix : GrB_Matrix_error , \
GrB_Matrix : GrB_Matrix_error , \
const GrB_Descriptor : GrB_Descriptor_error , \
GrB_Descriptor : GrB_Descriptor_error \
) \
(error, object)
#endif
//==============================================================================
// GrB_mxm, vxm, mxv: matrix multiplication over a semiring
//==============================================================================
// Matrix-matrix, vector-matrix, and matrix-vector multiply over a semiring,
// which supplies both the additive monoid and the multiplicative operator.
GB_PUBLIC
GrB_Info GrB_mxm // C<Mask> = accum (C, A*B)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_Semiring semiring, // defines '+' and '*' for A*B
const GrB_Matrix A, // first input: matrix A
const GrB_Matrix B, // second input: matrix B
const GrB_Descriptor desc // descriptor for C, Mask, A, and B
) ;
GB_PUBLIC
GrB_Info GrB_vxm // w'<Mask> = accum (w, u'*A)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_Semiring semiring, // defines '+' and '*' for u'*A
const GrB_Vector u, // first input: vector u
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for w, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_mxv // w<Mask> = accum (w, A*u)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_Semiring semiring, // defines '+' and '*' for A*B
const GrB_Matrix A, // first input: matrix A
const GrB_Vector u, // second input: vector u
const GrB_Descriptor desc // descriptor for w, mask, and A
) ;
//==============================================================================
// GrB_eWiseMult: element-wise matrix and vector operations, set intersection
//==============================================================================
// GrB_eWiseMult computes C<Mask> = accum (C, A.*B), where ".*" is the Hadamard
// product, and where pairs of elements in two matrices (or vectors) are
// pairwise "multiplied" with C(i,j) = mult (A(i,j),B(i,j)).
// The result pattern is the set INTERSECTION of the patterns of A and B.
GB_PUBLIC
GrB_Info GrB_Vector_eWiseMult_Semiring // w<Mask> = accum (w, u.*v)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_Semiring semiring, // defines '.*' for t=u.*v
const GrB_Vector u, // first input: vector u
const GrB_Vector v, // second input: vector v
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_eWiseMult_Monoid // w<Mask> = accum (w, u.*v)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_Monoid monoid, // defines '.*' for t=u.*v
const GrB_Vector u, // first input: vector u
const GrB_Vector v, // second input: vector v
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_eWiseMult_BinaryOp // w<Mask> = accum (w, u.*v)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp mult, // defines '.*' for t=u.*v
const GrB_Vector u, // first input: vector u
const GrB_Vector v, // second input: vector v
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_eWiseMult_Semiring // C<Mask> = accum (C, A.*B)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_Semiring semiring, // defines '.*' for T=A.*B
const GrB_Matrix A, // first input: matrix A
const GrB_Matrix B, // second input: matrix B
const GrB_Descriptor desc // descriptor for C, Mask, A, and B
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_eWiseMult_Monoid // C<Mask> = accum (C, A.*B)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_Monoid monoid, // defines '.*' for T=A.*B
const GrB_Matrix A, // first input: matrix A
const GrB_Matrix B, // second input: matrix B
const GrB_Descriptor desc // descriptor for C, Mask, A, and B
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_eWiseMult_BinaryOp // C<Mask> = accum (C, A.*B)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp mult, // defines '.*' for T=A.*B
const GrB_Matrix A, // first input: matrix A
const GrB_Matrix B, // second input: matrix B
const GrB_Descriptor desc // descriptor for C, Mask, A, and B
) ;
// All 6 of the above type-specific functions are captured in a single
// type-generic function, GrB_eWiseMult. The outer _Generic dispatches on
// the output (matrix vs vector); the inner one on the operator kind.
#if GxB_STDC_VERSION >= 201112L
#define GrB_eWiseMult(C,Mask,accum,op,A,B,desc) \
_Generic \
( \
(C), \
GrB_Matrix : \
_Generic \
( \
(op), \
const GrB_Semiring : GrB_Matrix_eWiseMult_Semiring , \
GrB_Semiring : GrB_Matrix_eWiseMult_Semiring , \
const GrB_Monoid : GrB_Matrix_eWiseMult_Monoid , \
GrB_Monoid : GrB_Matrix_eWiseMult_Monoid , \
const GrB_BinaryOp : GrB_Matrix_eWiseMult_BinaryOp , \
GrB_BinaryOp : GrB_Matrix_eWiseMult_BinaryOp \
), \
GrB_Vector : \
_Generic \
( \
(op), \
const GrB_Semiring : GrB_Vector_eWiseMult_Semiring , \
GrB_Semiring : GrB_Vector_eWiseMult_Semiring , \
const GrB_Monoid : GrB_Vector_eWiseMult_Monoid , \
GrB_Monoid : GrB_Vector_eWiseMult_Monoid , \
const GrB_BinaryOp : GrB_Vector_eWiseMult_BinaryOp , \
GrB_BinaryOp : GrB_Vector_eWiseMult_BinaryOp \
) \
) \
(C, Mask, accum, op, A, B, desc)
#endif
//==============================================================================
// GrB_eWiseAdd: element-wise matrix and vector operations, set union
//==============================================================================
// GrB_eWiseAdd computes C<Mask> = accum (C, A+B), where pairs of elements in
// two matrices (or two vectors) are pairwise "added".
// The result pattern is the set UNION of the patterns of A and B.
GB_PUBLIC
GrB_Info GrB_Vector_eWiseAdd_Semiring // w<mask> = accum (w, u+v)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_Semiring semiring, // defines '+' for t=u+v
const GrB_Vector u, // first input: vector u
const GrB_Vector v, // second input: vector v
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_eWiseAdd_Monoid // w<mask> = accum (w, u+v)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_Monoid monoid, // defines '+' for t=u+v
const GrB_Vector u, // first input: vector u
const GrB_Vector v, // second input: vector v
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_eWiseAdd_BinaryOp // w<mask> = accum (w, u+v)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp add, // defines '+' for t=u+v
const GrB_Vector u, // first input: vector u
const GrB_Vector v, // second input: vector v
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_eWiseAdd_Semiring // C<Mask> = accum (C, A+B)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_Semiring semiring, // defines '+' for T=A+B
const GrB_Matrix A, // first input: matrix A
const GrB_Matrix B, // second input: matrix B
const GrB_Descriptor desc // descriptor for C, Mask, A, and B
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_eWiseAdd_Monoid // C<Mask> = accum (C, A+B)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_Monoid monoid, // defines '+' for T=A+B
const GrB_Matrix A, // first input: matrix A
const GrB_Matrix B, // second input: matrix B
const GrB_Descriptor desc // descriptor for C, Mask, A, and B
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_eWiseAdd_BinaryOp // C<Mask> = accum (C, A+B)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp add, // defines '+' for T=A+B
const GrB_Matrix A, // first input: matrix A
const GrB_Matrix B, // second input: matrix B
const GrB_Descriptor desc // descriptor for C, Mask, A, and B
) ;
// GrB_eWiseAdd: type-generic wrapper for the 6 functions above; the outer
// _Generic dispatches on the output, the inner one on the operator kind.
#if GxB_STDC_VERSION >= 201112L
#define GrB_eWiseAdd(C,Mask,accum,op,A,B,desc) \
_Generic \
( \
(C), \
GrB_Matrix : \
_Generic \
( \
(op), \
const GrB_Semiring : GrB_Matrix_eWiseAdd_Semiring , \
GrB_Semiring : GrB_Matrix_eWiseAdd_Semiring , \
const GrB_Monoid : GrB_Matrix_eWiseAdd_Monoid , \
GrB_Monoid : GrB_Matrix_eWiseAdd_Monoid , \
const GrB_BinaryOp : GrB_Matrix_eWiseAdd_BinaryOp , \
GrB_BinaryOp : GrB_Matrix_eWiseAdd_BinaryOp \
), \
GrB_Vector : \
_Generic \
( \
(op), \
const GrB_Semiring : GrB_Vector_eWiseAdd_Semiring , \
GrB_Semiring : GrB_Vector_eWiseAdd_Semiring , \
const GrB_Monoid : GrB_Vector_eWiseAdd_Monoid , \
GrB_Monoid : GrB_Vector_eWiseAdd_Monoid , \
const GrB_BinaryOp : GrB_Vector_eWiseAdd_BinaryOp , \
GrB_BinaryOp : GrB_Vector_eWiseAdd_BinaryOp \
) \
) \
(C, Mask, accum, op, A, B, desc)
#endif
//==============================================================================
// GxB_eWiseUnion: a variant of GrB_eWiseAdd
//==============================================================================
// GxB_eWiseUnion is a variant of eWiseAdd. The methods create a result with
// the same sparsity structure. They differ when an entry is present in A but
// not B, or in B but not A.
// eWiseAdd does the following, for a matrix, where "+" is the add binary op:
// if A(i,j) and B(i,j) are both present:
// C(i,j) = A(i,j) + B(i,j)
// else if A(i,j) is present but not B(i,j)
// C(i,j) = A(i,j)
// else if B(i,j) is present but not A(i,j)
// C(i,j) = B(i,j)
// by contrast, eWiseUnion always applies the operator:
// if A(i,j) and B(i,j) are both present:
// C(i,j) = A(i,j) + B(i,j)
// else if A(i,j) is present but not B(i,j)
// C(i,j) = A(i,j) + beta
// else if B(i,j) is present but not A(i,j)
// C(i,j) = alpha + B(i,j)
GB_PUBLIC
GrB_Info GxB_Vector_eWiseUnion // w<mask> = accum (w, u+v)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp add, // defines '+' for t=u+v
const GrB_Vector u, // first input: vector u
const GrB_Scalar alpha, // substitutes for u(i) when only v(i) is present
const GrB_Vector v, // second input: vector v
const GrB_Scalar beta, // substitutes for v(i) when only u(i) is present
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_eWiseUnion // C<M> = accum (C, A+B)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp add, // defines '+' for T=A+B
const GrB_Matrix A, // first input: matrix A
const GrB_Scalar alpha, // substitutes for A(i,j) when only B(i,j) is present
const GrB_Matrix B, // second input: matrix B
const GrB_Scalar beta, // substitutes for B(i,j) when only A(i,j) is present
const GrB_Descriptor desc // descriptor for C, M, A, and B
) ;
// GxB_eWiseUnion: type-generic wrapper, dispatching on the output type.
#if GxB_STDC_VERSION >= 201112L
#define GxB_eWiseUnion(C,Mask,accum,op,A,alpha,B,beta,desc) \
_Generic \
( \
(C), \
const GrB_Matrix : GxB_Matrix_eWiseUnion , \
GrB_Matrix : GxB_Matrix_eWiseUnion , \
const GrB_Vector : GxB_Vector_eWiseUnion , \
GrB_Vector : GxB_Vector_eWiseUnion \
) \
(C, Mask, accum, op, A, alpha, B, beta, desc)
#endif
//==============================================================================
// GrB_extract: extract a submatrix or subvector
//==============================================================================
// Extract entries from a matrix or vector; T = A(I,J). This (like most
// GraphBLAS methods) is then followed by C<Mask>=accum(C,T).
// To extract all rows of a matrix or vector, as in A (:,J), use I=GrB_ALL as
// the input argument. For all columns of a matrix, use J=GrB_ALL.
// GrB_ALL: pass as I (and/or J) to select all rows (and/or columns).
GB_PUBLIC const uint64_t *GrB_ALL ;
// To extract a range of rows and columns, I and J can be a list of 2 or 3
// indices that defines a range (begin:end) or a strided range (begin:inc:end).
// To specify the colon syntax I = begin:end, the array I has size at least 2,
// where I [GxB_BEGIN] = begin and I [GxB_END] = end. The parameter ni is then
// passed as the special value GxB_RANGE. To specify the colon syntax I =
// begin:inc:end, the array I has size at least three, with the values begin,
// end, and inc (in that order), and then pass in the value ni = GxB_STRIDE.
// The same can be done for the list J and its size, nj.
// These special values of ni and nj can be used for GrB_assign,
// GrB_extract, and GxB_subassign.
#define GxB_RANGE (INT64_MAX)
#define GxB_STRIDE (INT64_MAX-1)
#define GxB_BACKWARDS (INT64_MAX-2)
// for the strided range begin:inc:end, I [GxB_BEGIN] is the value of begin, I
// [GxB_END] is the value end, I [GxB_INC] is the magnitude of the stride. If
// the stride is negative, use ni = GxB_BACKWARDS.
#define GxB_BEGIN (0)
#define GxB_END (1)
#define GxB_INC (2)
// For example, the notation 10:-2:1 defines a sequence [10 8 6 4 2].
// The end point of the sequence (1) need not appear in the sequence, if
// the last increment goes past it. To specify the same in GraphBLAS,
// use:
// GrB_Index I [3], ni = GxB_BACKWARDS ;
// I [GxB_BEGIN ] = 10 ; // the start of the sequence
// I [GxB_INC ] = 2 ; // the magnitude of the increment
// I [GxB_END ] = 1 ; // the end of the sequence
GB_PUBLIC
GrB_Info GrB_Vector_extract // w<mask> = accum (w, u(I))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_Vector u, // first input: vector u
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_extract // C<Mask> = accum (C, A(I,J))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_Matrix A, // first input: matrix A
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C, Mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Col_extract // w<mask> = accum (w, A(I,j))
(
GrB_Vector w, // input/output matrix for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_Matrix A, // first input: matrix A
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
GrB_Index j, // column index
const GrB_Descriptor desc // descriptor for w, mask, and A
) ;
//------------------------------------------------------------------------------
// GrB_extract: generic matrix/vector extraction
//------------------------------------------------------------------------------
// GrB_extract is a generic interface to the following functions:
// GrB_Vector_extract (w,mask,acc,u,I,ni,d) // w<m> = acc (w, u(I))
// GrB_Col_extract (w,mask,acc,A,I,ni,j,d) // w<m> = acc (w, A(I,j))
// GrB_Matrix_extract (C,Mask,acc,A,I,ni,J,nj,d) // C<Mask> = acc (C, A(I,J))
// A vector output with a matrix source selects GrB_Col_extract.
#if GxB_STDC_VERSION >= 201112L
#define GrB_extract(arg1,Mask,accum,arg4,...) \
_Generic \
( \
(arg1), \
GrB_Vector : \
_Generic \
( \
(arg4), \
const GrB_Vector : GrB_Vector_extract , \
GrB_Vector : GrB_Vector_extract , \
const GrB_Matrix : GrB_Col_extract , \
GrB_Matrix : GrB_Col_extract \
), \
GrB_Matrix : GrB_Matrix_extract \
) \
(arg1, Mask, accum, arg4, __VA_ARGS__)
#endif
//==============================================================================
// GxB_subassign: matrix and vector subassign: C(I,J)<Mask> = accum (C(I,J), A)
//==============================================================================
// Assign entries in a matrix or vector; C(I,J) = A.
// Each GxB_subassign function is very similar to its corresponding GrB_assign
// function in the spec, but they differ in two ways: (1) the mask in
// GxB_subassign has the same size as w(I) for vectors and C(I,J) for matrices,
// and (2) they differ in the GrB_REPLACE option. See the user guide for
// details.
// In GraphBLAS notation, the two methods can be described as follows:
// matrix and vector subassign: C(I,J)<Mask> = accum (C(I,J), A)
// matrix and vector assign: C<Mask>(I,J) = accum (C(I,J), A)
// --- assign ------------------------------------------------------------------
//
// GrB_Matrix_assign C<M>(I,J) += A M same size as matrix C.
// A is |I|-by-|J|
//
// GrB_Vector_assign w<m>(I) += u m same size as column vector w.
// u is |I|-by-1
//
// GrB_Row_assign C<m'>(i,J) += u' m is a column vector the same
// size as a row of C.
// u is |J|-by-1, i is a scalar.
//
// GrB_Col_assign C<m>(I,j) += u m is a column vector the same
// size as a column of C.
// u is |I|-by-1, j is a scalar.
//
// --- subassign ---------------------------------------------------------------
//
// GxB_Matrix_subassign C(I,J)<M> += A M same size as matrix A.
// A is |I|-by-|J|
//
// GxB_Vector_subassign w(I)<m> += u m same size as column vector u.
// u is |I|-by-1
//
// GxB_Row_subassign C(i,J)<m'> += u' m same size as column vector u.
// u is |J|-by-1, i is a scalar.
//
// GxB_Col_subassign C(I,j)<m> += u m same size as column vector u.
// u is |I|-by-1, j is a scalar.
// GxB_subassign: the mask has the dimensions of the SUBREGION w(I) / C(I,J),
// not the whole output (contrast with GrB_assign; see the table above).
GB_PUBLIC
GrB_Info GxB_Vector_subassign // w(I)<mask> = accum (w(I),u)
(
GrB_Vector w, // input/output matrix for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w(I),t)
const GrB_Vector u, // first input: vector u
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign // C(I,J)<Mask> = accum (C(I,J),A)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),T)
const GrB_Matrix A, // first input: matrix A
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J), Mask, and A
) ;
GB_PUBLIC
GrB_Info GxB_Col_subassign // C(I,j)<mask> = accum (C(I,j),u)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Vector mask, // optional mask for C(I,j), unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(C(I,j),t)
const GrB_Vector u, // input vector
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
GrB_Index j, // column index
const GrB_Descriptor desc // descriptor for C(I,j) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Row_subassign // C(i,J)<mask'> = accum (C(i,J),u')
(
GrB_Matrix C, // input/output matrix for results
const GrB_Vector mask, // optional mask for C(i,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(C(i,J),t)
const GrB_Vector u, // input vector
GrB_Index i, // row index
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(i,J) and mask
) ;
//------------------------------------------------------------------------------
// GxB_Vector_subassign_[SCALAR]: scalar expansion assignment to subvector
//------------------------------------------------------------------------------
// Assigns a single scalar to a subvector, w(I)<mask> = accum(w(I),x). The
// scalar x is implicitly expanded into a vector u of size ni-by-1, with each
// entry in u equal to x, and then w(I)<mask> = accum(w(I),u) is done.
// One scalar-expansion variant per built-in type, plus UDT (user-defined
// type, passed as void*) and GrB_Scalar.
GB_PUBLIC
GrB_Info GxB_Vector_subassign_BOOL // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w(I),x)
bool x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_subassign_INT8 // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
int8_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_subassign_UINT8 // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
uint8_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_subassign_INT16 // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
int16_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_subassign_UINT16 // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
uint16_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_subassign_INT32 // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
int32_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_subassign_UINT32 // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
uint32_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_subassign_INT64 // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
int64_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_subassign_UINT64 // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
uint64_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_subassign_FP32 // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
float x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_subassign_FP64 // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
double x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_subassign_FC32 // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
GxB_FC32_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_subassign_FC64 // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
GxB_FC64_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_subassign_UDT // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
void *x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_subassign_Scalar // w(I)<mask> = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w(I), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
GrB_Scalar x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w(I) and mask
) ;
//------------------------------------------------------------------------------
// GxB_Matrix_subassign_[SCALAR]: scalar expansion assignment to submatrix
//------------------------------------------------------------------------------
// Assigns a single scalar to a submatrix, C(I,J)<Mask> = accum(C(I,J),x). The
// scalar x is implicitly expanded into a matrix A of size ni-by-nj, with each
// entry in A equal to x, and then C(I,J)<Mask> = accum(C(I,J),A) is done.
// One variant exists per built-in type, plus _UDT (user-defined types, passed
// by pointer) and _Scalar (a GrB_Scalar object).
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_BOOL // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
bool x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_INT8 // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
int8_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_UINT8 // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
uint8_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_INT16 // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
int16_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_UINT16 // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
uint16_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_INT32 // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
int32_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_UINT32 // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
uint32_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_INT64 // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
int64_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_UINT64 // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
uint64_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_FP32 // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
float x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_FP64 // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
double x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_FC32 // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
GxB_FC32_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_FC64 // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
GxB_FC64_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_UDT // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
void *x, // scalar to assign to C(I,J); points to a scalar of the user-defined type
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_subassign_Scalar // C(I,J)<Mask> = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C(I,J), unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
GrB_Scalar x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(I,J) and Mask
) ;
//------------------------------------------------------------------------------
// GxB_subassign: generic submatrix/subvector assignment
//------------------------------------------------------------------------------
// GxB_subassign is a generic function that provides access to all specific
// GxB_*_subassign* functions:
// GxB_Vector_subassign (w,m,acc,u,I,ni,d) // w(I)<m> = acc(w(I),u)
// GxB_Matrix_subassign (C,M,acc,A,I,ni,J,nj,d) // C(I,J)<M> = acc(C(I,J),A)
// GxB_Col_subassign (C,m,acc,u,I,ni,j,d) // C(I,j)<m> = acc(C(I,j),u)
// GxB_Row_subassign (C,m,acc,u,i,J,nj,d) // C(i,J)<m'> = acc(C(i,J),u')
// GxB_Vector_subassign_T (w,m,acc,x,I,ni,d) // w(I)<m> = acc(w(I),x)
// GxB_Matrix_subassign_T (C,M,acc,x,I,ni,J,nj,d) // C(I,J)<M> = acc(C(I,J),x)
// The variant is selected at compile time via C11 _Generic: arg1 picks the
// vector vs matrix family; arg4 (the value assigned) picks a typed-scalar,
// GrB_Scalar, or vector/matrix variant; and when arg1 is a matrix and arg4 is
// a GrB_Vector, arg5 distinguishes GxB_Col_subassign (arg5 is an index array
// I) from GxB_Row_subassign (arg5 is a scalar row index i).
// NOTE: no comments may appear inside the #define below: they would break the
// backslash line continuations.
#if GxB_STDC_VERSION >= 201112L
#define GxB_subassign(arg1,Mask,accum,arg4,arg5,...) \
_Generic \
( \
(arg1), \
GrB_Vector : \
_Generic \
( \
(arg4), \
GB_CASES (, GxB, Vector_subassign) , \
const GrB_Scalar : GxB_Vector_subassign_Scalar, \
GrB_Scalar : GxB_Vector_subassign_Scalar, \
default: GxB_Vector_subassign \
), \
default: \
_Generic \
( \
(arg4), \
GB_CASES (, GxB, Matrix_subassign) , \
const GrB_Scalar : GxB_Matrix_subassign_Scalar, \
GrB_Scalar : GxB_Matrix_subassign_Scalar, \
const GrB_Vector : \
_Generic \
( \
(arg5), \
const GrB_Index *: GxB_Col_subassign , \
GrB_Index *: GxB_Col_subassign , \
default: GxB_Row_subassign \
), \
GrB_Vector : \
_Generic \
( \
(arg5), \
const GrB_Index *: GxB_Col_subassign , \
GrB_Index *: GxB_Col_subassign , \
default: GxB_Row_subassign \
), \
default: GxB_Matrix_subassign \
) \
) \
(arg1, Mask, accum, arg4, arg5, __VA_ARGS__)
#endif
//==============================================================================
// GrB_assign: matrix and vector assign: C<Mask>(I,J) = accum (C(I,J), A)
//==============================================================================
// Assign entries in a matrix or vector; C(I,J) = A.
// Each of these can be used with their generic name, GrB_assign.
// Unlike GxB_subassign, the Mask here has the same size as C (not C(I,J)).
GB_PUBLIC
GrB_Info GrB_Vector_assign // w<mask>(I) = accum (w(I),u)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w(I),t)
const GrB_Vector u, // first input: vector u
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_assign // C<Mask>(I,J) = accum (C(I,J),A)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),T)
const GrB_Matrix A, // first input: matrix A
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C, Mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Col_assign // C<mask>(I,j) = accum (C(I,j),u)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Vector mask, // optional mask for C(:,j), unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(C(I,j),t)
const GrB_Vector u, // input vector
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
GrB_Index j, // column index
const GrB_Descriptor desc // descriptor for C(:,j) and mask
) ;
GB_PUBLIC
GrB_Info GrB_Row_assign // C<mask'>(i,J) = accum (C(i,J),u')
(
GrB_Matrix C, // input/output matrix for results
const GrB_Vector mask, // optional mask for C(i,:), unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(C(i,J),t)
const GrB_Vector u, // input vector
GrB_Index i, // row index
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C(i,:) and mask
) ;
//------------------------------------------------------------------------------
// GrB_Vector_assign_[SCALAR]: scalar expansion assignment to subvector
//------------------------------------------------------------------------------
// Assigns a single scalar to a subvector, w<mask>(I) = accum(w(I),x). The
// scalar x is implicitly expanded into a vector u of size ni-by-1, with each
// entry in u equal to x, and then w<mask>(I) = accum(w(I),u) is done.
// One variant exists per built-in type; the complex variants use the GxB
// prefix (they are SuiteSparse extensions, not part of the GrB spec).
GB_PUBLIC
GrB_Info GrB_Vector_assign_BOOL // w<mask>(I) = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w(I),x)
bool x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_assign_INT8 // w<mask>(I) = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
int8_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_assign_UINT8 // w<mask>(I) = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
uint8_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_assign_INT16 // w<mask>(I) = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
int16_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_assign_UINT16 // w<mask>(I) = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
uint16_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_assign_INT32 // w<mask>(I) = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
int32_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_assign_UINT32 // w<mask>(I) = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
uint32_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_assign_INT64 // w<mask>(I) = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
int64_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_assign_UINT64 // w<mask>(I) = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
uint64_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_assign_FP32 // w<mask>(I) = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
float x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_assign_FP64 // w<mask>(I) = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
double x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_assign_FC32 // w<mask>(I) = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
GxB_FC32_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_assign_FC64 // w<mask>(I) = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
GxB_FC64_t x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_assign_UDT // w<mask>(I) = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
void *x, // scalar to assign to w(I); points to a scalar of the user-defined type
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_assign_Scalar // w<mask>(I) = accum (w(I),x)
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x)
GrB_Scalar x, // scalar to assign to w(I)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Descriptor desc // descriptor for w and mask
) ;
//------------------------------------------------------------------------------
// GrB_Matrix_assign_[SCALAR]: scalar expansion assignment to submatrix
//------------------------------------------------------------------------------
// Assigns a single scalar to a submatrix, C<Mask>(I,J) = accum(C(I,J),x). The
// scalar x is implicitly expanded into a matrix A of size ni-by-nj, with each
// entry in A equal to x, and then C<Mask>(I,J) = accum(C(I,J),A) is done.
// One variant exists per built-in type; the complex variants use the GxB
// prefix (they are SuiteSparse extensions, not part of the GrB spec).
GB_PUBLIC
GrB_Info GrB_Matrix_assign_BOOL // C<Mask>(I,J) = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
bool x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C and Mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_assign_INT8 // C<Mask>(I,J) = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
int8_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C and Mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_assign_UINT8 // C<Mask>(I,J) = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
uint8_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C and Mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_assign_INT16 // C<Mask>(I,J) = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
int16_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C and Mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_assign_UINT16 // C<Mask>(I,J) = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
uint16_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C and Mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_assign_INT32 // C<Mask>(I,J) = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
int32_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C and Mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_assign_UINT32 // C<Mask>(I,J) = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
uint32_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C and Mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_assign_INT64 // C<Mask>(I,J) = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
int64_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C and Mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_assign_UINT64 // C<Mask>(I,J) = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
uint64_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C and Mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_assign_FP32 // C<Mask>(I,J) = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
float x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C and Mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_assign_FP64 // C<Mask>(I,J) = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
double x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_assign_FC32 // C<Mask>(I,J) = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
GxB_FC32_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C and Mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_assign_FC64 // C<Mask>(I,J) = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
GxB_FC64_t x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C and Mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_assign_UDT // C<Mask>(I,J) = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
void *x, // scalar to assign to C(I,J); points to a scalar of the user-defined type
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C and Mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_assign_Scalar // C<Mask>(I,J) = accum (C(I,J),x)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x)
GrB_Scalar x, // scalar to assign to C(I,J)
const GrB_Index *I, // row indices
GrB_Index ni, // number of row indices
const GrB_Index *J, // column indices
GrB_Index nj, // number of column indices
const GrB_Descriptor desc // descriptor for C and Mask
) ;
//------------------------------------------------------------------------------
// GrB_assign: generic submatrix/subvector assignment
//------------------------------------------------------------------------------
// GrB_assign is a generic function that provides access to all specific
// GrB_*_assign* functions:
// GrB_Vector_assign_T (w,m,acc,x,I,ni,d) // w<m>(I) = acc(w(I),x)
// GrB_Vector_assign (w,m,acc,u,I,ni,d) // w<m>(I) = acc(w(I),u)
// GrB_Matrix_assign_T (C,M,acc,x,I,ni,J,nj,d) // C<M>(I,J) = acc(C(I,J),x)
// GrB_Col_assign (C,m,acc,u,I,ni,j,d) // C<m>(I,j) = acc(C(I,j),u)
// GrB_Row_assign (C,m,acc,u,i,J,nj,d) // C<m'>(i,J) = acc(C(i,J),u')
// GrB_Matrix_assign (C,M,acc,A,I,ni,J,nj,d) // C<M>(I,J) = acc(C(I,J),A)
// Dispatch mirrors GxB_subassign above: arg1 picks the vector vs matrix
// family, arg4 picks the typed-scalar / GrB_Scalar / object variant, and
// arg5 distinguishes GrB_Col_assign (an index array) from GrB_Row_assign
// (a scalar row index) when arg4 is a GrB_Vector and arg1 is a GrB_Matrix.
// NOTE: no comments may appear inside the #define below: they would break the
// backslash line continuations.
#if GxB_STDC_VERSION >= 201112L
#define GrB_assign(arg1,Mask,accum,arg4,arg5,...) \
_Generic \
( \
(arg1), \
GrB_Vector : \
_Generic \
( \
(arg4), \
GB_CASES (, GrB, Vector_assign) , \
const GrB_Scalar : GrB_Vector_assign_Scalar , \
GrB_Scalar : GrB_Vector_assign_Scalar , \
default: GrB_Vector_assign \
), \
default: \
_Generic \
( \
(arg4), \
GB_CASES (, GrB, Matrix_assign) , \
const GrB_Scalar : GrB_Matrix_assign_Scalar , \
GrB_Scalar : GrB_Matrix_assign_Scalar , \
const GrB_Vector : \
_Generic \
( \
(arg5), \
const GrB_Index *: GrB_Col_assign , \
GrB_Index *: GrB_Col_assign , \
default: GrB_Row_assign \
), \
GrB_Vector : \
_Generic \
( \
(arg5), \
const GrB_Index *: GrB_Col_assign , \
GrB_Index *: GrB_Col_assign , \
default: GrB_Row_assign \
), \
default: GrB_Matrix_assign \
) \
) \
(arg1, Mask, accum, arg4, arg5, __VA_ARGS__)
#endif
//==============================================================================
// GrB_apply: matrix and vector apply
//==============================================================================
// Apply a unary, index_unary, or binary operator to entries in a matrix or
// vector, C<M> = accum (C, op (A)).
GB_PUBLIC
GrB_Info GrB_Vector_apply // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_UnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply // C<Mask> = accum (C, op(A)) or op(A')
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_UnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
const GrB_Descriptor desc // descriptor for C, Mask, and A
) ;
//-------------------------------------------
// vector apply: binaryop variants (bind 1st)
//-------------------------------------------
// Apply a binary operator to the entries in a vector, binding the first
// input to a scalar x, w<mask> = accum (w, op (x,u)).
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp1st_Scalar // w<mask> = accum (w, op(x,u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Scalar x, // first input: scalar x
const GrB_Vector u, // second input: vector u
const GrB_Descriptor desc // descriptor for w and mask
) ;
// historical: identical to GrB_Vector_apply_BinaryOp1st_Scalar (above);
// kept for backward compatibility
GB_PUBLIC
GrB_Info GxB_Vector_apply_BinaryOp1st // w<mask> = accum (w, op(x,u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Scalar x, // first input: scalar x
const GrB_Vector u, // second input: vector u
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp1st_BOOL // w<mask> = accum (w, op(x,u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
bool x, // first input: scalar x
const GrB_Vector u, // second input: vector u
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp1st_INT8 // w<mask> = accum (w, op(x,u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
int8_t x, // first input: scalar x
const GrB_Vector u, // second input: vector u
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp1st_INT16 // w<mask> = accum (w, op(x,u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
int16_t x, // first input: scalar x
const GrB_Vector u, // second input: vector u
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp1st_INT32 // w<mask> = accum (w, op(x,u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
int32_t x, // first input: scalar x
const GrB_Vector u, // second input: vector u
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp1st_INT64 // w<mask> = accum (w, op(x,u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
int64_t x, // first input: scalar x
const GrB_Vector u, // second input: vector u
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp1st_UINT8 // w<mask> = accum (w, op(x,u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
uint8_t x, // first input: scalar x
const GrB_Vector u, // second input: vector u
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp1st_UINT16 // w<mask> = accum (w, op(x,u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
uint16_t x, // first input: scalar x
const GrB_Vector u, // second input: vector u
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp1st_UINT32 // w<mask> = accum (w, op(x,u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
uint32_t x, // first input: scalar x
const GrB_Vector u, // second input: vector u
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp1st_UINT64 // w<mask> = accum (w, op(x,u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
uint64_t x, // first input: scalar x
const GrB_Vector u, // second input: vector u
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp1st_FP32 // w<mask> = accum (w, op(x,u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
float x, // first input: scalar x
const GrB_Vector u, // second input: vector u
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp1st_FP64 // w<mask> = accum (w, op(x,u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
double x, // first input: scalar x
const GrB_Vector u, // second input: vector u
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_apply_BinaryOp1st_FC32 // w<mask> = accum (w, op(x,u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
GxB_FC32_t x, // first input: scalar x
const GrB_Vector u, // second input: vector u
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_apply_BinaryOp1st_FC64 // w<mask> = accum (w, op(x,u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
GxB_FC64_t x, // first input: scalar x
const GrB_Vector u, // second input: vector u
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp1st_UDT // w<mask> = accum (w, op(x,u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const void *x, // first input: scalar x
const GrB_Vector u, // second input: vector u
const GrB_Descriptor desc // descriptor for w and mask
) ;
//-------------------------------------------
// vector apply: binaryop variants (bind 2nd)
//-------------------------------------------
// Apply a binary operator to the entries in a vector, binding the second
// input to a scalar y, w<mask> = accum (w, op (u,y)).
// Here y is a GrB_Scalar; typed variants below take a plain C scalar instead.
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp2nd_Scalar // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
const GrB_Scalar y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
// historical: identical to GrB_Vector_apply_BinaryOp2nd_Scalar
GB_PUBLIC
GrB_Info GxB_Vector_apply_BinaryOp2nd // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
const GrB_Scalar y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
// Typed bind-2nd variants: y is passed as a plain C scalar of the indicated
// type. The complex FC32/FC64 variants use the GxB_ prefix (SuiteSparse
// extensions); the _UDT variant passes a user-defined scalar via (const void *).
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp2nd_BOOL // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
bool y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp2nd_INT8 // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
int8_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp2nd_INT16 // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
int16_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp2nd_INT32 // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
int32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp2nd_INT64 // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
int64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp2nd_UINT8 // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
uint8_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp2nd_UINT16 // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
uint16_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp2nd_UINT32 // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
uint32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp2nd_UINT64 // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
uint64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp2nd_FP32 // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
float y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp2nd_FP64 // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
double y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_apply_BinaryOp2nd_FC32 // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
GxB_FC32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_apply_BinaryOp2nd_FC64 // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
GxB_FC64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_BinaryOp2nd_UDT // w<mask> = accum (w, op(u,y))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
const void *y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
//-------------------------------------------
// vector apply: IndexUnaryOp variants
//-------------------------------------------
// Apply a GrB_IndexUnaryOp to the entries in a vector.
// The scalar y is an extra input to the index-unary operator; here it is a
// GrB_Scalar, while the typed variants below take a plain C scalar.
GB_PUBLIC
GrB_Info GrB_Vector_apply_IndexOp_Scalar // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
const GrB_Scalar y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
// Typed IndexUnaryOp variants: y is passed as a plain C scalar of the
// indicated type. The complex FC32/FC64 variants use the GxB_ prefix
// (SuiteSparse extensions); the _UDT variant uses (const void *).
GB_PUBLIC
GrB_Info GrB_Vector_apply_IndexOp_BOOL // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
bool y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_IndexOp_INT8 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
int8_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_IndexOp_INT16 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
int16_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_IndexOp_INT32 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
int32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_IndexOp_INT64 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
int64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_IndexOp_UINT8 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
uint8_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_IndexOp_UINT16 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
uint16_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_IndexOp_UINT32 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
uint32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_IndexOp_UINT64 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
uint64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_IndexOp_FP32 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
float y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_IndexOp_FP64 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
double y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_apply_IndexOp_FC32 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
GxB_FC32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_apply_IndexOp_FC64 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
GxB_FC64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_apply_IndexOp_UDT // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
const void *y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
//-------------------------------------------
// matrix apply: binaryop variants (bind 1st)
//-------------------------------------------
// Apply a binary operator to the entries in a matrix, binding the first input
// to a scalar x, C<Mask> = accum (C, op (x,A)), or op(x,A').
// Here x is a GrB_Scalar; typed variants below take a plain C scalar instead.
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp1st_Scalar // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Scalar x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
// historical: identical to GrB_Matrix_apply_BinaryOp1st_Scalar
GB_PUBLIC
GrB_Info GxB_Matrix_apply_BinaryOp1st // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Scalar x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
// Typed bind-1st variants: x is passed as a plain C scalar of the indicated
// type. The complex FC32/FC64 variants use the GxB_ prefix (SuiteSparse
// extensions); the _UDT variant passes a user-defined scalar via (const void *).
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp1st_BOOL // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
bool x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp1st_INT8 // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
int8_t x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp1st_INT16 // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
int16_t x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp1st_INT32 // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
int32_t x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp1st_INT64 // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
int64_t x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp1st_UINT8 // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
uint8_t x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp1st_UINT16 // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
uint16_t x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp1st_UINT32 // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
uint32_t x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp1st_UINT64 // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
uint64_t x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp1st_FP32 // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
float x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp1st_FP64 // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
double x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_apply_BinaryOp1st_FC32 // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
GxB_FC32_t x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_apply_BinaryOp1st_FC64 // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
GxB_FC64_t x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp1st_UDT // C<M>=accum(C,op(x,A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const void *x, // first input: scalar x
const GrB_Matrix A, // second input: matrix A
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
//-------------------------------------------
// matrix apply: binaryop variants (bind 2nd)
//-------------------------------------------
// Apply a binary operator to the entries in a matrix, binding the second input
// to a scalar y, C<Mask> = accum (C, op (A,y)), or op(A',y).
// Here y is a GrB_Scalar; typed variants below take a plain C scalar instead.
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp2nd_Scalar // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
const GrB_Scalar y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
// historical: identical to GrB_Matrix_apply_BinaryOp2nd_Scalar
GB_PUBLIC
GrB_Info GxB_Matrix_apply_BinaryOp2nd // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
const GrB_Scalar y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
// Typed bind-2nd variants: y is passed as a plain C scalar of the indicated
// type.
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp2nd_BOOL // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
bool y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp2nd_INT8 // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
int8_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp2nd_INT16 // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
int16_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp2nd_INT32 // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
int32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp2nd_INT64 // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
int64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp2nd_UINT8 // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
uint8_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp2nd_UINT16 // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
uint16_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp2nd_UINT32 // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
uint32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp2nd_UINT64 // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
uint64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp2nd_FP32 // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
float y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp2nd_FP64 // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
double y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_apply_BinaryOp2nd_FC32 // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
GxB_FC32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_apply_BinaryOp2nd_FC64 // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
GxB_FC64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_BinaryOp2nd_UDT // C<M>=accum(C,op(A,y))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
const void *y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
//-------------------------------------------
// matrix apply: IndexUnaryOp variants
//-------------------------------------------
// Apply a GrB_IndexUnaryOp to the entries in a matrix.
// GrB_Matrix_apply_IndexOp_<TYPE>: compute C<M> = accum (C, op (A)), where
// the index-unary op receives each entry aij, its row/column indices, and
// the scalar y.  The _<TYPE> suffix names the C type of y; the _Scalar
// variant takes y as an opaque GrB_Scalar instead.
GB_PUBLIC
GrB_Info GrB_Matrix_apply_IndexOp_Scalar // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
const GrB_Scalar y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_IndexOp_BOOL // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
bool y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_IndexOp_INT8 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
int8_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_IndexOp_INT16 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
int16_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_IndexOp_INT32 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
int32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_IndexOp_INT64 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
int64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_IndexOp_UINT8 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
uint8_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_IndexOp_UINT16 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
uint16_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_IndexOp_UINT32 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
uint32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_IndexOp_UINT64 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
uint64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_IndexOp_FP32 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
float y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_apply_IndexOp_FP64 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
double y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
// complex scalar types are SuiteSparse extensions, hence the GxB_ prefix:
GB_PUBLIC
GrB_Info GxB_Matrix_apply_IndexOp_FC32 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
GxB_FC32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_apply_IndexOp_FC64 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
GxB_FC64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
// user-defined scalar types are passed by untyped pointer:
GB_PUBLIC
GrB_Info GrB_Matrix_apply_IndexOp_UDT // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
const void *y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
//------------------------------------------------------------------------------
// GrB_apply: generic matrix/vector apply
//------------------------------------------------------------------------------
// GrB_apply is a generic function for applying a unary operator to a matrix
// or vector and provides access to these functions:
// GrB_Vector_apply (w,mask,acc,op,u,d) // w<mask> = accum (w, op(u))
// GrB_Matrix_apply (C,Mask,acc,op,A,d) // C<Mask> = accum (C, op(A))
// GrB_Vector_apply (w,m,acc,unop ,u,d)
// GrB_Vector_apply_BinaryOp1st_TYPE (w,m,acc,binop,x,u,d)
// GrB_Vector_apply_BinaryOp2nd_TYPE (w,m,acc,binop,u,y,d)
// GrB_Vector_apply_IndexOp_TYPE (w,m,acc,idxop,u,y,d)
// GrB_Matrix_apply (C,M,acc,unop ,A,d)
// GrB_Matrix_apply_BinaryOp1st_TYPE (C,M,acc,binop,x,A,d)
// GrB_Matrix_apply_BinaryOp2nd_TYPE (C,M,acc,binop,A,y,d)
// GrB_Matrix_apply_IndexOp_TYPE (C,M,acc,idxop,A,y,d)
// The polymorphic macros below require C11 _Generic, hence the guard.
#if GxB_STDC_VERSION >= 201112L
// GB_BIND: dispatch a binary-op apply.  If the 3rd argument (x) is a scalar
// type (or GrB_Scalar), select the BinaryOp1st variant typed by x; otherwise
// fall through and select the BinaryOp2nd variant typed by the 4th argument
// (y), defaulting to the _Scalar form.  NOTE: comments must stay outside the
// macro bodies; a // comment before a backslash would truncate the macro.
#define GB_BIND(kind,x,y,...) \
_Generic \
( \
(x), \
const GrB_Scalar: GB_CONCAT ( GrB,_,kind,_apply_BinaryOp1st_Scalar), \
GrB_Scalar: GB_CONCAT ( GrB,_,kind,_apply_BinaryOp1st_Scalar), \
GB_CASES (, GrB, GB_CONCAT ( kind, _apply_BinaryOp1st,, )) , \
default: \
_Generic \
( \
(y), \
GB_CASES (, GrB, GB_CONCAT ( kind , _apply_BinaryOp2nd,, )), \
default: GB_CONCAT ( GrB,_,kind,_apply_BinaryOp2nd_Scalar) \
) \
)
// GB_IDXOP: dispatch an index-unary-op apply on the type of the scalar y,
// defaulting to the _Scalar form when y is not a built-in scalar type.
#define GB_IDXOP(kind,A,y,...) \
_Generic \
( \
(y), \
GB_CASES (, GrB, GB_CONCAT ( kind, _apply_IndexOp,, )), \
default: GB_CONCAT ( GrB, _, kind, _apply_IndexOp_Scalar) \
)
// GrB_apply: outer dispatch, first on the container type (vector vs matrix),
// then on the operator type (unary, binary, or index-unary).
#define GrB_apply(C,Mask,accum,op,...) \
_Generic \
( \
(C), \
GrB_Vector : \
_Generic \
( \
(op), \
GrB_UnaryOp : GrB_Vector_apply , \
GrB_BinaryOp : GB_BIND (Vector, __VA_ARGS__), \
GrB_IndexUnaryOp : GB_IDXOP (Vector, __VA_ARGS__) \
), \
GrB_Matrix : \
_Generic \
( \
(op), \
GrB_UnaryOp : GrB_Matrix_apply , \
GrB_BinaryOp : GB_BIND (Matrix, __VA_ARGS__), \
GrB_IndexUnaryOp : GB_IDXOP (Matrix, __VA_ARGS__) \
) \
) \
(C, Mask, accum, op, __VA_ARGS__)
#endif
//==============================================================================
// GrB_select: matrix and vector selection using an IndexUnaryOp
//==============================================================================
//-------------------------------------------
// vector select using an IndexUnaryOp
//-------------------------------------------
// GrB_Vector_select_<TYPE>: compute w<mask> = accum (w, op(u)), keeping only
// the entries of u for which the index-unary op (given each entry, its index,
// and the scalar y) selects them.  The _<TYPE> suffix names the C type of y;
// the _Scalar variant takes y as an opaque GrB_Scalar instead.
GB_PUBLIC
GrB_Info GrB_Vector_select_Scalar // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
const GrB_Scalar y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_select_BOOL // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
bool y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_select_INT8 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
int8_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_select_INT16 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
int16_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_select_INT32 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
int32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_select_INT64 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
int64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_select_UINT8 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
uint8_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_select_UINT16 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
uint16_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_select_UINT32 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
uint32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_select_UINT64 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
uint64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_select_FP32 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
float y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GrB_Vector_select_FP64 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
double y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
// complex scalar types are SuiteSparse extensions, hence the GxB_ prefix:
GB_PUBLIC
GrB_Info GxB_Vector_select_FC32 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
GxB_FC32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GxB_Vector_select_FC64 // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
GxB_FC64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
// user-defined scalar types are passed by untyped pointer:
GB_PUBLIC
GrB_Info GrB_Vector_select_UDT // w<mask> = accum (w, op(u))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
const void *y, // second input: scalar y
const GrB_Descriptor desc // descriptor for w and mask
) ;
//-------------------------------------------
// matrix select using an IndexUnaryOp
//-------------------------------------------
// GrB_Matrix_select_<TYPE>: compute C<M> = accum (C, op(A)), keeping only
// the entries of A selected by the index-unary op (given each entry, its
// row/column indices, and the scalar y).  The _<TYPE> suffix names the C
// type of y; the _Scalar variant takes y as an opaque GrB_Scalar instead.
GB_PUBLIC
GrB_Info GrB_Matrix_select_Scalar // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
const GrB_Scalar y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_select_BOOL // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
bool y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_select_INT8 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
int8_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_select_INT16 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
int16_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_select_INT32 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
int32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_select_INT64 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
int64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_select_UINT8 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
uint8_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_select_UINT16 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
uint16_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_select_UINT32 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
uint32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_select_UINT64 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
uint64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_select_FP32 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
float y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_select_FP64 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
double y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
// complex scalar types are SuiteSparse extensions, hence the GxB_ prefix:
GB_PUBLIC
GrB_Info GxB_Matrix_select_FC32 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
GxB_FC32_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_select_FC64 // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
GxB_FC64_t y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
// user-defined scalar types are passed by untyped pointer:
GB_PUBLIC
GrB_Info GrB_Matrix_select_UDT // C<M>=accum(C,op(A))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_IndexUnaryOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
const void *y, // second input: scalar y
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
// GrB_select is a generic method that applies an IndexUnaryOp to
// a matrix or vector, using any type of the scalar y.
// GrB_Vector_select_TYPE (w,m,acc,idxop,u,y,d)
// GrB_Matrix_select_TYPE (C,M,acc,idxop,A,y,d)
// The macro dispatches first on the container type of C (vector vs matrix),
// then on the type of the scalar y, defaulting to the _Scalar variant when
// y is not a built-in scalar type.  Requires C11 _Generic, hence the guard.
// NOTE: comments must stay outside the macro body; a // comment before a
// backslash would truncate the macro.
#if GxB_STDC_VERSION >= 201112L
#define GrB_select(C,Mask,accum,op,x,y,d) \
_Generic \
( \
(C), \
GrB_Vector : \
_Generic \
( \
(y), \
GB_CASES (, GrB, Vector_select), \
default: GrB_Vector_select_Scalar \
), \
GrB_Matrix : \
_Generic \
( \
(y), \
GB_CASES (, GrB, Matrix_select), \
default: GrB_Matrix_select_Scalar \
) \
) \
(C, Mask, accum, op, x, y, d)
#endif
//==============================================================================
// GxB_select: matrix and vector selection (historical)
//==============================================================================
// These methods are historical; GrB_select with the GrB_IndexUnaryOp
// operators should be used instead.
GB_PUBLIC
GrB_Info GxB_Vector_select // w<mask> = accum (w, op(u,k))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GxB_SelectOp op, // operator to apply to the entries
const GrB_Vector u, // first input: vector u
const GrB_Scalar Thunk, // optional input for the select operator
const GrB_Descriptor desc // descriptor for w and mask
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_select // C<Mask> = accum (C, op(A,k)) or op(A',k)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GxB_SelectOp op, // operator to apply to the entries
const GrB_Matrix A, // first input: matrix A
const GrB_Scalar Thunk, // optional input for the select operator
const GrB_Descriptor desc // descriptor for C, mask, and A
) ;
// generic version: dispatches on the container type of C only, since the
// Thunk scalar is always an opaque GrB_Scalar here (requires C11 _Generic).
#if GxB_STDC_VERSION >= 201112L
#define GxB_select(C,Mask,accum,op,A,Thunk,desc) \
_Generic \
( \
(C), \
GrB_Vector : GxB_Vector_select , \
GrB_Matrix : GxB_Matrix_select \
) \
(C, Mask, accum, op, A, Thunk, desc)
#endif
//==============================================================================
// GrB_reduce: matrix and vector reduction
//==============================================================================
// Reduce the entries in a matrix to a vector, a column vector t such that
// t(i) = sum (A (i,:)), and where "sum" is a commutative and associative
// monoid with an identity value. A can be transposed, which reduces down the
// columns instead of the rows.
// For GrB_Matrix_reduce_BinaryOp, the GrB_BinaryOp op must correspond to a
// known built-in monoid:
//
// operator data-types (all built-in)
// ---------------------- ---------------------------
// MIN, MAX INT*, UINT*, FP*
// TIMES, PLUS INT*, UINT*, FP*, FC*
// ANY INT*, UINT*, FP*, FC*, BOOL
// LOR, LAND, LXOR, EQ BOOL
// BOR, BAND, BXOR, BXNOR UINT*
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_Monoid // w<mask> = accum (w,reduce(A))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_Monoid monoid, // reduce operator for t=reduce(A)
const GrB_Matrix A, // first input: matrix A
const GrB_Descriptor desc // descriptor for w, mask, and A
) ;
// same as GrB_Matrix_reduce_Monoid, but the reduction is given as a binary
// op, which must match one of the built-in monoids listed in the table above:
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_BinaryOp // w<mask> = accum (w,reduce(A))
(
GrB_Vector w, // input/output vector for results
const GrB_Vector mask, // optional mask for w, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(w,t)
const GrB_BinaryOp op, // reduce operator for t=reduce(A)
const GrB_Matrix A, // first input: matrix A
const GrB_Descriptor desc // descriptor for w, mask, and A
) ;
//------------------------------------------------------------------------------
// reduce a vector to a scalar
//------------------------------------------------------------------------------
// Reduce entries in a vector to a scalar, c = accum (c, reduce_to_scalar(u))
// GrB_Vector_reduce_<TYPE>: the _<TYPE> suffix names the C type of the
// result scalar c, which is passed by pointer and updated in place.
GB_PUBLIC
GrB_Info GrB_Vector_reduce_BOOL // c = accum (c, reduce_to_scalar (u))
(
bool *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Vector_reduce_INT8 // c = accum (c, reduce_to_scalar (u))
(
int8_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Vector_reduce_UINT8 // c = accum (c, reduce_to_scalar (u))
(
uint8_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Vector_reduce_INT16 // c = accum (c, reduce_to_scalar (u))
(
int16_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Vector_reduce_UINT16 // c = accum (c, reduce_to_scalar (u))
(
uint16_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Vector_reduce_INT32 // c = accum (c, reduce_to_scalar (u))
(
int32_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Vector_reduce_UINT32 // c = accum (c, reduce_to_scalar (u))
(
uint32_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Vector_reduce_INT64 // c = accum (c, reduce_to_scalar (u))
(
int64_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Vector_reduce_UINT64 // c = accum (c, reduce_to_scalar (u))
(
uint64_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Vector_reduce_FP32 // c = accum (c, reduce_to_scalar (u))
(
float *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Vector_reduce_FP64 // c = accum (c, reduce_to_scalar (u))
(
double *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Vector_reduce_FC32 // c = accum (c, reduce_to_scalar (u))
(
GxB_FC32_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Vector_reduce_FC64 // c = accum (c, reduce_to_scalar (u))
(
GxB_FC64_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Vector_reduce_UDT // c = accum (c, reduce_to_scalar (u))
(
void *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Vector_reduce_Monoid_Scalar // c = accum(c,reduce_to_scalar(u))
(
GrB_Scalar c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Vector_reduce_BinaryOp_Scalar // c = accum(c,reduce_to_scalar(u))
(
GrB_Scalar c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_BinaryOp op, // binary op to do the reduction
const GrB_Vector u, // vector to reduce
const GrB_Descriptor desc
) ;
//------------------------------------------------------------------------------
// reduce a matrix to a scalar
//------------------------------------------------------------------------------
// Reduce entries in a matrix to a scalar, c = accum (c, reduce_to_scalar(A))
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_BOOL // c = accum (c, reduce_to_scalar (A))
(
bool *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_INT8 // c = accum (c, reduce_to_scalar (A))
(
int8_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_UINT8 // c = accum (c, reduce_to_scalar (A))
(
uint8_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_INT16 // c = accum (c, reduce_to_scalar (A))
(
int16_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_UINT16 // c = accum (c, reduce_to_scalar (A))
(
uint16_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_INT32 // c = accum (c, reduce_to_scalar (A))
(
int32_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_UINT32 // c = accum (c, reduce_to_scalar (A))
(
uint32_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_INT64 // c = accum (c, reduce_to_scalar (A))
(
int64_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_UINT64 // c = accum (c, reduce_to_scalar (A))
(
uint64_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_FP32 // c = accum (c, reduce_to_scalar (A))
(
float *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_FP64 // c = accum (c, reduce_to_scalar (A))
(
double *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_reduce_FC32 // c = accum (c, reduce_to_scalar (A))
(
GxB_FC32_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_reduce_FC64 // c = accum (c, reduce_to_scalar (A))
(
GxB_FC64_t *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_UDT // c = accum (c, reduce_to_scalar (A))
(
void *c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_Monoid_Scalar // c = accum(c,reduce_to_scalar(A))
(
GrB_Scalar c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_Monoid monoid, // monoid to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_reduce_BinaryOp_Scalar // c = accum(c,reduce_to_scalar(A))
(
GrB_Scalar c, // result scalar
const GrB_BinaryOp accum, // optional accum for c=accum(c,t)
const GrB_BinaryOp op, // binary op to do the reduction
const GrB_Matrix A, // matrix to reduce
const GrB_Descriptor desc
) ;
//------------------------------------------------------------------------------
// GrB_reduce: generic matrix/vector reduction to a vector or scalar
//------------------------------------------------------------------------------
// GrB_reduce is a generic function that provides access to all GrB_*reduce*
// functions:
// reduce matrix to vector:
// GrB_Matrix_reduce_Monoid (w,mask,acc,mo,A,d) // w<mask> = acc (w,reduce(A))
// GrB_Matrix_reduce_BinaryOp (w,mask,acc,op,A,d) // w<mask> = acc (w,reduce(A))
// reduce matrix or vector to scalar:
// GrB_Vector_reduce_[SCALAR] (c,acc,monoid,u,d) // c = acc (c,reduce(u))
// GrB_Matrix_reduce_[SCALAR] (c,acc,monoid,A,d) // c = acc (c,reduce(A))
// GrB_Vector_reduce_Monoid_Scalar (s,acc,monoid,u,d) // s = acc (s,reduce(u))
// GrB_Matrix_reduce_Monoid_Scalar (s,acc,monoid,A,d) // s = acc (s,reduce(A))
// GrB_Vector_reduce_BinaryOp_Scalar (s,acc,op,u,d) // s = acc (s,reduce(u))
// GrB_Matrix_reduce_BinaryOp_Scalar (s,acc,op,A,d) // s = acc (s,reduce(A))
#if GxB_STDC_VERSION >= 201112L
#define GB_REDUCE_TO_SCALAR(kind,c,op) \
_Generic \
( \
(c), \
GB_CASES (*, GrB, GB_CONCAT ( kind, _reduce,, )), \
default: \
_Generic \
( \
(op), \
const GrB_BinaryOp : \
GB_CONCAT (GrB,_,kind,_reduce_BinaryOp_Scalar),\
GrB_BinaryOp : \
GB_CONCAT (GrB,_,kind,_reduce_BinaryOp_Scalar),\
default: GB_CONCAT (GrB,_,kind,_reduce_Monoid_Scalar) \
) \
)
#define GrB_reduce(arg1,arg2,arg3,arg4,...) \
_Generic \
( \
(arg4), \
const GrB_Vector : GB_REDUCE_TO_SCALAR (Vector, arg1, arg3), \
GrB_Vector : GB_REDUCE_TO_SCALAR (Vector, arg1, arg3), \
const GrB_Matrix : GB_REDUCE_TO_SCALAR (Matrix, arg1, arg3), \
GrB_Matrix : GB_REDUCE_TO_SCALAR (Matrix, arg1, arg3), \
const GrB_Monoid : GrB_Matrix_reduce_Monoid , \
GrB_Monoid : GrB_Matrix_reduce_Monoid , \
const GrB_BinaryOp : GrB_Matrix_reduce_BinaryOp , \
GrB_BinaryOp : GrB_Matrix_reduce_BinaryOp \
) \
(arg1, arg2, arg3, arg4, __VA_ARGS__)
#endif
//==============================================================================
// GrB_transpose: matrix transpose
//==============================================================================
GB_PUBLIC
GrB_Info GrB_transpose // C<Mask> = accum (C, A')
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_Matrix A, // first input: matrix A
const GrB_Descriptor desc // descriptor for C, Mask, and A
) ;
//==============================================================================
// GrB_kronecker: Kronecker product
//==============================================================================
// GxB_kron is historical; use GrB_kronecker instead
GB_PUBLIC
GrB_Info GxB_kron // C<Mask> = accum(C,kron(A,B)) (historical)
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix Mask, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // defines '*' for T=kron(A,B)
const GrB_Matrix A, // first input: matrix A
const GrB_Matrix B, // second input: matrix B
const GrB_Descriptor desc // descriptor for C, Mask, A, and B
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_kronecker_BinaryOp // C<M> = accum (C, kron(A,B))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix M, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_BinaryOp op, // defines '*' for T=kron(A,B)
const GrB_Matrix A, // first input: matrix A
const GrB_Matrix B, // second input: matrix B
const GrB_Descriptor desc // descriptor for C, M, A, and B
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_kronecker_Monoid // C<M> = accum (C, kron(A,B))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix M, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_Monoid monoid, // defines '*' for T=kron(A,B)
const GrB_Matrix A, // first input: matrix A
const GrB_Matrix B, // second input: matrix B
const GrB_Descriptor desc // descriptor for C, M, A, and B
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_kronecker_Semiring // C<M> = accum (C, kron(A,B))
(
GrB_Matrix C, // input/output matrix for results
const GrB_Matrix M, // optional mask for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for Z=accum(C,T)
const GrB_Semiring semiring, // defines '*' for T=kron(A,B)
const GrB_Matrix A, // first input: matrix A
const GrB_Matrix B, // second input: matrix B
const GrB_Descriptor desc // descriptor for C, M, A, and B
) ;
#if GxB_STDC_VERSION >= 201112L
#define GrB_kronecker(C,Mask,accum,op,A,B,desc) \
_Generic \
( \
(op), \
const GrB_Semiring : GrB_Matrix_kronecker_Semiring , \
GrB_Semiring : GrB_Matrix_kronecker_Semiring , \
const GrB_Monoid : GrB_Matrix_kronecker_Monoid , \
GrB_Monoid : GrB_Matrix_kronecker_Monoid , \
const GrB_BinaryOp : GrB_Matrix_kronecker_BinaryOp , \
GrB_BinaryOp : GrB_Matrix_kronecker_BinaryOp \
) \
(C, Mask, accum, op, A, B, desc)
#endif
//==============================================================================
// GrB_Monoid: built-in monoids
//==============================================================================
GB_PUBLIC GrB_Monoid
//--------------------------------------------------------------------------
// 10 MIN monoids: (not for complex types)
//--------------------------------------------------------------------------
// GxB_MIN monoids, historical, use GrB_MIN_MONOID_* instead:
GxB_MIN_INT8_MONOID, // identity: INT8_MAX terminal: INT8_MIN
GxB_MIN_INT16_MONOID, // identity: INT16_MAX terminal: INT16_MIN
GxB_MIN_INT32_MONOID, // identity: INT32_MAX terminal: INT32_MIN
GxB_MIN_INT64_MONOID, // identity: INT64_MAX terminal: INT64_MIN
GxB_MIN_UINT8_MONOID, // identity: UINT8_MAX terminal: 0
GxB_MIN_UINT16_MONOID, // identity: UINT16_MAX terminal: 0
GxB_MIN_UINT32_MONOID, // identity: UINT32_MAX terminal: 0
GxB_MIN_UINT64_MONOID, // identity: UINT64_MAX terminal: 0
GxB_MIN_FP32_MONOID, // identity: INFINITY terminal: -INFINITY
GxB_MIN_FP64_MONOID, // identity: INFINITY terminal: -INFINITY
// preferred names from the v1.3 spec:
GrB_MIN_MONOID_INT8, // identity: INT8_MAX terminal: INT8_MIN
GrB_MIN_MONOID_INT16, // identity: INT16_MAX terminal: INT16_MIN
GrB_MIN_MONOID_INT32, // identity: INT32_MAX terminal: INT32_MIN
GrB_MIN_MONOID_INT64, // identity: INT64_MAX terminal: INT64_MIN
GrB_MIN_MONOID_UINT8, // identity: UINT8_MAX terminal: 0
GrB_MIN_MONOID_UINT16, // identity: UINT16_MAX terminal: 0
GrB_MIN_MONOID_UINT32, // identity: UINT32_MAX terminal: 0
GrB_MIN_MONOID_UINT64, // identity: UINT64_MAX terminal: 0
GrB_MIN_MONOID_FP32, // identity: INFINITY terminal: -INFINITY
GrB_MIN_MONOID_FP64, // identity: INFINITY terminal: -INFINITY
//--------------------------------------------------------------------------
// 10 MAX monoids:
//--------------------------------------------------------------------------
// GxB_MAX monoids, historical, use GrB_MAX_MONOID_* instead:
GxB_MAX_INT8_MONOID, // identity: INT8_MIN terminal: INT8_MAX
GxB_MAX_INT16_MONOID, // identity: INT16_MIN terminal: INT16_MAX
GxB_MAX_INT32_MONOID, // identity: INT32_MIN terminal: INT32_MAX
GxB_MAX_INT64_MONOID, // identity: INT64_MIN terminal: INT64_MAX
GxB_MAX_UINT8_MONOID, // identity: 0 terminal: UINT8_MAX
GxB_MAX_UINT16_MONOID, // identity: 0 terminal: UINT16_MAX
GxB_MAX_UINT32_MONOID, // identity: 0 terminal: UINT32_MAX
GxB_MAX_UINT64_MONOID, // identity: 0 terminal: UINT64_MAX
GxB_MAX_FP32_MONOID, // identity: -INFINITY terminal: INFINITY
GxB_MAX_FP64_MONOID, // identity: -INFINITY terminal: INFINITY
// preferred names from the v1.3 spec:
GrB_MAX_MONOID_INT8, // identity: INT8_MIN terminal: INT8_MAX
GrB_MAX_MONOID_INT16, // identity: INT16_MIN terminal: INT16_MAX
GrB_MAX_MONOID_INT32, // identity: INT32_MIN terminal: INT32_MAX
GrB_MAX_MONOID_INT64, // identity: INT64_MIN terminal: INT64_MAX
GrB_MAX_MONOID_UINT8, // identity: 0 terminal: UINT8_MAX
GrB_MAX_MONOID_UINT16, // identity: 0 terminal: UINT16_MAX
GrB_MAX_MONOID_UINT32, // identity: 0 terminal: UINT32_MAX
GrB_MAX_MONOID_UINT64, // identity: 0 terminal: UINT64_MAX
GrB_MAX_MONOID_FP32, // identity: -INFINITY terminal: INFINITY
GrB_MAX_MONOID_FP64, // identity: -INFINITY terminal: INFINITY
//--------------------------------------------------------------------------
// 12 PLUS monoids:
//--------------------------------------------------------------------------
// GxB_PLUS monoids, historical, use GrB_PLUS_MONOID_* instead:
GxB_PLUS_INT8_MONOID, // identity: 0
GxB_PLUS_INT16_MONOID, // identity: 0
GxB_PLUS_INT32_MONOID, // identity: 0
GxB_PLUS_INT64_MONOID, // identity: 0
GxB_PLUS_UINT8_MONOID, // identity: 0
GxB_PLUS_UINT16_MONOID, // identity: 0
GxB_PLUS_UINT32_MONOID, // identity: 0
GxB_PLUS_UINT64_MONOID, // identity: 0
GxB_PLUS_FP32_MONOID, // identity: 0
GxB_PLUS_FP64_MONOID, // identity: 0
// preferred names from the v1.3 spec:
GrB_PLUS_MONOID_INT8, // identity: 0
GrB_PLUS_MONOID_INT16, // identity: 0
GrB_PLUS_MONOID_INT32, // identity: 0
GrB_PLUS_MONOID_INT64, // identity: 0
GrB_PLUS_MONOID_UINT8, // identity: 0
GrB_PLUS_MONOID_UINT16, // identity: 0
GrB_PLUS_MONOID_UINT32, // identity: 0
GrB_PLUS_MONOID_UINT64, // identity: 0
GrB_PLUS_MONOID_FP32, // identity: 0
GrB_PLUS_MONOID_FP64, // identity: 0
// complex monoids:
GxB_PLUS_FC32_MONOID, // identity: 0
GxB_PLUS_FC64_MONOID, // identity: 0
//--------------------------------------------------------------------------
// 12 TIMES monoids: identity value is 1, int* and uint* are terminal
//--------------------------------------------------------------------------
// GxB_TIMES monoids, historical, use GrB_TIMES_MONOID_* instead:
GxB_TIMES_INT8_MONOID, // identity: 1 terminal: 0
GxB_TIMES_INT16_MONOID, // identity: 1 terminal: 0
GxB_TIMES_INT32_MONOID, // identity: 1 terminal: 0
GxB_TIMES_INT64_MONOID, // identity: 1 terminal: 0
GxB_TIMES_UINT8_MONOID, // identity: 1 terminal: 0
GxB_TIMES_UINT16_MONOID, // identity: 1 terminal: 0
GxB_TIMES_UINT32_MONOID, // identity: 1 terminal: 0
GxB_TIMES_UINT64_MONOID, // identity: 1 terminal: 0
GxB_TIMES_FP32_MONOID, // identity: 1
GxB_TIMES_FP64_MONOID, // identity: 1
// preferred names from the v1.3 spec:
GrB_TIMES_MONOID_INT8, // identity: 1 terminal: 0
GrB_TIMES_MONOID_INT16, // identity: 1 terminal: 0
GrB_TIMES_MONOID_INT32, // identity: 1 terminal: 0
GrB_TIMES_MONOID_INT64, // identity: 1 terminal: 0
GrB_TIMES_MONOID_UINT8, // identity: 1 terminal: 0
GrB_TIMES_MONOID_UINT16, // identity: 1 terminal: 0
GrB_TIMES_MONOID_UINT32, // identity: 1 terminal: 0
GrB_TIMES_MONOID_UINT64, // identity: 1 terminal: 0
GrB_TIMES_MONOID_FP32, // identity: 1
GrB_TIMES_MONOID_FP64, // identity: 1
// complex monoids:
GxB_TIMES_FC32_MONOID, // identity: 1
GxB_TIMES_FC64_MONOID, // identity: 1
//--------------------------------------------------------------------------
// 13 ANY monoids:
//--------------------------------------------------------------------------
GxB_ANY_BOOL_MONOID, // identity: any value terminal: any value
GxB_ANY_INT8_MONOID, // identity: any value terminal: any value
GxB_ANY_INT16_MONOID, // identity: any value terminal: any value
GxB_ANY_INT32_MONOID, // identity: any value terminal: any value
GxB_ANY_INT64_MONOID, // identity: any value terminal: any value
GxB_ANY_UINT8_MONOID, // identity: any value terminal: any value
GxB_ANY_UINT16_MONOID, // identity: any value terminal: any value
GxB_ANY_UINT32_MONOID, // identity: any value terminal: any value
GxB_ANY_UINT64_MONOID, // identity: any value terminal: any value
GxB_ANY_FP32_MONOID, // identity: any value terminal: any value
GxB_ANY_FP64_MONOID, // identity: any value terminal: any value
GxB_ANY_FC32_MONOID, // identity: any value terminal: any value
GxB_ANY_FC64_MONOID, // identity: any value terminal: any value
//--------------------------------------------------------------------------
// 4 Boolean monoids: (see also the GxB_ANY_BOOL_MONOID above)
//--------------------------------------------------------------------------
// GxB_* boolean monoids, historical, use GrB_* instead:
GxB_LOR_BOOL_MONOID, // identity: false terminal: true
GxB_LAND_BOOL_MONOID, // identity: true terminal: false
GxB_LXOR_BOOL_MONOID, // identity: false
GxB_LXNOR_BOOL_MONOID, // identity: true
GxB_EQ_BOOL_MONOID, // (alternative name for GrB_LXNOR_MONOID_BOOL)
// preferred names from the v1.3 spec:
GrB_LOR_MONOID_BOOL, // identity: false terminal: true
GrB_LAND_MONOID_BOOL, // identity: true terminal: false
GrB_LXOR_MONOID_BOOL, // identity: false
GrB_LXNOR_MONOID_BOOL, // identity: true
//--------------------------------------------------------------------------
// 16 Bitwise-or monoids:
//--------------------------------------------------------------------------
// BOR monoids (bitwise or):
GxB_BOR_UINT8_MONOID, // identity: 0 terminal: 0xFF
GxB_BOR_UINT16_MONOID, // identity: 0 terminal: 0xFFFF
GxB_BOR_UINT32_MONOID, // identity: 0 terminal: 0xFFFFFFFF
GxB_BOR_UINT64_MONOID, // identity: 0 terminal: 0xFFFFFFFFFFFFFFFF
// BAND monoids (bitwise and):
GxB_BAND_UINT8_MONOID, // identity: 0xFF terminal: 0
GxB_BAND_UINT16_MONOID, // identity: 0xFFFF terminal: 0
GxB_BAND_UINT32_MONOID, // identity: 0xFFFFFFFF terminal: 0
GxB_BAND_UINT64_MONOID, // identity: 0xFFFFFFFFFFFFFFFF terminal: 0
// BXOR monoids (bitwise xor):
GxB_BXOR_UINT8_MONOID, // identity: 0
GxB_BXOR_UINT16_MONOID, // identity: 0
GxB_BXOR_UINT32_MONOID, // identity: 0
GxB_BXOR_UINT64_MONOID, // identity: 0
// BXNOR monoids (bitwise xnor):
GxB_BXNOR_UINT8_MONOID, // identity: 0xFF
GxB_BXNOR_UINT16_MONOID, // identity: 0xFFFF
GxB_BXNOR_UINT32_MONOID, // identity: 0xFFFFFFFF
GxB_BXNOR_UINT64_MONOID ; // identity: 0xFFFFFFFFFFFFFFFF
//==============================================================================
// GrB_Semiring: built-in semirings
//==============================================================================
// Using built-in types and operators, SuiteSparse:GraphBLAS provides
// 1553 pre-defined, built-in semirings:
// 1000 semirings with a multiply operator TxT -> T where T is non-Boolean,
// from the complete cross product of:
// 5 monoids: MIN, MAX, PLUS, TIMES, ANY
// 20 multiply operators:
// FIRST, SECOND, PAIR (=ONEB), MIN, MAX, PLUS, MINUS, TIMES, DIV,
// RDIV, RMINUS
// ISEQ, ISNE, ISGT, ISLT, ISGE, ISLE,
// LOR, LAND, LXOR
// 10 non-Boolean real types, T
//
// Note that min_pair, max_pair, times_pair are all identical to any_pair.
// These 30 semirings are named below, but are internally remapped to
// their corresponding any_pair semiring.
// 300 semirings with a comparator TxT -> bool, where T is
// non-Boolean, from the complete cross product of:
// 5 Boolean monoids: LAND, LOR, LXOR, EQ (=LXNOR), ANY
// 6 multiply operators: EQ, NE, GT, LT, GE, LE
// 10 non-Boolean real types, T
// 55 semirings with purely Boolean types, bool x bool -> bool, from the
// complete cross product of:
// 5 Boolean monoids LAND, LOR, LXOR, EQ (=LXNOR), ANY
// 11 multiply operators:
// FIRST, SECOND, LOR, LAND, LXOR, EQ (=LXNOR), GT, LT, GE, LE,
// PAIR (=ONEB)
//
// Note that lor_pair, land_pair, and eq_pair are all identical to
// any_pair. These 3 semirings are named below, but are internally
// remapped to any_pair_bool semiring.
// 54 complex semirings: TxT -> T where T is float complex or double complex:
// 3 complex monoids: PLUS, TIMES, ANY
// 9 complex multiply operators:
// FIRST, SECOND, PAIR (=ONEB), PLUS, MINUS, TIMES, DIV, RDIV, RMINUS
// 2 complex types
//
// Note that times_pair is identical to any_pair.
// These 2 semirings are named below, but are internally remapped to
// their corresponding any_pair semiring.
// 64 bitwise semirings: TxT -> T where T is an unsigned integer:
// 4 bitwise monoids: BOR, BAND, BXOR, BXNOR
// 4 bitwise multiply operators: BOR, BAND, BXOR, BXNOR
// 4 unsigned integer types: UINT8, UINT16, UINT32, UINT64
// 80 positional semirings: XxX -> T where T is int64 or int32, and the type of
// X is ignored:
// 5 monoids: MIN, MAX, PLUS, TIMES, ANY
// 8 multiply operators:
// FIRSTI, FIRSTI1, FIRSTJ, FIRSTJ1,
// SECONDI, SECONDI1, SECONDJ, SECONDJ1
// 2 types: int32, int64
// The ANY operator is also valid to use as a multiplicative operator in a
// semiring, but serves no purpose in that case. The ANY operator is meant as
// a fast additive operator for a monoid, that terminates, or short-circuits,
// as soon as any value is found. A valid user semiring can be constructed
// with ANY as the multiply operator, but they are not predefined below.
// Likewise, additional built-in operators can be used as multiplicative
// operators for floating-point semirings (POW, ATAN2, HYPOT, ...) and many
// more semirings can be constructed from bitwise monoids and many integer
// binary (non-bitwise) multiplicative operators, but these are not
// pre-defined.
// In the names below, each semiring has a name of the form GxB_add_mult_T
// where add is the additive monoid, mult is the multiply operator, and T is
// the type. The type T is always the type of x and y for the z=mult(x,y)
// operator. The monoid's three types and the ztype of the mult operator are
// always the same. This is the type T for the first set, and Boolean for
// the second and third sets of semirings.
// 1553 = 1000 + 300 + 55 + 54 + 64 + 80 semirings are named below, but 35 = 30
// + 3 + 2 are identical to the corresponding any_pair semirings of the same
// type. For positional semirings, the multiply ops FIRSTJ and SECONDI are
// identical, as are FIRSTJ1 and SECONDI1. These semirings still appear as
// predefined, for convenience.
GB_PUBLIC GrB_Semiring
//------------------------------------------------------------------------------
// 1000 non-Boolean semirings where all types are the same, given by suffix _T
//------------------------------------------------------------------------------
// semirings with multiply op: z = FIRST (x,y), all types x,y,z the same:
GxB_MIN_FIRST_INT8 , GxB_MAX_FIRST_INT8 , GxB_PLUS_FIRST_INT8 , GxB_TIMES_FIRST_INT8 , GxB_ANY_FIRST_INT8 ,
GxB_MIN_FIRST_INT16 , GxB_MAX_FIRST_INT16 , GxB_PLUS_FIRST_INT16 , GxB_TIMES_FIRST_INT16 , GxB_ANY_FIRST_INT16 ,
GxB_MIN_FIRST_INT32 , GxB_MAX_FIRST_INT32 , GxB_PLUS_FIRST_INT32 , GxB_TIMES_FIRST_INT32 , GxB_ANY_FIRST_INT32 ,
GxB_MIN_FIRST_INT64 , GxB_MAX_FIRST_INT64 , GxB_PLUS_FIRST_INT64 , GxB_TIMES_FIRST_INT64 , GxB_ANY_FIRST_INT64 ,
GxB_MIN_FIRST_UINT8 , GxB_MAX_FIRST_UINT8 , GxB_PLUS_FIRST_UINT8 , GxB_TIMES_FIRST_UINT8 , GxB_ANY_FIRST_UINT8 ,
GxB_MIN_FIRST_UINT16 , GxB_MAX_FIRST_UINT16 , GxB_PLUS_FIRST_UINT16 , GxB_TIMES_FIRST_UINT16 , GxB_ANY_FIRST_UINT16 ,
GxB_MIN_FIRST_UINT32 , GxB_MAX_FIRST_UINT32 , GxB_PLUS_FIRST_UINT32 , GxB_TIMES_FIRST_UINT32 , GxB_ANY_FIRST_UINT32 ,
GxB_MIN_FIRST_UINT64 , GxB_MAX_FIRST_UINT64 , GxB_PLUS_FIRST_UINT64 , GxB_TIMES_FIRST_UINT64 , GxB_ANY_FIRST_UINT64 ,
GxB_MIN_FIRST_FP32 , GxB_MAX_FIRST_FP32 , GxB_PLUS_FIRST_FP32 , GxB_TIMES_FIRST_FP32 , GxB_ANY_FIRST_FP32 ,
GxB_MIN_FIRST_FP64 , GxB_MAX_FIRST_FP64 , GxB_PLUS_FIRST_FP64 , GxB_TIMES_FIRST_FP64 , GxB_ANY_FIRST_FP64 ,
// semirings with multiply op: z = SECOND (x,y), all types x,y,z the same:
GxB_MIN_SECOND_INT8 , GxB_MAX_SECOND_INT8 , GxB_PLUS_SECOND_INT8 , GxB_TIMES_SECOND_INT8 , GxB_ANY_SECOND_INT8 ,
GxB_MIN_SECOND_INT16 , GxB_MAX_SECOND_INT16 , GxB_PLUS_SECOND_INT16 , GxB_TIMES_SECOND_INT16 , GxB_ANY_SECOND_INT16 ,
GxB_MIN_SECOND_INT32 , GxB_MAX_SECOND_INT32 , GxB_PLUS_SECOND_INT32 , GxB_TIMES_SECOND_INT32 , GxB_ANY_SECOND_INT32 ,
GxB_MIN_SECOND_INT64 , GxB_MAX_SECOND_INT64 , GxB_PLUS_SECOND_INT64 , GxB_TIMES_SECOND_INT64 , GxB_ANY_SECOND_INT64 ,
GxB_MIN_SECOND_UINT8 , GxB_MAX_SECOND_UINT8 , GxB_PLUS_SECOND_UINT8 , GxB_TIMES_SECOND_UINT8 , GxB_ANY_SECOND_UINT8 ,
GxB_MIN_SECOND_UINT16 , GxB_MAX_SECOND_UINT16 , GxB_PLUS_SECOND_UINT16 , GxB_TIMES_SECOND_UINT16, GxB_ANY_SECOND_UINT16 ,
GxB_MIN_SECOND_UINT32 , GxB_MAX_SECOND_UINT32 , GxB_PLUS_SECOND_UINT32 , GxB_TIMES_SECOND_UINT32, GxB_ANY_SECOND_UINT32 ,
GxB_MIN_SECOND_UINT64 , GxB_MAX_SECOND_UINT64 , GxB_PLUS_SECOND_UINT64 , GxB_TIMES_SECOND_UINT64, GxB_ANY_SECOND_UINT64 ,
GxB_MIN_SECOND_FP32 , GxB_MAX_SECOND_FP32 , GxB_PLUS_SECOND_FP32 , GxB_TIMES_SECOND_FP32 , GxB_ANY_SECOND_FP32 ,
GxB_MIN_SECOND_FP64 , GxB_MAX_SECOND_FP64 , GxB_PLUS_SECOND_FP64 , GxB_TIMES_SECOND_FP64 , GxB_ANY_SECOND_FP64 ,
// semirings with multiply op: z = PAIR (x,y), all types x,y,z the same:
// (note that min_pair, max_pair, times_pair are all identical to any_pair, and are marked below)
GxB_MIN_PAIR_INT8 /**/, GxB_MAX_PAIR_INT8 /**/, GxB_PLUS_PAIR_INT8 , GxB_TIMES_PAIR_INT8 /**/, GxB_ANY_PAIR_INT8 ,
GxB_MIN_PAIR_INT16 /**/, GxB_MAX_PAIR_INT16 /**/, GxB_PLUS_PAIR_INT16 , GxB_TIMES_PAIR_INT16 /**/, GxB_ANY_PAIR_INT16 ,
GxB_MIN_PAIR_INT32 /**/, GxB_MAX_PAIR_INT32 /**/, GxB_PLUS_PAIR_INT32 , GxB_TIMES_PAIR_INT32 /**/, GxB_ANY_PAIR_INT32 ,
GxB_MIN_PAIR_INT64 /**/, GxB_MAX_PAIR_INT64 /**/, GxB_PLUS_PAIR_INT64 , GxB_TIMES_PAIR_INT64 /**/, GxB_ANY_PAIR_INT64 ,
GxB_MIN_PAIR_UINT8 /**/, GxB_MAX_PAIR_UINT8 /**/, GxB_PLUS_PAIR_UINT8 , GxB_TIMES_PAIR_UINT8 /**/, GxB_ANY_PAIR_UINT8 ,
GxB_MIN_PAIR_UINT16/**/, GxB_MAX_PAIR_UINT16/**/, GxB_PLUS_PAIR_UINT16 , GxB_TIMES_PAIR_UINT16/**/, GxB_ANY_PAIR_UINT16 ,
GxB_MIN_PAIR_UINT32/**/, GxB_MAX_PAIR_UINT32/**/, GxB_PLUS_PAIR_UINT32 , GxB_TIMES_PAIR_UINT32/**/, GxB_ANY_PAIR_UINT32 ,
GxB_MIN_PAIR_UINT64/**/, GxB_MAX_PAIR_UINT64/**/, GxB_PLUS_PAIR_UINT64 , GxB_TIMES_PAIR_UINT64/**/, GxB_ANY_PAIR_UINT64 ,
GxB_MIN_PAIR_FP32 /**/, GxB_MAX_PAIR_FP32 /**/, GxB_PLUS_PAIR_FP32 , GxB_TIMES_PAIR_FP32 /**/, GxB_ANY_PAIR_FP32 ,
GxB_MIN_PAIR_FP64 /**/, GxB_MAX_PAIR_FP64 /**/, GxB_PLUS_PAIR_FP64 , GxB_TIMES_PAIR_FP64 /**/, GxB_ANY_PAIR_FP64 ,
// semirings with multiply op: z = MIN (x,y), all types x,y,z the same:
GxB_MIN_MIN_INT8 , GxB_MAX_MIN_INT8 , GxB_PLUS_MIN_INT8 , GxB_TIMES_MIN_INT8 , GxB_ANY_MIN_INT8 ,
GxB_MIN_MIN_INT16 , GxB_MAX_MIN_INT16 , GxB_PLUS_MIN_INT16 , GxB_TIMES_MIN_INT16 , GxB_ANY_MIN_INT16 ,
GxB_MIN_MIN_INT32 , GxB_MAX_MIN_INT32 , GxB_PLUS_MIN_INT32 , GxB_TIMES_MIN_INT32 , GxB_ANY_MIN_INT32 ,
GxB_MIN_MIN_INT64 , GxB_MAX_MIN_INT64 , GxB_PLUS_MIN_INT64 , GxB_TIMES_MIN_INT64 , GxB_ANY_MIN_INT64 ,
GxB_MIN_MIN_UINT8 , GxB_MAX_MIN_UINT8 , GxB_PLUS_MIN_UINT8 , GxB_TIMES_MIN_UINT8 , GxB_ANY_MIN_UINT8 ,
GxB_MIN_MIN_UINT16 , GxB_MAX_MIN_UINT16 , GxB_PLUS_MIN_UINT16 , GxB_TIMES_MIN_UINT16 , GxB_ANY_MIN_UINT16 ,
GxB_MIN_MIN_UINT32 , GxB_MAX_MIN_UINT32 , GxB_PLUS_MIN_UINT32 , GxB_TIMES_MIN_UINT32 , GxB_ANY_MIN_UINT32 ,
GxB_MIN_MIN_UINT64 , GxB_MAX_MIN_UINT64 , GxB_PLUS_MIN_UINT64 , GxB_TIMES_MIN_UINT64 , GxB_ANY_MIN_UINT64 ,
GxB_MIN_MIN_FP32 , GxB_MAX_MIN_FP32 , GxB_PLUS_MIN_FP32 , GxB_TIMES_MIN_FP32 , GxB_ANY_MIN_FP32 ,
GxB_MIN_MIN_FP64 , GxB_MAX_MIN_FP64 , GxB_PLUS_MIN_FP64 , GxB_TIMES_MIN_FP64 , GxB_ANY_MIN_FP64 ,
// semirings with multiply op: z = MAX (x,y), all types x,y,z the same:
GxB_MIN_MAX_INT8 , GxB_MAX_MAX_INT8 , GxB_PLUS_MAX_INT8 , GxB_TIMES_MAX_INT8 , GxB_ANY_MAX_INT8 ,
GxB_MIN_MAX_INT16 , GxB_MAX_MAX_INT16 , GxB_PLUS_MAX_INT16 , GxB_TIMES_MAX_INT16 , GxB_ANY_MAX_INT16 ,
GxB_MIN_MAX_INT32 , GxB_MAX_MAX_INT32 , GxB_PLUS_MAX_INT32 , GxB_TIMES_MAX_INT32 , GxB_ANY_MAX_INT32 ,
GxB_MIN_MAX_INT64 , GxB_MAX_MAX_INT64 , GxB_PLUS_MAX_INT64 , GxB_TIMES_MAX_INT64 , GxB_ANY_MAX_INT64 ,
GxB_MIN_MAX_UINT8 , GxB_MAX_MAX_UINT8 , GxB_PLUS_MAX_UINT8 , GxB_TIMES_MAX_UINT8 , GxB_ANY_MAX_UINT8 ,
GxB_MIN_MAX_UINT16 , GxB_MAX_MAX_UINT16 , GxB_PLUS_MAX_UINT16 , GxB_TIMES_MAX_UINT16 , GxB_ANY_MAX_UINT16 ,
GxB_MIN_MAX_UINT32 , GxB_MAX_MAX_UINT32 , GxB_PLUS_MAX_UINT32 , GxB_TIMES_MAX_UINT32 , GxB_ANY_MAX_UINT32 ,
GxB_MIN_MAX_UINT64 , GxB_MAX_MAX_UINT64 , GxB_PLUS_MAX_UINT64 , GxB_TIMES_MAX_UINT64 , GxB_ANY_MAX_UINT64 ,
GxB_MIN_MAX_FP32 , GxB_MAX_MAX_FP32 , GxB_PLUS_MAX_FP32 , GxB_TIMES_MAX_FP32 , GxB_ANY_MAX_FP32 ,
GxB_MIN_MAX_FP64 , GxB_MAX_MAX_FP64 , GxB_PLUS_MAX_FP64 , GxB_TIMES_MAX_FP64 , GxB_ANY_MAX_FP64 ,
// semirings with multiply op: z = PLUS (x,y), all types x,y,z the same:
GxB_MIN_PLUS_INT8 , GxB_MAX_PLUS_INT8 , GxB_PLUS_PLUS_INT8 , GxB_TIMES_PLUS_INT8 , GxB_ANY_PLUS_INT8 ,
GxB_MIN_PLUS_INT16 , GxB_MAX_PLUS_INT16 , GxB_PLUS_PLUS_INT16 , GxB_TIMES_PLUS_INT16 , GxB_ANY_PLUS_INT16 ,
GxB_MIN_PLUS_INT32 , GxB_MAX_PLUS_INT32 , GxB_PLUS_PLUS_INT32 , GxB_TIMES_PLUS_INT32 , GxB_ANY_PLUS_INT32 ,
GxB_MIN_PLUS_INT64 , GxB_MAX_PLUS_INT64 , GxB_PLUS_PLUS_INT64 , GxB_TIMES_PLUS_INT64 , GxB_ANY_PLUS_INT64 ,
GxB_MIN_PLUS_UINT8 , GxB_MAX_PLUS_UINT8 , GxB_PLUS_PLUS_UINT8 , GxB_TIMES_PLUS_UINT8 , GxB_ANY_PLUS_UINT8 ,
GxB_MIN_PLUS_UINT16 , GxB_MAX_PLUS_UINT16 , GxB_PLUS_PLUS_UINT16 , GxB_TIMES_PLUS_UINT16 , GxB_ANY_PLUS_UINT16 ,
GxB_MIN_PLUS_UINT32 , GxB_MAX_PLUS_UINT32 , GxB_PLUS_PLUS_UINT32 , GxB_TIMES_PLUS_UINT32 , GxB_ANY_PLUS_UINT32 ,
GxB_MIN_PLUS_UINT64 , GxB_MAX_PLUS_UINT64 , GxB_PLUS_PLUS_UINT64 , GxB_TIMES_PLUS_UINT64 , GxB_ANY_PLUS_UINT64 ,
GxB_MIN_PLUS_FP32 , GxB_MAX_PLUS_FP32 , GxB_PLUS_PLUS_FP32 , GxB_TIMES_PLUS_FP32 , GxB_ANY_PLUS_FP32 ,
GxB_MIN_PLUS_FP64 , GxB_MAX_PLUS_FP64 , GxB_PLUS_PLUS_FP64 , GxB_TIMES_PLUS_FP64 , GxB_ANY_PLUS_FP64 ,
// semirings with multiply op: z = MINUS (x,y), all types x,y,z the same:
GxB_MIN_MINUS_INT8 , GxB_MAX_MINUS_INT8 , GxB_PLUS_MINUS_INT8 , GxB_TIMES_MINUS_INT8 , GxB_ANY_MINUS_INT8 ,
GxB_MIN_MINUS_INT16 , GxB_MAX_MINUS_INT16 , GxB_PLUS_MINUS_INT16 , GxB_TIMES_MINUS_INT16 , GxB_ANY_MINUS_INT16 ,
GxB_MIN_MINUS_INT32 , GxB_MAX_MINUS_INT32 , GxB_PLUS_MINUS_INT32 , GxB_TIMES_MINUS_INT32 , GxB_ANY_MINUS_INT32 ,
GxB_MIN_MINUS_INT64 , GxB_MAX_MINUS_INT64 , GxB_PLUS_MINUS_INT64 , GxB_TIMES_MINUS_INT64 , GxB_ANY_MINUS_INT64 ,
GxB_MIN_MINUS_UINT8 , GxB_MAX_MINUS_UINT8 , GxB_PLUS_MINUS_UINT8 , GxB_TIMES_MINUS_UINT8 , GxB_ANY_MINUS_UINT8 ,
GxB_MIN_MINUS_UINT16 , GxB_MAX_MINUS_UINT16 , GxB_PLUS_MINUS_UINT16 , GxB_TIMES_MINUS_UINT16 , GxB_ANY_MINUS_UINT16 ,
GxB_MIN_MINUS_UINT32 , GxB_MAX_MINUS_UINT32 , GxB_PLUS_MINUS_UINT32 , GxB_TIMES_MINUS_UINT32 , GxB_ANY_MINUS_UINT32 ,
GxB_MIN_MINUS_UINT64 , GxB_MAX_MINUS_UINT64 , GxB_PLUS_MINUS_UINT64 , GxB_TIMES_MINUS_UINT64 , GxB_ANY_MINUS_UINT64 ,
GxB_MIN_MINUS_FP32 , GxB_MAX_MINUS_FP32 , GxB_PLUS_MINUS_FP32 , GxB_TIMES_MINUS_FP32 , GxB_ANY_MINUS_FP32 ,
GxB_MIN_MINUS_FP64 , GxB_MAX_MINUS_FP64 , GxB_PLUS_MINUS_FP64 , GxB_TIMES_MINUS_FP64 , GxB_ANY_MINUS_FP64 ,
// semirings with multiply op: z = TIMES (x,y), all types x,y,z the same:
GxB_MIN_TIMES_INT8 , GxB_MAX_TIMES_INT8 , GxB_PLUS_TIMES_INT8 , GxB_TIMES_TIMES_INT8 , GxB_ANY_TIMES_INT8 ,
GxB_MIN_TIMES_INT16 , GxB_MAX_TIMES_INT16 , GxB_PLUS_TIMES_INT16 , GxB_TIMES_TIMES_INT16 , GxB_ANY_TIMES_INT16 ,
GxB_MIN_TIMES_INT32 , GxB_MAX_TIMES_INT32 , GxB_PLUS_TIMES_INT32 , GxB_TIMES_TIMES_INT32 , GxB_ANY_TIMES_INT32 ,
GxB_MIN_TIMES_INT64 , GxB_MAX_TIMES_INT64 , GxB_PLUS_TIMES_INT64 , GxB_TIMES_TIMES_INT64 , GxB_ANY_TIMES_INT64 ,
GxB_MIN_TIMES_UINT8 , GxB_MAX_TIMES_UINT8 , GxB_PLUS_TIMES_UINT8 , GxB_TIMES_TIMES_UINT8 , GxB_ANY_TIMES_UINT8 ,
GxB_MIN_TIMES_UINT16 , GxB_MAX_TIMES_UINT16 , GxB_PLUS_TIMES_UINT16 , GxB_TIMES_TIMES_UINT16 , GxB_ANY_TIMES_UINT16 ,
GxB_MIN_TIMES_UINT32 , GxB_MAX_TIMES_UINT32 , GxB_PLUS_TIMES_UINT32 , GxB_TIMES_TIMES_UINT32 , GxB_ANY_TIMES_UINT32 ,
GxB_MIN_TIMES_UINT64 , GxB_MAX_TIMES_UINT64 , GxB_PLUS_TIMES_UINT64 , GxB_TIMES_TIMES_UINT64 , GxB_ANY_TIMES_UINT64 ,
GxB_MIN_TIMES_FP32 , GxB_MAX_TIMES_FP32 , GxB_PLUS_TIMES_FP32 , GxB_TIMES_TIMES_FP32 , GxB_ANY_TIMES_FP32 ,
GxB_MIN_TIMES_FP64 , GxB_MAX_TIMES_FP64 , GxB_PLUS_TIMES_FP64 , GxB_TIMES_TIMES_FP64 , GxB_ANY_TIMES_FP64 ,
// semirings with multiply op: z = DIV (x,y), all types x,y,z the same:
GxB_MIN_DIV_INT8 , GxB_MAX_DIV_INT8 , GxB_PLUS_DIV_INT8 , GxB_TIMES_DIV_INT8 , GxB_ANY_DIV_INT8 ,
GxB_MIN_DIV_INT16 , GxB_MAX_DIV_INT16 , GxB_PLUS_DIV_INT16 , GxB_TIMES_DIV_INT16 , GxB_ANY_DIV_INT16 ,
GxB_MIN_DIV_INT32 , GxB_MAX_DIV_INT32 , GxB_PLUS_DIV_INT32 , GxB_TIMES_DIV_INT32 , GxB_ANY_DIV_INT32 ,
GxB_MIN_DIV_INT64 , GxB_MAX_DIV_INT64 , GxB_PLUS_DIV_INT64 , GxB_TIMES_DIV_INT64 , GxB_ANY_DIV_INT64 ,
GxB_MIN_DIV_UINT8 , GxB_MAX_DIV_UINT8 , GxB_PLUS_DIV_UINT8 , GxB_TIMES_DIV_UINT8 , GxB_ANY_DIV_UINT8 ,
GxB_MIN_DIV_UINT16 , GxB_MAX_DIV_UINT16 , GxB_PLUS_DIV_UINT16 , GxB_TIMES_DIV_UINT16 , GxB_ANY_DIV_UINT16 ,
GxB_MIN_DIV_UINT32 , GxB_MAX_DIV_UINT32 , GxB_PLUS_DIV_UINT32 , GxB_TIMES_DIV_UINT32 , GxB_ANY_DIV_UINT32 ,
GxB_MIN_DIV_UINT64 , GxB_MAX_DIV_UINT64 , GxB_PLUS_DIV_UINT64 , GxB_TIMES_DIV_UINT64 , GxB_ANY_DIV_UINT64 ,
GxB_MIN_DIV_FP32 , GxB_MAX_DIV_FP32 , GxB_PLUS_DIV_FP32 , GxB_TIMES_DIV_FP32 , GxB_ANY_DIV_FP32 ,
GxB_MIN_DIV_FP64 , GxB_MAX_DIV_FP64 , GxB_PLUS_DIV_FP64 , GxB_TIMES_DIV_FP64 , GxB_ANY_DIV_FP64 ,
// semirings with multiply op: z = RDIV (x,y), all types x,y,z the same:
GxB_MIN_RDIV_INT8 , GxB_MAX_RDIV_INT8 , GxB_PLUS_RDIV_INT8 , GxB_TIMES_RDIV_INT8 , GxB_ANY_RDIV_INT8 ,
GxB_MIN_RDIV_INT16 , GxB_MAX_RDIV_INT16 , GxB_PLUS_RDIV_INT16 , GxB_TIMES_RDIV_INT16 , GxB_ANY_RDIV_INT16 ,
GxB_MIN_RDIV_INT32 , GxB_MAX_RDIV_INT32 , GxB_PLUS_RDIV_INT32 , GxB_TIMES_RDIV_INT32 , GxB_ANY_RDIV_INT32 ,
GxB_MIN_RDIV_INT64 , GxB_MAX_RDIV_INT64 , GxB_PLUS_RDIV_INT64 , GxB_TIMES_RDIV_INT64 , GxB_ANY_RDIV_INT64 ,
GxB_MIN_RDIV_UINT8 , GxB_MAX_RDIV_UINT8 , GxB_PLUS_RDIV_UINT8 , GxB_TIMES_RDIV_UINT8 , GxB_ANY_RDIV_UINT8 ,
GxB_MIN_RDIV_UINT16 , GxB_MAX_RDIV_UINT16 , GxB_PLUS_RDIV_UINT16 , GxB_TIMES_RDIV_UINT16 , GxB_ANY_RDIV_UINT16 ,
GxB_MIN_RDIV_UINT32 , GxB_MAX_RDIV_UINT32 , GxB_PLUS_RDIV_UINT32 , GxB_TIMES_RDIV_UINT32 , GxB_ANY_RDIV_UINT32 ,
GxB_MIN_RDIV_UINT64 , GxB_MAX_RDIV_UINT64 , GxB_PLUS_RDIV_UINT64 , GxB_TIMES_RDIV_UINT64 , GxB_ANY_RDIV_UINT64 ,
GxB_MIN_RDIV_FP32 , GxB_MAX_RDIV_FP32 , GxB_PLUS_RDIV_FP32 , GxB_TIMES_RDIV_FP32 , GxB_ANY_RDIV_FP32 ,
GxB_MIN_RDIV_FP64 , GxB_MAX_RDIV_FP64 , GxB_PLUS_RDIV_FP64 , GxB_TIMES_RDIV_FP64 , GxB_ANY_RDIV_FP64 ,
// semirings with multiply op: z = RMINUS (x,y), all types x,y,z the same:
GxB_MIN_RMINUS_INT8 , GxB_MAX_RMINUS_INT8 , GxB_PLUS_RMINUS_INT8 , GxB_TIMES_RMINUS_INT8 , GxB_ANY_RMINUS_INT8 ,
GxB_MIN_RMINUS_INT16 , GxB_MAX_RMINUS_INT16 , GxB_PLUS_RMINUS_INT16 , GxB_TIMES_RMINUS_INT16 , GxB_ANY_RMINUS_INT16 ,
GxB_MIN_RMINUS_INT32 , GxB_MAX_RMINUS_INT32 , GxB_PLUS_RMINUS_INT32 , GxB_TIMES_RMINUS_INT32 , GxB_ANY_RMINUS_INT32 ,
GxB_MIN_RMINUS_INT64 , GxB_MAX_RMINUS_INT64 , GxB_PLUS_RMINUS_INT64 , GxB_TIMES_RMINUS_INT64 , GxB_ANY_RMINUS_INT64 ,
GxB_MIN_RMINUS_UINT8 , GxB_MAX_RMINUS_UINT8 , GxB_PLUS_RMINUS_UINT8 , GxB_TIMES_RMINUS_UINT8 , GxB_ANY_RMINUS_UINT8 ,
GxB_MIN_RMINUS_UINT16 , GxB_MAX_RMINUS_UINT16 , GxB_PLUS_RMINUS_UINT16 , GxB_TIMES_RMINUS_UINT16, GxB_ANY_RMINUS_UINT16 ,
GxB_MIN_RMINUS_UINT32 , GxB_MAX_RMINUS_UINT32 , GxB_PLUS_RMINUS_UINT32 , GxB_TIMES_RMINUS_UINT32, GxB_ANY_RMINUS_UINT32 ,
GxB_MIN_RMINUS_UINT64 , GxB_MAX_RMINUS_UINT64 , GxB_PLUS_RMINUS_UINT64 , GxB_TIMES_RMINUS_UINT64, GxB_ANY_RMINUS_UINT64 ,
GxB_MIN_RMINUS_FP32 , GxB_MAX_RMINUS_FP32 , GxB_PLUS_RMINUS_FP32 , GxB_TIMES_RMINUS_FP32 , GxB_ANY_RMINUS_FP32 ,
GxB_MIN_RMINUS_FP64 , GxB_MAX_RMINUS_FP64 , GxB_PLUS_RMINUS_FP64 , GxB_TIMES_RMINUS_FP64 , GxB_ANY_RMINUS_FP64 ,
// semirings with multiply op: z = ISEQ (x,y), all types x,y,z the same:
GxB_MIN_ISEQ_INT8 , GxB_MAX_ISEQ_INT8 , GxB_PLUS_ISEQ_INT8 , GxB_TIMES_ISEQ_INT8 , GxB_ANY_ISEQ_INT8 ,
GxB_MIN_ISEQ_INT16 , GxB_MAX_ISEQ_INT16 , GxB_PLUS_ISEQ_INT16 , GxB_TIMES_ISEQ_INT16 , GxB_ANY_ISEQ_INT16 ,
GxB_MIN_ISEQ_INT32 , GxB_MAX_ISEQ_INT32 , GxB_PLUS_ISEQ_INT32 , GxB_TIMES_ISEQ_INT32 , GxB_ANY_ISEQ_INT32 ,
GxB_MIN_ISEQ_INT64 , GxB_MAX_ISEQ_INT64 , GxB_PLUS_ISEQ_INT64 , GxB_TIMES_ISEQ_INT64 , GxB_ANY_ISEQ_INT64 ,
GxB_MIN_ISEQ_UINT8 , GxB_MAX_ISEQ_UINT8 , GxB_PLUS_ISEQ_UINT8 , GxB_TIMES_ISEQ_UINT8 , GxB_ANY_ISEQ_UINT8 ,
GxB_MIN_ISEQ_UINT16 , GxB_MAX_ISEQ_UINT16 , GxB_PLUS_ISEQ_UINT16 , GxB_TIMES_ISEQ_UINT16 , GxB_ANY_ISEQ_UINT16 ,
GxB_MIN_ISEQ_UINT32 , GxB_MAX_ISEQ_UINT32 , GxB_PLUS_ISEQ_UINT32 , GxB_TIMES_ISEQ_UINT32 , GxB_ANY_ISEQ_UINT32 ,
GxB_MIN_ISEQ_UINT64 , GxB_MAX_ISEQ_UINT64 , GxB_PLUS_ISEQ_UINT64 , GxB_TIMES_ISEQ_UINT64 , GxB_ANY_ISEQ_UINT64 ,
GxB_MIN_ISEQ_FP32 , GxB_MAX_ISEQ_FP32 , GxB_PLUS_ISEQ_FP32 , GxB_TIMES_ISEQ_FP32 , GxB_ANY_ISEQ_FP32 ,
GxB_MIN_ISEQ_FP64 , GxB_MAX_ISEQ_FP64 , GxB_PLUS_ISEQ_FP64 , GxB_TIMES_ISEQ_FP64 , GxB_ANY_ISEQ_FP64 ,
// semirings with multiply op: z = ISNE (x,y), all types x,y,z the same:
GxB_MIN_ISNE_INT8 , GxB_MAX_ISNE_INT8 , GxB_PLUS_ISNE_INT8 , GxB_TIMES_ISNE_INT8 , GxB_ANY_ISNE_INT8 ,
GxB_MIN_ISNE_INT16 , GxB_MAX_ISNE_INT16 , GxB_PLUS_ISNE_INT16 , GxB_TIMES_ISNE_INT16 , GxB_ANY_ISNE_INT16 ,
GxB_MIN_ISNE_INT32 , GxB_MAX_ISNE_INT32 , GxB_PLUS_ISNE_INT32 , GxB_TIMES_ISNE_INT32 , GxB_ANY_ISNE_INT32 ,
GxB_MIN_ISNE_INT64 , GxB_MAX_ISNE_INT64 , GxB_PLUS_ISNE_INT64 , GxB_TIMES_ISNE_INT64 , GxB_ANY_ISNE_INT64 ,
GxB_MIN_ISNE_UINT8 , GxB_MAX_ISNE_UINT8 , GxB_PLUS_ISNE_UINT8 , GxB_TIMES_ISNE_UINT8 , GxB_ANY_ISNE_UINT8 ,
GxB_MIN_ISNE_UINT16 , GxB_MAX_ISNE_UINT16 , GxB_PLUS_ISNE_UINT16 , GxB_TIMES_ISNE_UINT16 , GxB_ANY_ISNE_UINT16 ,
GxB_MIN_ISNE_UINT32 , GxB_MAX_ISNE_UINT32 , GxB_PLUS_ISNE_UINT32 , GxB_TIMES_ISNE_UINT32 , GxB_ANY_ISNE_UINT32 ,
GxB_MIN_ISNE_UINT64 , GxB_MAX_ISNE_UINT64 , GxB_PLUS_ISNE_UINT64 , GxB_TIMES_ISNE_UINT64 , GxB_ANY_ISNE_UINT64 ,
GxB_MIN_ISNE_FP32 , GxB_MAX_ISNE_FP32 , GxB_PLUS_ISNE_FP32 , GxB_TIMES_ISNE_FP32 , GxB_ANY_ISNE_FP32 ,
GxB_MIN_ISNE_FP64 , GxB_MAX_ISNE_FP64 , GxB_PLUS_ISNE_FP64 , GxB_TIMES_ISNE_FP64 , GxB_ANY_ISNE_FP64 ,
// semirings with multiply op: z = ISGT (x,y), all types x,y,z the same:
GxB_MIN_ISGT_INT8 , GxB_MAX_ISGT_INT8 , GxB_PLUS_ISGT_INT8 , GxB_TIMES_ISGT_INT8 , GxB_ANY_ISGT_INT8 ,
GxB_MIN_ISGT_INT16 , GxB_MAX_ISGT_INT16 , GxB_PLUS_ISGT_INT16 , GxB_TIMES_ISGT_INT16 , GxB_ANY_ISGT_INT16 ,
GxB_MIN_ISGT_INT32 , GxB_MAX_ISGT_INT32 , GxB_PLUS_ISGT_INT32 , GxB_TIMES_ISGT_INT32 , GxB_ANY_ISGT_INT32 ,
GxB_MIN_ISGT_INT64 , GxB_MAX_ISGT_INT64 , GxB_PLUS_ISGT_INT64 , GxB_TIMES_ISGT_INT64 , GxB_ANY_ISGT_INT64 ,
GxB_MIN_ISGT_UINT8 , GxB_MAX_ISGT_UINT8 , GxB_PLUS_ISGT_UINT8 , GxB_TIMES_ISGT_UINT8 , GxB_ANY_ISGT_UINT8 ,
GxB_MIN_ISGT_UINT16 , GxB_MAX_ISGT_UINT16 , GxB_PLUS_ISGT_UINT16 , GxB_TIMES_ISGT_UINT16 , GxB_ANY_ISGT_UINT16 ,
GxB_MIN_ISGT_UINT32 , GxB_MAX_ISGT_UINT32 , GxB_PLUS_ISGT_UINT32 , GxB_TIMES_ISGT_UINT32 , GxB_ANY_ISGT_UINT32 ,
GxB_MIN_ISGT_UINT64 , GxB_MAX_ISGT_UINT64 , GxB_PLUS_ISGT_UINT64 , GxB_TIMES_ISGT_UINT64 , GxB_ANY_ISGT_UINT64 ,
GxB_MIN_ISGT_FP32 , GxB_MAX_ISGT_FP32 , GxB_PLUS_ISGT_FP32 , GxB_TIMES_ISGT_FP32 , GxB_ANY_ISGT_FP32 ,
GxB_MIN_ISGT_FP64 , GxB_MAX_ISGT_FP64 , GxB_PLUS_ISGT_FP64 , GxB_TIMES_ISGT_FP64 , GxB_ANY_ISGT_FP64 ,
// semirings with multiply op: z = ISLT (x,y), all types x,y,z the same:
GxB_MIN_ISLT_INT8 , GxB_MAX_ISLT_INT8 , GxB_PLUS_ISLT_INT8 , GxB_TIMES_ISLT_INT8 , GxB_ANY_ISLT_INT8 ,
GxB_MIN_ISLT_INT16 , GxB_MAX_ISLT_INT16 , GxB_PLUS_ISLT_INT16 , GxB_TIMES_ISLT_INT16 , GxB_ANY_ISLT_INT16 ,
GxB_MIN_ISLT_INT32 , GxB_MAX_ISLT_INT32 , GxB_PLUS_ISLT_INT32 , GxB_TIMES_ISLT_INT32 , GxB_ANY_ISLT_INT32 ,
GxB_MIN_ISLT_INT64 , GxB_MAX_ISLT_INT64 , GxB_PLUS_ISLT_INT64 , GxB_TIMES_ISLT_INT64 , GxB_ANY_ISLT_INT64 ,
GxB_MIN_ISLT_UINT8 , GxB_MAX_ISLT_UINT8 , GxB_PLUS_ISLT_UINT8 , GxB_TIMES_ISLT_UINT8 , GxB_ANY_ISLT_UINT8 ,
GxB_MIN_ISLT_UINT16 , GxB_MAX_ISLT_UINT16 , GxB_PLUS_ISLT_UINT16 , GxB_TIMES_ISLT_UINT16 , GxB_ANY_ISLT_UINT16 ,
GxB_MIN_ISLT_UINT32 , GxB_MAX_ISLT_UINT32 , GxB_PLUS_ISLT_UINT32 , GxB_TIMES_ISLT_UINT32 , GxB_ANY_ISLT_UINT32 ,
GxB_MIN_ISLT_UINT64 , GxB_MAX_ISLT_UINT64 , GxB_PLUS_ISLT_UINT64 , GxB_TIMES_ISLT_UINT64 , GxB_ANY_ISLT_UINT64 ,
GxB_MIN_ISLT_FP32 , GxB_MAX_ISLT_FP32 , GxB_PLUS_ISLT_FP32 , GxB_TIMES_ISLT_FP32 , GxB_ANY_ISLT_FP32 ,
GxB_MIN_ISLT_FP64 , GxB_MAX_ISLT_FP64 , GxB_PLUS_ISLT_FP64 , GxB_TIMES_ISLT_FP64 , GxB_ANY_ISLT_FP64 ,
// semirings with multiply op: z = ISGE (x,y), all types x,y,z the same:
GxB_MIN_ISGE_INT8 , GxB_MAX_ISGE_INT8 , GxB_PLUS_ISGE_INT8 , GxB_TIMES_ISGE_INT8 , GxB_ANY_ISGE_INT8 ,
GxB_MIN_ISGE_INT16 , GxB_MAX_ISGE_INT16 , GxB_PLUS_ISGE_INT16 , GxB_TIMES_ISGE_INT16 , GxB_ANY_ISGE_INT16 ,
GxB_MIN_ISGE_INT32 , GxB_MAX_ISGE_INT32 , GxB_PLUS_ISGE_INT32 , GxB_TIMES_ISGE_INT32 , GxB_ANY_ISGE_INT32 ,
GxB_MIN_ISGE_INT64 , GxB_MAX_ISGE_INT64 , GxB_PLUS_ISGE_INT64 , GxB_TIMES_ISGE_INT64 , GxB_ANY_ISGE_INT64 ,
GxB_MIN_ISGE_UINT8 , GxB_MAX_ISGE_UINT8 , GxB_PLUS_ISGE_UINT8 , GxB_TIMES_ISGE_UINT8 , GxB_ANY_ISGE_UINT8 ,
GxB_MIN_ISGE_UINT16 , GxB_MAX_ISGE_UINT16 , GxB_PLUS_ISGE_UINT16 , GxB_TIMES_ISGE_UINT16 , GxB_ANY_ISGE_UINT16 ,
GxB_MIN_ISGE_UINT32 , GxB_MAX_ISGE_UINT32 , GxB_PLUS_ISGE_UINT32 , GxB_TIMES_ISGE_UINT32 , GxB_ANY_ISGE_UINT32 ,
GxB_MIN_ISGE_UINT64 , GxB_MAX_ISGE_UINT64 , GxB_PLUS_ISGE_UINT64 , GxB_TIMES_ISGE_UINT64 , GxB_ANY_ISGE_UINT64 ,
GxB_MIN_ISGE_FP32 , GxB_MAX_ISGE_FP32 , GxB_PLUS_ISGE_FP32 , GxB_TIMES_ISGE_FP32 , GxB_ANY_ISGE_FP32 ,
GxB_MIN_ISGE_FP64 , GxB_MAX_ISGE_FP64 , GxB_PLUS_ISGE_FP64 , GxB_TIMES_ISGE_FP64 , GxB_ANY_ISGE_FP64 ,
// semirings with multiply op: z = ISLE (x,y), all types x,y,z the same:
GxB_MIN_ISLE_INT8 , GxB_MAX_ISLE_INT8 , GxB_PLUS_ISLE_INT8 , GxB_TIMES_ISLE_INT8 , GxB_ANY_ISLE_INT8 ,
GxB_MIN_ISLE_INT16 , GxB_MAX_ISLE_INT16 , GxB_PLUS_ISLE_INT16 , GxB_TIMES_ISLE_INT16 , GxB_ANY_ISLE_INT16 ,
GxB_MIN_ISLE_INT32 , GxB_MAX_ISLE_INT32 , GxB_PLUS_ISLE_INT32 , GxB_TIMES_ISLE_INT32 , GxB_ANY_ISLE_INT32 ,
GxB_MIN_ISLE_INT64 , GxB_MAX_ISLE_INT64 , GxB_PLUS_ISLE_INT64 , GxB_TIMES_ISLE_INT64 , GxB_ANY_ISLE_INT64 ,
GxB_MIN_ISLE_UINT8 , GxB_MAX_ISLE_UINT8 , GxB_PLUS_ISLE_UINT8 , GxB_TIMES_ISLE_UINT8 , GxB_ANY_ISLE_UINT8 ,
GxB_MIN_ISLE_UINT16 , GxB_MAX_ISLE_UINT16 , GxB_PLUS_ISLE_UINT16 , GxB_TIMES_ISLE_UINT16 , GxB_ANY_ISLE_UINT16 ,
GxB_MIN_ISLE_UINT32 , GxB_MAX_ISLE_UINT32 , GxB_PLUS_ISLE_UINT32 , GxB_TIMES_ISLE_UINT32 , GxB_ANY_ISLE_UINT32 ,
GxB_MIN_ISLE_UINT64 , GxB_MAX_ISLE_UINT64 , GxB_PLUS_ISLE_UINT64 , GxB_TIMES_ISLE_UINT64 , GxB_ANY_ISLE_UINT64 ,
GxB_MIN_ISLE_FP32 , GxB_MAX_ISLE_FP32 , GxB_PLUS_ISLE_FP32 , GxB_TIMES_ISLE_FP32 , GxB_ANY_ISLE_FP32 ,
GxB_MIN_ISLE_FP64 , GxB_MAX_ISLE_FP64 , GxB_PLUS_ISLE_FP64 , GxB_TIMES_ISLE_FP64 , GxB_ANY_ISLE_FP64 ,
// semirings with multiply op: z = LOR (x,y), all types x,y,z the same:
GxB_MIN_LOR_INT8 , GxB_MAX_LOR_INT8 , GxB_PLUS_LOR_INT8 , GxB_TIMES_LOR_INT8 , GxB_ANY_LOR_INT8 ,
GxB_MIN_LOR_INT16 , GxB_MAX_LOR_INT16 , GxB_PLUS_LOR_INT16 , GxB_TIMES_LOR_INT16 , GxB_ANY_LOR_INT16 ,
GxB_MIN_LOR_INT32 , GxB_MAX_LOR_INT32 , GxB_PLUS_LOR_INT32 , GxB_TIMES_LOR_INT32 , GxB_ANY_LOR_INT32 ,
GxB_MIN_LOR_INT64 , GxB_MAX_LOR_INT64 , GxB_PLUS_LOR_INT64 , GxB_TIMES_LOR_INT64 , GxB_ANY_LOR_INT64 ,
GxB_MIN_LOR_UINT8 , GxB_MAX_LOR_UINT8 , GxB_PLUS_LOR_UINT8 , GxB_TIMES_LOR_UINT8 , GxB_ANY_LOR_UINT8 ,
GxB_MIN_LOR_UINT16 , GxB_MAX_LOR_UINT16 , GxB_PLUS_LOR_UINT16 , GxB_TIMES_LOR_UINT16 , GxB_ANY_LOR_UINT16 ,
GxB_MIN_LOR_UINT32 , GxB_MAX_LOR_UINT32 , GxB_PLUS_LOR_UINT32 , GxB_TIMES_LOR_UINT32 , GxB_ANY_LOR_UINT32 ,
GxB_MIN_LOR_UINT64 , GxB_MAX_LOR_UINT64 , GxB_PLUS_LOR_UINT64 , GxB_TIMES_LOR_UINT64 , GxB_ANY_LOR_UINT64 ,
GxB_MIN_LOR_FP32 , GxB_MAX_LOR_FP32 , GxB_PLUS_LOR_FP32 , GxB_TIMES_LOR_FP32 , GxB_ANY_LOR_FP32 ,
GxB_MIN_LOR_FP64 , GxB_MAX_LOR_FP64 , GxB_PLUS_LOR_FP64 , GxB_TIMES_LOR_FP64 , GxB_ANY_LOR_FP64 ,
// semirings with multiply op: z = LAND (x,y), all types x,y,z the same:
GxB_MIN_LAND_INT8 , GxB_MAX_LAND_INT8 , GxB_PLUS_LAND_INT8 , GxB_TIMES_LAND_INT8 , GxB_ANY_LAND_INT8 ,
GxB_MIN_LAND_INT16 , GxB_MAX_LAND_INT16 , GxB_PLUS_LAND_INT16 , GxB_TIMES_LAND_INT16 , GxB_ANY_LAND_INT16 ,
GxB_MIN_LAND_INT32 , GxB_MAX_LAND_INT32 , GxB_PLUS_LAND_INT32 , GxB_TIMES_LAND_INT32 , GxB_ANY_LAND_INT32 ,
GxB_MIN_LAND_INT64 , GxB_MAX_LAND_INT64 , GxB_PLUS_LAND_INT64 , GxB_TIMES_LAND_INT64 , GxB_ANY_LAND_INT64 ,
GxB_MIN_LAND_UINT8 , GxB_MAX_LAND_UINT8 , GxB_PLUS_LAND_UINT8 , GxB_TIMES_LAND_UINT8 , GxB_ANY_LAND_UINT8 ,
GxB_MIN_LAND_UINT16 , GxB_MAX_LAND_UINT16 , GxB_PLUS_LAND_UINT16 , GxB_TIMES_LAND_UINT16 , GxB_ANY_LAND_UINT16 ,
GxB_MIN_LAND_UINT32 , GxB_MAX_LAND_UINT32 , GxB_PLUS_LAND_UINT32 , GxB_TIMES_LAND_UINT32 , GxB_ANY_LAND_UINT32 ,
GxB_MIN_LAND_UINT64 , GxB_MAX_LAND_UINT64 , GxB_PLUS_LAND_UINT64 , GxB_TIMES_LAND_UINT64 , GxB_ANY_LAND_UINT64 ,
GxB_MIN_LAND_FP32 , GxB_MAX_LAND_FP32 , GxB_PLUS_LAND_FP32 , GxB_TIMES_LAND_FP32 , GxB_ANY_LAND_FP32 ,
GxB_MIN_LAND_FP64 , GxB_MAX_LAND_FP64 , GxB_PLUS_LAND_FP64 , GxB_TIMES_LAND_FP64 , GxB_ANY_LAND_FP64 ,
// semirings with multiply op: z = LXOR (x,y), all types x,y,z the same:
GxB_MIN_LXOR_INT8 , GxB_MAX_LXOR_INT8 , GxB_PLUS_LXOR_INT8 , GxB_TIMES_LXOR_INT8 , GxB_ANY_LXOR_INT8 ,
GxB_MIN_LXOR_INT16 , GxB_MAX_LXOR_INT16 , GxB_PLUS_LXOR_INT16 , GxB_TIMES_LXOR_INT16 , GxB_ANY_LXOR_INT16 ,
GxB_MIN_LXOR_INT32 , GxB_MAX_LXOR_INT32 , GxB_PLUS_LXOR_INT32 , GxB_TIMES_LXOR_INT32 , GxB_ANY_LXOR_INT32 ,
GxB_MIN_LXOR_INT64 , GxB_MAX_LXOR_INT64 , GxB_PLUS_LXOR_INT64 , GxB_TIMES_LXOR_INT64 , GxB_ANY_LXOR_INT64 ,
GxB_MIN_LXOR_UINT8 , GxB_MAX_LXOR_UINT8 , GxB_PLUS_LXOR_UINT8 , GxB_TIMES_LXOR_UINT8 , GxB_ANY_LXOR_UINT8 ,
GxB_MIN_LXOR_UINT16 , GxB_MAX_LXOR_UINT16 , GxB_PLUS_LXOR_UINT16 , GxB_TIMES_LXOR_UINT16 , GxB_ANY_LXOR_UINT16 ,
GxB_MIN_LXOR_UINT32 , GxB_MAX_LXOR_UINT32 , GxB_PLUS_LXOR_UINT32 , GxB_TIMES_LXOR_UINT32 , GxB_ANY_LXOR_UINT32 ,
GxB_MIN_LXOR_UINT64 , GxB_MAX_LXOR_UINT64 , GxB_PLUS_LXOR_UINT64 , GxB_TIMES_LXOR_UINT64 , GxB_ANY_LXOR_UINT64 ,
GxB_MIN_LXOR_FP32 , GxB_MAX_LXOR_FP32 , GxB_PLUS_LXOR_FP32 , GxB_TIMES_LXOR_FP32 , GxB_ANY_LXOR_FP32 ,
GxB_MIN_LXOR_FP64 , GxB_MAX_LXOR_FP64 , GxB_PLUS_LXOR_FP64 , GxB_TIMES_LXOR_FP64 , GxB_ANY_LXOR_FP64 ,
//------------------------------------------------------------------------------
// 300 semirings with a comparator TxT -> bool, where T is non-Boolean
//------------------------------------------------------------------------------
// In the 4th column, the GxB_EQ_*_* semirings could also be called
// GxB_LXNOR_*_*, since the EQ and LXNOR boolean operators are identical;
// however, those names are not included.
// semirings with multiply op: z = EQ (x,y), where z is boolean and x,y are given by the suffix:
GxB_LOR_EQ_INT8 , GxB_LAND_EQ_INT8 , GxB_LXOR_EQ_INT8 , GxB_EQ_EQ_INT8 , GxB_ANY_EQ_INT8 ,
GxB_LOR_EQ_INT16 , GxB_LAND_EQ_INT16 , GxB_LXOR_EQ_INT16 , GxB_EQ_EQ_INT16 , GxB_ANY_EQ_INT16 ,
GxB_LOR_EQ_INT32 , GxB_LAND_EQ_INT32 , GxB_LXOR_EQ_INT32 , GxB_EQ_EQ_INT32 , GxB_ANY_EQ_INT32 ,
GxB_LOR_EQ_INT64 , GxB_LAND_EQ_INT64 , GxB_LXOR_EQ_INT64 , GxB_EQ_EQ_INT64 , GxB_ANY_EQ_INT64 ,
GxB_LOR_EQ_UINT8 , GxB_LAND_EQ_UINT8 , GxB_LXOR_EQ_UINT8 , GxB_EQ_EQ_UINT8 , GxB_ANY_EQ_UINT8 ,
GxB_LOR_EQ_UINT16 , GxB_LAND_EQ_UINT16 , GxB_LXOR_EQ_UINT16 , GxB_EQ_EQ_UINT16 , GxB_ANY_EQ_UINT16 ,
GxB_LOR_EQ_UINT32 , GxB_LAND_EQ_UINT32 , GxB_LXOR_EQ_UINT32 , GxB_EQ_EQ_UINT32 , GxB_ANY_EQ_UINT32 ,
GxB_LOR_EQ_UINT64 , GxB_LAND_EQ_UINT64 , GxB_LXOR_EQ_UINT64 , GxB_EQ_EQ_UINT64 , GxB_ANY_EQ_UINT64 ,
GxB_LOR_EQ_FP32 , GxB_LAND_EQ_FP32 , GxB_LXOR_EQ_FP32 , GxB_EQ_EQ_FP32 , GxB_ANY_EQ_FP32 ,
GxB_LOR_EQ_FP64 , GxB_LAND_EQ_FP64 , GxB_LXOR_EQ_FP64 , GxB_EQ_EQ_FP64 , GxB_ANY_EQ_FP64 ,
// semirings with multiply op: z = NE (x,y), where z is boolean and x,y are given by the suffix:
GxB_LOR_NE_INT8 , GxB_LAND_NE_INT8 , GxB_LXOR_NE_INT8 , GxB_EQ_NE_INT8 , GxB_ANY_NE_INT8 ,
GxB_LOR_NE_INT16 , GxB_LAND_NE_INT16 , GxB_LXOR_NE_INT16 , GxB_EQ_NE_INT16 , GxB_ANY_NE_INT16 ,
GxB_LOR_NE_INT32 , GxB_LAND_NE_INT32 , GxB_LXOR_NE_INT32 , GxB_EQ_NE_INT32 , GxB_ANY_NE_INT32 ,
GxB_LOR_NE_INT64 , GxB_LAND_NE_INT64 , GxB_LXOR_NE_INT64 , GxB_EQ_NE_INT64 , GxB_ANY_NE_INT64 ,
GxB_LOR_NE_UINT8 , GxB_LAND_NE_UINT8 , GxB_LXOR_NE_UINT8 , GxB_EQ_NE_UINT8 , GxB_ANY_NE_UINT8 ,
GxB_LOR_NE_UINT16 , GxB_LAND_NE_UINT16 , GxB_LXOR_NE_UINT16 , GxB_EQ_NE_UINT16 , GxB_ANY_NE_UINT16 ,
GxB_LOR_NE_UINT32 , GxB_LAND_NE_UINT32 , GxB_LXOR_NE_UINT32 , GxB_EQ_NE_UINT32 , GxB_ANY_NE_UINT32 ,
GxB_LOR_NE_UINT64 , GxB_LAND_NE_UINT64 , GxB_LXOR_NE_UINT64 , GxB_EQ_NE_UINT64 , GxB_ANY_NE_UINT64 ,
GxB_LOR_NE_FP32 , GxB_LAND_NE_FP32 , GxB_LXOR_NE_FP32 , GxB_EQ_NE_FP32 , GxB_ANY_NE_FP32 ,
GxB_LOR_NE_FP64 , GxB_LAND_NE_FP64 , GxB_LXOR_NE_FP64 , GxB_EQ_NE_FP64 , GxB_ANY_NE_FP64 ,
// semirings with multiply op: z = GT (x,y), where z is boolean and x,y are given by the suffix:
GxB_LOR_GT_INT8 , GxB_LAND_GT_INT8 , GxB_LXOR_GT_INT8 , GxB_EQ_GT_INT8 , GxB_ANY_GT_INT8 ,
GxB_LOR_GT_INT16 , GxB_LAND_GT_INT16 , GxB_LXOR_GT_INT16 , GxB_EQ_GT_INT16 , GxB_ANY_GT_INT16 ,
GxB_LOR_GT_INT32 , GxB_LAND_GT_INT32 , GxB_LXOR_GT_INT32 , GxB_EQ_GT_INT32 , GxB_ANY_GT_INT32 ,
GxB_LOR_GT_INT64 , GxB_LAND_GT_INT64 , GxB_LXOR_GT_INT64 , GxB_EQ_GT_INT64 , GxB_ANY_GT_INT64 ,
GxB_LOR_GT_UINT8 , GxB_LAND_GT_UINT8 , GxB_LXOR_GT_UINT8 , GxB_EQ_GT_UINT8 , GxB_ANY_GT_UINT8 ,
GxB_LOR_GT_UINT16 , GxB_LAND_GT_UINT16 , GxB_LXOR_GT_UINT16 , GxB_EQ_GT_UINT16 , GxB_ANY_GT_UINT16 ,
GxB_LOR_GT_UINT32 , GxB_LAND_GT_UINT32 , GxB_LXOR_GT_UINT32 , GxB_EQ_GT_UINT32 , GxB_ANY_GT_UINT32 ,
GxB_LOR_GT_UINT64 , GxB_LAND_GT_UINT64 , GxB_LXOR_GT_UINT64 , GxB_EQ_GT_UINT64 , GxB_ANY_GT_UINT64 ,
GxB_LOR_GT_FP32 , GxB_LAND_GT_FP32 , GxB_LXOR_GT_FP32 , GxB_EQ_GT_FP32 , GxB_ANY_GT_FP32 ,
GxB_LOR_GT_FP64 , GxB_LAND_GT_FP64 , GxB_LXOR_GT_FP64 , GxB_EQ_GT_FP64 , GxB_ANY_GT_FP64 ,
// semirings with multiply op: z = LT (x,y), where z is boolean and x,y are given by the suffix:
GxB_LOR_LT_INT8 , GxB_LAND_LT_INT8 , GxB_LXOR_LT_INT8 , GxB_EQ_LT_INT8 , GxB_ANY_LT_INT8 ,
GxB_LOR_LT_INT16 , GxB_LAND_LT_INT16 , GxB_LXOR_LT_INT16 , GxB_EQ_LT_INT16 , GxB_ANY_LT_INT16 ,
GxB_LOR_LT_INT32 , GxB_LAND_LT_INT32 , GxB_LXOR_LT_INT32 , GxB_EQ_LT_INT32 , GxB_ANY_LT_INT32 ,
GxB_LOR_LT_INT64 , GxB_LAND_LT_INT64 , GxB_LXOR_LT_INT64 , GxB_EQ_LT_INT64 , GxB_ANY_LT_INT64 ,
GxB_LOR_LT_UINT8 , GxB_LAND_LT_UINT8 , GxB_LXOR_LT_UINT8 , GxB_EQ_LT_UINT8 , GxB_ANY_LT_UINT8 ,
GxB_LOR_LT_UINT16 , GxB_LAND_LT_UINT16 , GxB_LXOR_LT_UINT16 , GxB_EQ_LT_UINT16 , GxB_ANY_LT_UINT16 ,
GxB_LOR_LT_UINT32 , GxB_LAND_LT_UINT32 , GxB_LXOR_LT_UINT32 , GxB_EQ_LT_UINT32 , GxB_ANY_LT_UINT32 ,
GxB_LOR_LT_UINT64 , GxB_LAND_LT_UINT64 , GxB_LXOR_LT_UINT64 , GxB_EQ_LT_UINT64 , GxB_ANY_LT_UINT64 ,
GxB_LOR_LT_FP32 , GxB_LAND_LT_FP32 , GxB_LXOR_LT_FP32 , GxB_EQ_LT_FP32 , GxB_ANY_LT_FP32 ,
GxB_LOR_LT_FP64 , GxB_LAND_LT_FP64 , GxB_LXOR_LT_FP64 , GxB_EQ_LT_FP64 , GxB_ANY_LT_FP64 ,
// semirings with multiply op: z = GE (x,y), where z is boolean and x,y are given by the suffix:
GxB_LOR_GE_INT8 , GxB_LAND_GE_INT8 , GxB_LXOR_GE_INT8 , GxB_EQ_GE_INT8 , GxB_ANY_GE_INT8 ,
GxB_LOR_GE_INT16 , GxB_LAND_GE_INT16 , GxB_LXOR_GE_INT16 , GxB_EQ_GE_INT16 , GxB_ANY_GE_INT16 ,
GxB_LOR_GE_INT32 , GxB_LAND_GE_INT32 , GxB_LXOR_GE_INT32 , GxB_EQ_GE_INT32 , GxB_ANY_GE_INT32 ,
GxB_LOR_GE_INT64 , GxB_LAND_GE_INT64 , GxB_LXOR_GE_INT64 , GxB_EQ_GE_INT64 , GxB_ANY_GE_INT64 ,
GxB_LOR_GE_UINT8 , GxB_LAND_GE_UINT8 , GxB_LXOR_GE_UINT8 , GxB_EQ_GE_UINT8 , GxB_ANY_GE_UINT8 ,
GxB_LOR_GE_UINT16 , GxB_LAND_GE_UINT16 , GxB_LXOR_GE_UINT16 , GxB_EQ_GE_UINT16 , GxB_ANY_GE_UINT16 ,
GxB_LOR_GE_UINT32 , GxB_LAND_GE_UINT32 , GxB_LXOR_GE_UINT32 , GxB_EQ_GE_UINT32 , GxB_ANY_GE_UINT32 ,
GxB_LOR_GE_UINT64 , GxB_LAND_GE_UINT64 , GxB_LXOR_GE_UINT64 , GxB_EQ_GE_UINT64 , GxB_ANY_GE_UINT64 ,
GxB_LOR_GE_FP32 , GxB_LAND_GE_FP32 , GxB_LXOR_GE_FP32 , GxB_EQ_GE_FP32 , GxB_ANY_GE_FP32 ,
GxB_LOR_GE_FP64 , GxB_LAND_GE_FP64 , GxB_LXOR_GE_FP64 , GxB_EQ_GE_FP64 , GxB_ANY_GE_FP64 ,
// semirings with multiply op: z = LE (x,y), where z is boolean and x,y are given by the suffix:
GxB_LOR_LE_INT8 , GxB_LAND_LE_INT8 , GxB_LXOR_LE_INT8 , GxB_EQ_LE_INT8 , GxB_ANY_LE_INT8 ,
GxB_LOR_LE_INT16 , GxB_LAND_LE_INT16 , GxB_LXOR_LE_INT16 , GxB_EQ_LE_INT16 , GxB_ANY_LE_INT16 ,
GxB_LOR_LE_INT32 , GxB_LAND_LE_INT32 , GxB_LXOR_LE_INT32 , GxB_EQ_LE_INT32 , GxB_ANY_LE_INT32 ,
GxB_LOR_LE_INT64 , GxB_LAND_LE_INT64 , GxB_LXOR_LE_INT64 , GxB_EQ_LE_INT64 , GxB_ANY_LE_INT64 ,
GxB_LOR_LE_UINT8 , GxB_LAND_LE_UINT8 , GxB_LXOR_LE_UINT8 , GxB_EQ_LE_UINT8 , GxB_ANY_LE_UINT8 ,
GxB_LOR_LE_UINT16 , GxB_LAND_LE_UINT16 , GxB_LXOR_LE_UINT16 , GxB_EQ_LE_UINT16 , GxB_ANY_LE_UINT16 ,
GxB_LOR_LE_UINT32 , GxB_LAND_LE_UINT32 , GxB_LXOR_LE_UINT32 , GxB_EQ_LE_UINT32 , GxB_ANY_LE_UINT32 ,
GxB_LOR_LE_UINT64 , GxB_LAND_LE_UINT64 , GxB_LXOR_LE_UINT64 , GxB_EQ_LE_UINT64 , GxB_ANY_LE_UINT64 ,
GxB_LOR_LE_FP32 , GxB_LAND_LE_FP32 , GxB_LXOR_LE_FP32 , GxB_EQ_LE_FP32 , GxB_ANY_LE_FP32 ,
GxB_LOR_LE_FP64 , GxB_LAND_LE_FP64 , GxB_LXOR_LE_FP64 , GxB_EQ_LE_FP64 , GxB_ANY_LE_FP64 ,
//------------------------------------------------------------------------------
// 55 semirings with purely Boolean types, bool x bool -> bool
//------------------------------------------------------------------------------
// Note that lor_pair, land_pair, and eq_pair are all identical to any_pair.
// These 3 are marked below. GxB_EQ_*_BOOL could be called
// GxB_LXNOR_*_BOOL, and GxB_*_EQ_BOOL could be called GxB_*_LXNOR_BOOL,
// but those names are not included.
// purely boolean semirings in the form GxB_(add monoid)_(multiply operator)_BOOL:
GxB_LOR_FIRST_BOOL , GxB_LAND_FIRST_BOOL , GxB_LXOR_FIRST_BOOL , GxB_EQ_FIRST_BOOL , GxB_ANY_FIRST_BOOL ,
GxB_LOR_SECOND_BOOL , GxB_LAND_SECOND_BOOL , GxB_LXOR_SECOND_BOOL , GxB_EQ_SECOND_BOOL , GxB_ANY_SECOND_BOOL ,
GxB_LOR_PAIR_BOOL/**/ , GxB_LAND_PAIR_BOOL/**/ , GxB_LXOR_PAIR_BOOL , GxB_EQ_PAIR_BOOL/**/ , GxB_ANY_PAIR_BOOL ,
GxB_LOR_LOR_BOOL , GxB_LAND_LOR_BOOL , GxB_LXOR_LOR_BOOL , GxB_EQ_LOR_BOOL , GxB_ANY_LOR_BOOL ,
GxB_LOR_LAND_BOOL , GxB_LAND_LAND_BOOL , GxB_LXOR_LAND_BOOL , GxB_EQ_LAND_BOOL , GxB_ANY_LAND_BOOL ,
GxB_LOR_LXOR_BOOL , GxB_LAND_LXOR_BOOL , GxB_LXOR_LXOR_BOOL , GxB_EQ_LXOR_BOOL , GxB_ANY_LXOR_BOOL ,
GxB_LOR_EQ_BOOL , GxB_LAND_EQ_BOOL , GxB_LXOR_EQ_BOOL , GxB_EQ_EQ_BOOL , GxB_ANY_EQ_BOOL ,
GxB_LOR_GT_BOOL , GxB_LAND_GT_BOOL , GxB_LXOR_GT_BOOL , GxB_EQ_GT_BOOL , GxB_ANY_GT_BOOL ,
GxB_LOR_LT_BOOL , GxB_LAND_LT_BOOL , GxB_LXOR_LT_BOOL , GxB_EQ_LT_BOOL , GxB_ANY_LT_BOOL ,
GxB_LOR_GE_BOOL , GxB_LAND_GE_BOOL , GxB_LXOR_GE_BOOL , GxB_EQ_GE_BOOL , GxB_ANY_GE_BOOL ,
GxB_LOR_LE_BOOL , GxB_LAND_LE_BOOL , GxB_LXOR_LE_BOOL , GxB_EQ_LE_BOOL , GxB_ANY_LE_BOOL ,
//------------------------------------------------------------------------------
// 54 complex semirings
//------------------------------------------------------------------------------
// 3 monoids (plus, times, any), 2 types (FC32 and FC64), and 9
// multiplicative operators.
// Note that times_pair is identical to any_pair.
// These 2 are marked below.
GxB_PLUS_FIRST_FC32 , GxB_TIMES_FIRST_FC32 , GxB_ANY_FIRST_FC32 ,
GxB_PLUS_FIRST_FC64 , GxB_TIMES_FIRST_FC64 , GxB_ANY_FIRST_FC64 ,
GxB_PLUS_SECOND_FC32 , GxB_TIMES_SECOND_FC32 , GxB_ANY_SECOND_FC32 ,
GxB_PLUS_SECOND_FC64 , GxB_TIMES_SECOND_FC64 , GxB_ANY_SECOND_FC64 ,
GxB_PLUS_PAIR_FC32 , GxB_TIMES_PAIR_FC32/**/, GxB_ANY_PAIR_FC32 ,
GxB_PLUS_PAIR_FC64 , GxB_TIMES_PAIR_FC64/**/, GxB_ANY_PAIR_FC64 ,
GxB_PLUS_PLUS_FC32 , GxB_TIMES_PLUS_FC32 , GxB_ANY_PLUS_FC32 ,
GxB_PLUS_PLUS_FC64 , GxB_TIMES_PLUS_FC64 , GxB_ANY_PLUS_FC64 ,
GxB_PLUS_MINUS_FC32 , GxB_TIMES_MINUS_FC32 , GxB_ANY_MINUS_FC32 ,
GxB_PLUS_MINUS_FC64 , GxB_TIMES_MINUS_FC64 , GxB_ANY_MINUS_FC64 ,
GxB_PLUS_TIMES_FC32 , GxB_TIMES_TIMES_FC32 , GxB_ANY_TIMES_FC32 ,
GxB_PLUS_TIMES_FC64 , GxB_TIMES_TIMES_FC64 , GxB_ANY_TIMES_FC64 ,
GxB_PLUS_DIV_FC32 , GxB_TIMES_DIV_FC32 , GxB_ANY_DIV_FC32 ,
GxB_PLUS_DIV_FC64 , GxB_TIMES_DIV_FC64 , GxB_ANY_DIV_FC64 ,
GxB_PLUS_RDIV_FC32 , GxB_TIMES_RDIV_FC32 , GxB_ANY_RDIV_FC32 ,
GxB_PLUS_RDIV_FC64 , GxB_TIMES_RDIV_FC64 , GxB_ANY_RDIV_FC64 ,
GxB_PLUS_RMINUS_FC32 , GxB_TIMES_RMINUS_FC32 , GxB_ANY_RMINUS_FC32 ,
GxB_PLUS_RMINUS_FC64 , GxB_TIMES_RMINUS_FC64 , GxB_ANY_RMINUS_FC64 ,
//------------------------------------------------------------------------------
// 64 bitwise semirings
//------------------------------------------------------------------------------
// monoids: (BOR, BAND, BXOR, BXNOR) x
// mult: (BOR, BAND, BXOR, BXNOR) x
// types: (UINT8, UINT16, UINT32, UINT64)
GxB_BOR_BOR_UINT8 , GxB_BOR_BOR_UINT16 , GxB_BOR_BOR_UINT32 , GxB_BOR_BOR_UINT64 ,
GxB_BOR_BAND_UINT8 , GxB_BOR_BAND_UINT16 , GxB_BOR_BAND_UINT32 , GxB_BOR_BAND_UINT64 ,
GxB_BOR_BXOR_UINT8 , GxB_BOR_BXOR_UINT16 , GxB_BOR_BXOR_UINT32 , GxB_BOR_BXOR_UINT64 ,
GxB_BOR_BXNOR_UINT8 , GxB_BOR_BXNOR_UINT16 , GxB_BOR_BXNOR_UINT32 , GxB_BOR_BXNOR_UINT64 ,
GxB_BAND_BOR_UINT8 , GxB_BAND_BOR_UINT16 , GxB_BAND_BOR_UINT32 , GxB_BAND_BOR_UINT64 ,
GxB_BAND_BAND_UINT8 , GxB_BAND_BAND_UINT16 , GxB_BAND_BAND_UINT32 , GxB_BAND_BAND_UINT64 ,
GxB_BAND_BXOR_UINT8 , GxB_BAND_BXOR_UINT16 , GxB_BAND_BXOR_UINT32 , GxB_BAND_BXOR_UINT64 ,
GxB_BAND_BXNOR_UINT8 , GxB_BAND_BXNOR_UINT16 , GxB_BAND_BXNOR_UINT32 , GxB_BAND_BXNOR_UINT64 ,
GxB_BXOR_BOR_UINT8 , GxB_BXOR_BOR_UINT16 , GxB_BXOR_BOR_UINT32 , GxB_BXOR_BOR_UINT64 ,
GxB_BXOR_BAND_UINT8 , GxB_BXOR_BAND_UINT16 , GxB_BXOR_BAND_UINT32 , GxB_BXOR_BAND_UINT64 ,
GxB_BXOR_BXOR_UINT8 , GxB_BXOR_BXOR_UINT16 , GxB_BXOR_BXOR_UINT32 , GxB_BXOR_BXOR_UINT64 ,
GxB_BXOR_BXNOR_UINT8 , GxB_BXOR_BXNOR_UINT16 , GxB_BXOR_BXNOR_UINT32 , GxB_BXOR_BXNOR_UINT64 ,
GxB_BXNOR_BOR_UINT8 , GxB_BXNOR_BOR_UINT16 , GxB_BXNOR_BOR_UINT32 , GxB_BXNOR_BOR_UINT64 ,
GxB_BXNOR_BAND_UINT8 , GxB_BXNOR_BAND_UINT16 , GxB_BXNOR_BAND_UINT32 , GxB_BXNOR_BAND_UINT64 ,
GxB_BXNOR_BXOR_UINT8 , GxB_BXNOR_BXOR_UINT16 , GxB_BXNOR_BXOR_UINT32 , GxB_BXNOR_BXOR_UINT64 ,
GxB_BXNOR_BXNOR_UINT8 , GxB_BXNOR_BXNOR_UINT16 , GxB_BXNOR_BXNOR_UINT32 , GxB_BXNOR_BXNOR_UINT64 ,
//------------------------------------------------------------------------------
// 80 positional semirings
//------------------------------------------------------------------------------
// monoids: (MIN, MAX, ANY, PLUS, TIMES) x
// mult: (FIRSTI, FIRSTI1, FIRSTJ, FIRSTJ1, SECONDI, SECONDI1, SECONDJ, SECONDJ1)
// types: (INT32, INT64)
GxB_MIN_FIRSTI_INT32, GxB_MIN_FIRSTI_INT64,
GxB_MAX_FIRSTI_INT32, GxB_MAX_FIRSTI_INT64,
GxB_ANY_FIRSTI_INT32, GxB_ANY_FIRSTI_INT64,
GxB_PLUS_FIRSTI_INT32, GxB_PLUS_FIRSTI_INT64,
GxB_TIMES_FIRSTI_INT32, GxB_TIMES_FIRSTI_INT64,
GxB_MIN_FIRSTI1_INT32, GxB_MIN_FIRSTI1_INT64,
GxB_MAX_FIRSTI1_INT32, GxB_MAX_FIRSTI1_INT64,
GxB_ANY_FIRSTI1_INT32, GxB_ANY_FIRSTI1_INT64,
GxB_PLUS_FIRSTI1_INT32, GxB_PLUS_FIRSTI1_INT64,
GxB_TIMES_FIRSTI1_INT32, GxB_TIMES_FIRSTI1_INT64,
GxB_MIN_FIRSTJ_INT32, GxB_MIN_FIRSTJ_INT64,
GxB_MAX_FIRSTJ_INT32, GxB_MAX_FIRSTJ_INT64,
GxB_ANY_FIRSTJ_INT32, GxB_ANY_FIRSTJ_INT64,
GxB_PLUS_FIRSTJ_INT32, GxB_PLUS_FIRSTJ_INT64,
GxB_TIMES_FIRSTJ_INT32, GxB_TIMES_FIRSTJ_INT64,
GxB_MIN_FIRSTJ1_INT32, GxB_MIN_FIRSTJ1_INT64,
GxB_MAX_FIRSTJ1_INT32, GxB_MAX_FIRSTJ1_INT64,
GxB_ANY_FIRSTJ1_INT32, GxB_ANY_FIRSTJ1_INT64,
GxB_PLUS_FIRSTJ1_INT32, GxB_PLUS_FIRSTJ1_INT64,
GxB_TIMES_FIRSTJ1_INT32, GxB_TIMES_FIRSTJ1_INT64,
GxB_MIN_SECONDI_INT32, GxB_MIN_SECONDI_INT64,
GxB_MAX_SECONDI_INT32, GxB_MAX_SECONDI_INT64,
GxB_ANY_SECONDI_INT32, GxB_ANY_SECONDI_INT64,
GxB_PLUS_SECONDI_INT32, GxB_PLUS_SECONDI_INT64,
GxB_TIMES_SECONDI_INT32, GxB_TIMES_SECONDI_INT64,
GxB_MIN_SECONDI1_INT32, GxB_MIN_SECONDI1_INT64,
GxB_MAX_SECONDI1_INT32, GxB_MAX_SECONDI1_INT64,
GxB_ANY_SECONDI1_INT32, GxB_ANY_SECONDI1_INT64,
GxB_PLUS_SECONDI1_INT32, GxB_PLUS_SECONDI1_INT64,
GxB_TIMES_SECONDI1_INT32, GxB_TIMES_SECONDI1_INT64,
GxB_MIN_SECONDJ_INT32, GxB_MIN_SECONDJ_INT64,
GxB_MAX_SECONDJ_INT32, GxB_MAX_SECONDJ_INT64,
GxB_ANY_SECONDJ_INT32, GxB_ANY_SECONDJ_INT64,
GxB_PLUS_SECONDJ_INT32, GxB_PLUS_SECONDJ_INT64,
GxB_TIMES_SECONDJ_INT32, GxB_TIMES_SECONDJ_INT64,
GxB_MIN_SECONDJ1_INT32, GxB_MIN_SECONDJ1_INT64,
GxB_MAX_SECONDJ1_INT32, GxB_MAX_SECONDJ1_INT64,
GxB_ANY_SECONDJ1_INT32, GxB_ANY_SECONDJ1_INT64,
GxB_PLUS_SECONDJ1_INT32, GxB_PLUS_SECONDJ1_INT64,
GxB_TIMES_SECONDJ1_INT32, GxB_TIMES_SECONDJ1_INT64 ;
//------------------------------------------------------------------------------
// GrB_* semirings
//------------------------------------------------------------------------------
// The v1.3 C API for GraphBLAS adds the following 124 predefined semirings,
// with GrB_* names. They are identical to 124 GxB_* semirings defined above,
// with the same name, except that GrB_LXNOR_LOR_SEMIRING_BOOL is identical to
// GxB_EQ_LOR_BOOL (since GrB_EQ_BOOL == GrB_LXNOR). The old names are listed
// below alongside each new name; the new GrB_* names are preferred.
// 12 kinds of GrB_* semirings are available for all 10 real non-boolean types:
// PLUS_TIMES, PLUS_MIN,
// MIN_PLUS, MIN_TIMES, MIN_FIRST, MIN_SECOND, MIN_MAX,
// MAX_PLUS, MAX_TIMES, MAX_FIRST, MAX_SECOND, MAX_MIN
// and 4 semirings for boolean only:
// LOR_LAND, LAND_LOR, LXOR_LAND, LXNOR_LOR.
// GxB_* semirings corresponding to the equivalent GrB_* semiring are
// historical.
// 124 predefined GrB_* semirings in all: 12 kinds for each of the 10 real
// non-boolean types, plus 4 boolean semirings.  The trailing comment on each
// line gives the equivalent (historical) GxB_* name.
GB_PUBLIC GrB_Semiring
//--------------------------------------------------------------------------
// 20 semirings with PLUS monoids
//--------------------------------------------------------------------------
// PLUS_TIMES semirings for all 10 real, non-boolean types:
GrB_PLUS_TIMES_SEMIRING_INT8, // GxB_PLUS_TIMES_INT8
GrB_PLUS_TIMES_SEMIRING_INT16, // GxB_PLUS_TIMES_INT16
GrB_PLUS_TIMES_SEMIRING_INT32, // GxB_PLUS_TIMES_INT32
GrB_PLUS_TIMES_SEMIRING_INT64, // GxB_PLUS_TIMES_INT64
GrB_PLUS_TIMES_SEMIRING_UINT8, // GxB_PLUS_TIMES_UINT8
GrB_PLUS_TIMES_SEMIRING_UINT16, // GxB_PLUS_TIMES_UINT16
GrB_PLUS_TIMES_SEMIRING_UINT32, // GxB_PLUS_TIMES_UINT32
GrB_PLUS_TIMES_SEMIRING_UINT64, // GxB_PLUS_TIMES_UINT64
GrB_PLUS_TIMES_SEMIRING_FP32, // GxB_PLUS_TIMES_FP32
GrB_PLUS_TIMES_SEMIRING_FP64, // GxB_PLUS_TIMES_FP64
// PLUS_MIN semirings for all 10 real, non-boolean types:
GrB_PLUS_MIN_SEMIRING_INT8, // GxB_PLUS_MIN_INT8
GrB_PLUS_MIN_SEMIRING_INT16, // GxB_PLUS_MIN_INT16
GrB_PLUS_MIN_SEMIRING_INT32, // GxB_PLUS_MIN_INT32
GrB_PLUS_MIN_SEMIRING_INT64, // GxB_PLUS_MIN_INT64
GrB_PLUS_MIN_SEMIRING_UINT8, // GxB_PLUS_MIN_UINT8
GrB_PLUS_MIN_SEMIRING_UINT16, // GxB_PLUS_MIN_UINT16
GrB_PLUS_MIN_SEMIRING_UINT32, // GxB_PLUS_MIN_UINT32
GrB_PLUS_MIN_SEMIRING_UINT64, // GxB_PLUS_MIN_UINT64
GrB_PLUS_MIN_SEMIRING_FP32, // GxB_PLUS_MIN_FP32
GrB_PLUS_MIN_SEMIRING_FP64, // GxB_PLUS_MIN_FP64
//--------------------------------------------------------------------------
// 50 semirings with MIN monoids
//--------------------------------------------------------------------------
// MIN_PLUS semirings for all 10 real, non-boolean types:
GrB_MIN_PLUS_SEMIRING_INT8, // GxB_MIN_PLUS_INT8
GrB_MIN_PLUS_SEMIRING_INT16, // GxB_MIN_PLUS_INT16
GrB_MIN_PLUS_SEMIRING_INT32, // GxB_MIN_PLUS_INT32
GrB_MIN_PLUS_SEMIRING_INT64, // GxB_MIN_PLUS_INT64
GrB_MIN_PLUS_SEMIRING_UINT8, // GxB_MIN_PLUS_UINT8
GrB_MIN_PLUS_SEMIRING_UINT16, // GxB_MIN_PLUS_UINT16
GrB_MIN_PLUS_SEMIRING_UINT32, // GxB_MIN_PLUS_UINT32
GrB_MIN_PLUS_SEMIRING_UINT64, // GxB_MIN_PLUS_UINT64
GrB_MIN_PLUS_SEMIRING_FP32, // GxB_MIN_PLUS_FP32
GrB_MIN_PLUS_SEMIRING_FP64, // GxB_MIN_PLUS_FP64
// MIN_TIMES semirings for all 10 real, non-boolean types:
GrB_MIN_TIMES_SEMIRING_INT8, // GxB_MIN_TIMES_INT8
GrB_MIN_TIMES_SEMIRING_INT16, // GxB_MIN_TIMES_INT16
GrB_MIN_TIMES_SEMIRING_INT32, // GxB_MIN_TIMES_INT32
GrB_MIN_TIMES_SEMIRING_INT64, // GxB_MIN_TIMES_INT64
GrB_MIN_TIMES_SEMIRING_UINT8, // GxB_MIN_TIMES_UINT8
GrB_MIN_TIMES_SEMIRING_UINT16, // GxB_MIN_TIMES_UINT16
GrB_MIN_TIMES_SEMIRING_UINT32, // GxB_MIN_TIMES_UINT32
GrB_MIN_TIMES_SEMIRING_UINT64, // GxB_MIN_TIMES_UINT64
GrB_MIN_TIMES_SEMIRING_FP32, // GxB_MIN_TIMES_FP32
GrB_MIN_TIMES_SEMIRING_FP64, // GxB_MIN_TIMES_FP64
// MIN_FIRST semirings for all 10 real, non-boolean types:
GrB_MIN_FIRST_SEMIRING_INT8, // GxB_MIN_FIRST_INT8
GrB_MIN_FIRST_SEMIRING_INT16, // GxB_MIN_FIRST_INT16
GrB_MIN_FIRST_SEMIRING_INT32, // GxB_MIN_FIRST_INT32
GrB_MIN_FIRST_SEMIRING_INT64, // GxB_MIN_FIRST_INT64
GrB_MIN_FIRST_SEMIRING_UINT8, // GxB_MIN_FIRST_UINT8
GrB_MIN_FIRST_SEMIRING_UINT16, // GxB_MIN_FIRST_UINT16
GrB_MIN_FIRST_SEMIRING_UINT32, // GxB_MIN_FIRST_UINT32
GrB_MIN_FIRST_SEMIRING_UINT64, // GxB_MIN_FIRST_UINT64
GrB_MIN_FIRST_SEMIRING_FP32, // GxB_MIN_FIRST_FP32
GrB_MIN_FIRST_SEMIRING_FP64, // GxB_MIN_FIRST_FP64
// MIN_SECOND semirings for all 10 real, non-boolean types:
GrB_MIN_SECOND_SEMIRING_INT8, // GxB_MIN_SECOND_INT8
GrB_MIN_SECOND_SEMIRING_INT16, // GxB_MIN_SECOND_INT16
GrB_MIN_SECOND_SEMIRING_INT32, // GxB_MIN_SECOND_INT32
GrB_MIN_SECOND_SEMIRING_INT64, // GxB_MIN_SECOND_INT64
GrB_MIN_SECOND_SEMIRING_UINT8, // GxB_MIN_SECOND_UINT8
GrB_MIN_SECOND_SEMIRING_UINT16, // GxB_MIN_SECOND_UINT16
GrB_MIN_SECOND_SEMIRING_UINT32, // GxB_MIN_SECOND_UINT32
GrB_MIN_SECOND_SEMIRING_UINT64, // GxB_MIN_SECOND_UINT64
GrB_MIN_SECOND_SEMIRING_FP32, // GxB_MIN_SECOND_FP32
GrB_MIN_SECOND_SEMIRING_FP64, // GxB_MIN_SECOND_FP64
// MIN_MAX semirings for all 10 real, non-boolean types:
GrB_MIN_MAX_SEMIRING_INT8, // GxB_MIN_MAX_INT8
GrB_MIN_MAX_SEMIRING_INT16, // GxB_MIN_MAX_INT16
GrB_MIN_MAX_SEMIRING_INT32, // GxB_MIN_MAX_INT32
GrB_MIN_MAX_SEMIRING_INT64, // GxB_MIN_MAX_INT64
GrB_MIN_MAX_SEMIRING_UINT8, // GxB_MIN_MAX_UINT8
GrB_MIN_MAX_SEMIRING_UINT16, // GxB_MIN_MAX_UINT16
GrB_MIN_MAX_SEMIRING_UINT32, // GxB_MIN_MAX_UINT32
GrB_MIN_MAX_SEMIRING_UINT64, // GxB_MIN_MAX_UINT64
GrB_MIN_MAX_SEMIRING_FP32, // GxB_MIN_MAX_FP32
GrB_MIN_MAX_SEMIRING_FP64, // GxB_MIN_MAX_FP64
//--------------------------------------------------------------------------
// 50 semirings with MAX monoids
//--------------------------------------------------------------------------
// MAX_PLUS semirings for all 10 real, non-boolean types
GrB_MAX_PLUS_SEMIRING_INT8, // GxB_MAX_PLUS_INT8
GrB_MAX_PLUS_SEMIRING_INT16, // GxB_MAX_PLUS_INT16
GrB_MAX_PLUS_SEMIRING_INT32, // GxB_MAX_PLUS_INT32
GrB_MAX_PLUS_SEMIRING_INT64, // GxB_MAX_PLUS_INT64
GrB_MAX_PLUS_SEMIRING_UINT8, // GxB_MAX_PLUS_UINT8
GrB_MAX_PLUS_SEMIRING_UINT16, // GxB_MAX_PLUS_UINT16
GrB_MAX_PLUS_SEMIRING_UINT32, // GxB_MAX_PLUS_UINT32
GrB_MAX_PLUS_SEMIRING_UINT64, // GxB_MAX_PLUS_UINT64
GrB_MAX_PLUS_SEMIRING_FP32, // GxB_MAX_PLUS_FP32
GrB_MAX_PLUS_SEMIRING_FP64, // GxB_MAX_PLUS_FP64
// MAX_TIMES semirings for all 10 real, non-boolean types:
GrB_MAX_TIMES_SEMIRING_INT8, // GxB_MAX_TIMES_INT8
GrB_MAX_TIMES_SEMIRING_INT16, // GxB_MAX_TIMES_INT16
GrB_MAX_TIMES_SEMIRING_INT32, // GxB_MAX_TIMES_INT32
GrB_MAX_TIMES_SEMIRING_INT64, // GxB_MAX_TIMES_INT64
GrB_MAX_TIMES_SEMIRING_UINT8, // GxB_MAX_TIMES_UINT8
GrB_MAX_TIMES_SEMIRING_UINT16, // GxB_MAX_TIMES_UINT16
GrB_MAX_TIMES_SEMIRING_UINT32, // GxB_MAX_TIMES_UINT32
GrB_MAX_TIMES_SEMIRING_UINT64, // GxB_MAX_TIMES_UINT64
GrB_MAX_TIMES_SEMIRING_FP32, // GxB_MAX_TIMES_FP32
GrB_MAX_TIMES_SEMIRING_FP64, // GxB_MAX_TIMES_FP64
// MAX_FIRST semirings for all 10 real, non-boolean types:
GrB_MAX_FIRST_SEMIRING_INT8, // GxB_MAX_FIRST_INT8
GrB_MAX_FIRST_SEMIRING_INT16, // GxB_MAX_FIRST_INT16
GrB_MAX_FIRST_SEMIRING_INT32, // GxB_MAX_FIRST_INT32
GrB_MAX_FIRST_SEMIRING_INT64, // GxB_MAX_FIRST_INT64
GrB_MAX_FIRST_SEMIRING_UINT8, // GxB_MAX_FIRST_UINT8
GrB_MAX_FIRST_SEMIRING_UINT16, // GxB_MAX_FIRST_UINT16
GrB_MAX_FIRST_SEMIRING_UINT32, // GxB_MAX_FIRST_UINT32
GrB_MAX_FIRST_SEMIRING_UINT64, // GxB_MAX_FIRST_UINT64
GrB_MAX_FIRST_SEMIRING_FP32, // GxB_MAX_FIRST_FP32
GrB_MAX_FIRST_SEMIRING_FP64, // GxB_MAX_FIRST_FP64
// MAX_SECOND semirings for all 10 real, non-boolean types:
GrB_MAX_SECOND_SEMIRING_INT8, // GxB_MAX_SECOND_INT8
GrB_MAX_SECOND_SEMIRING_INT16, // GxB_MAX_SECOND_INT16
GrB_MAX_SECOND_SEMIRING_INT32, // GxB_MAX_SECOND_INT32
GrB_MAX_SECOND_SEMIRING_INT64, // GxB_MAX_SECOND_INT64
GrB_MAX_SECOND_SEMIRING_UINT8, // GxB_MAX_SECOND_UINT8
GrB_MAX_SECOND_SEMIRING_UINT16, // GxB_MAX_SECOND_UINT16
GrB_MAX_SECOND_SEMIRING_UINT32, // GxB_MAX_SECOND_UINT32
GrB_MAX_SECOND_SEMIRING_UINT64, // GxB_MAX_SECOND_UINT64
GrB_MAX_SECOND_SEMIRING_FP32, // GxB_MAX_SECOND_FP32
GrB_MAX_SECOND_SEMIRING_FP64, // GxB_MAX_SECOND_FP64
// MAX_MIN semirings for all 10 real, non-boolean types:
GrB_MAX_MIN_SEMIRING_INT8, // GxB_MAX_MIN_INT8
GrB_MAX_MIN_SEMIRING_INT16, // GxB_MAX_MIN_INT16
GrB_MAX_MIN_SEMIRING_INT32, // GxB_MAX_MIN_INT32
GrB_MAX_MIN_SEMIRING_INT64, // GxB_MAX_MIN_INT64
GrB_MAX_MIN_SEMIRING_UINT8, // GxB_MAX_MIN_UINT8
GrB_MAX_MIN_SEMIRING_UINT16, // GxB_MAX_MIN_UINT16
GrB_MAX_MIN_SEMIRING_UINT32, // GxB_MAX_MIN_UINT32
GrB_MAX_MIN_SEMIRING_UINT64, // GxB_MAX_MIN_UINT64
GrB_MAX_MIN_SEMIRING_FP32, // GxB_MAX_MIN_FP32
GrB_MAX_MIN_SEMIRING_FP64, // GxB_MAX_MIN_FP64
//--------------------------------------------------------------------------
// 4 boolean semirings:
//--------------------------------------------------------------------------
GrB_LOR_LAND_SEMIRING_BOOL, // GxB_LOR_LAND_BOOL
GrB_LAND_LOR_SEMIRING_BOOL, // GxB_LAND_LOR_BOOL
GrB_LXOR_LAND_SEMIRING_BOOL, // GxB_LXOR_LAND_BOOL
GrB_LXNOR_LOR_SEMIRING_BOOL ; // GxB_EQ_LOR_BOOL (note EQ == LXNOR)
//==============================================================================
// GrB_*_resize: change the size of a matrix or vector
//==============================================================================
// If the dimensions decrease, entries that fall outside the resized matrix or
// vector are deleted.
// Resize methods.  If the new dimensions are smaller than the current ones,
// entries that fall outside the resized matrix or vector are deleted.
GB_PUBLIC
GrB_Info GrB_Matrix_resize // change the size of a matrix
(
GrB_Matrix C, // matrix to modify
GrB_Index nrows_new, // new number of rows in matrix
GrB_Index ncols_new // new number of columns in matrix
) ;
GB_PUBLIC
GrB_Info GrB_Vector_resize // change the size of a vector
(
GrB_Vector w, // vector to modify
GrB_Index nrows_new // new number of rows in vector
) ;
// GxB_*_resize are identical to the GrB_*_resize methods above
GB_PUBLIC
GrB_Info GxB_Matrix_resize // change the size of a matrix (historical)
(
GrB_Matrix C, // matrix to modify
GrB_Index nrows_new, // new number of rows in matrix
GrB_Index ncols_new // new number of columns in matrix
) ;
GB_PUBLIC
GrB_Info GxB_Vector_resize // change the size of a vector (historical)
(
GrB_Vector w, // vector to modify
GrB_Index nrows_new // new number of rows in vector
) ;
// GxB_resize is a generic function for resizing a matrix or vector:
// GrB_Vector_resize (u,nrows_new)
// GrB_Matrix_resize (A,nrows_new,ncols_new)
// The _Generic expression dispatches on the type of the first argument, so
// the same macro works for both GrB_Vector and GrB_Matrix (C11 or later).
#if GxB_STDC_VERSION >= 201112L
#define GxB_resize(arg1,...) \
_Generic \
( \
(arg1), \
GrB_Vector : GrB_Vector_resize , \
GrB_Matrix : GrB_Matrix_resize \
) \
(arg1, __VA_ARGS__)
#endif
//==============================================================================
// GxB_fprint and GxB_print: print the contents of a GraphBLAS object
//==============================================================================
// GxB_fprint (object, GxB_Print_Level pr, FILE *f) prints the contents of any
// of the 9 GraphBLAS objects to the file f, and also does an extensive test on
// the object to determine if it is valid. It returns one of the following
// error conditions:
//
// GrB_SUCCESS object is valid
// GrB_UNINITIALIZED_OBJECT object is not initialized
// GrB_INVALID_OBJECT object is not valid
// GrB_NULL_POINTER object is a NULL pointer
// GrB_INVALID_VALUE fprintf returned an I/O error; see the ANSI C
// errno or GrB_error( ) for details.
//
// GxB_fprint does not modify the status of any object. If a matrix or vector
// has not been completed, the pending computations are guaranteed to *not* be
// performed by GxB_fprint. The reason is simple. It is possible for a bug in
// the user application (such as accessing memory outside the bounds of an
// array) to mangle the internal content of a GraphBLAS object, and GxB_fprint
// can be a helpful tool to track down this bug. If GxB_fprint attempted to
// complete any computations prior to printing or checking the contents of the
// matrix or vector, then further errors could occur, including a segfault.
//
// The type-specific functions include an additional argument, the name string.
// The name is printed at the beginning of the display (assuming pr is not
// GxB_SILENT) so that the object can be more easily identified in the output.
// For the type-generic methods GxB_fprint and GxB_print, the name string is
// the variable name of the object itself.
//
// If f is NULL, stdout is used; this is not an error condition. If pr is
// outside the bounds 0 to 3, negative values are treated as GxB_SILENT, and
// values > 3 are treated as GxB_COMPLETE. If name is NULL, it is treated as
// the empty string.
//
// GxB_print (object, GxB_Print_Level pr) is the same as GxB_fprint, except
// that it prints the contents with printf instead of fprintf to a file f.
//
// The exact content and format of what is printed is implementation-dependent,
// and will change from version to version of SuiteSparse:GraphBLAS. Do not
// attempt to rely on the exact content or format by trying to parse the
// resulting output via another program. The intent of these functions is to
// produce a report of the object for visual inspection.
// Print level for GxB_fprint and GxB_print.  Negative values are treated as
// GxB_SILENT and values > 3 as GxB_COMPLETE (see the description above).
typedef enum
{
GxB_SILENT = 0, // nothing is printed, just check the object
GxB_SUMMARY = 1, // print a terse summary
GxB_SHORT = 2, // short description, about 30 entries of a matrix
GxB_COMPLETE = 3, // print the entire contents of the object
GxB_SHORT_VERBOSE = 4, // GxB_SHORT but with "%.15g" for doubles
GxB_COMPLETE_VERBOSE = 5 // GxB_COMPLETE but with "%.15g" for doubles
}
GxB_Print_Level ;
// Type-specific print/check functions.  Each takes a name string, printed at
// the start of the output so the object can be identified (see above).  If f
// is NULL, stdout is used; if name is NULL it is treated as the empty string.
GB_PUBLIC
GrB_Info GxB_Type_fprint // print and check a GrB_Type
(
GrB_Type type, // object to print and check
const char *name, // name of the object
GxB_Print_Level pr, // print level
FILE *f // file for output
) ;
GB_PUBLIC
GrB_Info GxB_UnaryOp_fprint // print and check a GrB_UnaryOp
(
GrB_UnaryOp unaryop, // object to print and check
const char *name, // name of the object
GxB_Print_Level pr, // print level
FILE *f // file for output
) ;
GB_PUBLIC
GrB_Info GxB_BinaryOp_fprint // print and check a GrB_BinaryOp
(
GrB_BinaryOp binaryop, // object to print and check
const char *name, // name of the object
GxB_Print_Level pr, // print level
FILE *f // file for output
) ;
GB_PUBLIC
GrB_Info GxB_IndexUnaryOp_fprint // print and check a GrB_IndexUnaryOp
(
GrB_IndexUnaryOp op, // object to print and check
const char *name, // name of the object
GxB_Print_Level pr, // print level
FILE *f // file for output
) ;
GB_PUBLIC
GrB_Info GxB_SelectOp_fprint // print and check a GxB_SelectOp
(
GxB_SelectOp selectop, // object to print and check
const char *name, // name of the object
GxB_Print_Level pr, // print level
FILE *f // file for output
) ;
GB_PUBLIC
GrB_Info GxB_Monoid_fprint // print and check a GrB_Monoid
(
GrB_Monoid monoid, // object to print and check
const char *name, // name of the object
GxB_Print_Level pr, // print level
FILE *f // file for output
) ;
GB_PUBLIC
GrB_Info GxB_Semiring_fprint // print and check a GrB_Semiring
(
GrB_Semiring semiring, // object to print and check
const char *name, // name of the object
GxB_Print_Level pr, // print level
FILE *f // file for output
) ;
GB_PUBLIC
GrB_Info GxB_Descriptor_fprint // print and check a GrB_Descriptor
(
GrB_Descriptor descriptor, // object to print and check
const char *name, // name of the object
GxB_Print_Level pr, // print level
FILE *f // file for output
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_fprint // print and check a GrB_Matrix
(
GrB_Matrix A, // object to print and check
const char *name, // name of the object
GxB_Print_Level pr, // print level
FILE *f // file for output
) ;
GB_PUBLIC
GrB_Info GxB_Vector_fprint // print and check a GrB_Vector
(
GrB_Vector v, // object to print and check
const char *name, // name of the object
GxB_Print_Level pr, // print level
FILE *f // file for output
) ;
GB_PUBLIC
GrB_Info GxB_Scalar_fprint // print and check a GrB_Scalar
(
GrB_Scalar s, // object to print and check
const char *name, // name of the object
GxB_Print_Level pr, // print level
FILE *f // file for output
) ;
// Generic GxB_fprint/GxB_print macros (C11 or later).  _Generic dispatches on
// the object's type, and GB_STR(object) stringifies the argument so that the
// variable name of the object itself is used as the printed name string.
#if GxB_STDC_VERSION >= 201112L
#define GxB_fprint(object,pr,f) \
_Generic \
( \
(object), \
const GrB_Type : GxB_Type_fprint , \
GrB_Type : GxB_Type_fprint , \
const GrB_UnaryOp : GxB_UnaryOp_fprint , \
GrB_UnaryOp : GxB_UnaryOp_fprint , \
const GrB_BinaryOp : GxB_BinaryOp_fprint , \
GrB_BinaryOp : GxB_BinaryOp_fprint , \
const GrB_IndexUnaryOp : GxB_IndexUnaryOp_fprint , \
GrB_IndexUnaryOp : GxB_IndexUnaryOp_fprint , \
const GxB_SelectOp : GxB_SelectOp_fprint , \
GxB_SelectOp : GxB_SelectOp_fprint , \
const GrB_Monoid : GxB_Monoid_fprint , \
GrB_Monoid : GxB_Monoid_fprint , \
const GrB_Semiring : GxB_Semiring_fprint , \
GrB_Semiring : GxB_Semiring_fprint , \
const GrB_Scalar : GxB_Scalar_fprint , \
GrB_Scalar : GxB_Scalar_fprint , \
const GrB_Vector : GxB_Vector_fprint , \
GrB_Vector : GxB_Vector_fprint , \
const GrB_Matrix : GxB_Matrix_fprint , \
GrB_Matrix : GxB_Matrix_fprint , \
const GrB_Descriptor : GxB_Descriptor_fprint , \
GrB_Descriptor : GxB_Descriptor_fprint \
) \
(object, GB_STR(object), pr, f)
// GxB_print is GxB_fprint with f == NULL, which prints to stdout
#define GxB_print(object,pr) GxB_fprint(object,pr,NULL)
#endif
//==============================================================================
// Matrix and vector import/export/pack/unpack
//==============================================================================
// The import/export/pack/unpack functions allow the user application to create
// a GrB_Matrix or GrB_Vector object, and to extract its contents, faster and
// with less memory overhead than the GrB_*_build and GrB_*_extractTuples
// functions.
// The semantics of import/export/pack/unpack are the same as the "move
// constructor" in C++. On import, the user provides a set of arrays that have
// been previously allocated via the ANSI C malloc function. The arrays define
// the content of the matrix or vector. Unlike GrB_*_build, the GraphBLAS
// library then takes ownership of the user's input arrays and may either (a)
// incorporate them into its internal data structure for the new GrB_Matrix or
// GrB_Vector, potentially creating the GrB_Matrix or GrB_Vector in constant
// time with no memory copying performed, or (b) if the library does not
// support the import format directly, then it may convert the input to its
// internal format, and then free the user's input arrays. GraphBLAS may also
// choose to use a mix of the two strategies. In either case, the input arrays
// are no longer "owned" by the user application. If A is a GrB_Matrix created
// by an import/pack, the user input arrays are freed no later than GrB_free
// (&A), and may be freed earlier, at the discretion of the GraphBLAS library.
// The data structure of the GrB_Matrix and GrB_Vector remain opaque.
// The export/unpack of a GrB_Matrix or GrB_Vector is symmetric with the import
// operation. The export is destructive, where the GrB_Matrix or GrB_Vector no
// longer exists when the export completes. The GrB_Matrix or GrB_Vector
// exists after an unpack operation, just with no entries. In both export and
// unpack, the user is returned several arrays that contain the matrix or
// vector in the requested format. Ownership of these arrays is given to the
// user application, which is then responsible for freeing them via the ANSI C
// free function. If the output format is supported by the GraphBLAS library,
// then these arrays may be returned to the user application in O(1) time and
// with no memory copying performed. Otherwise, the GraphBLAS library will
// create the output arrays for the user (via the ANSI C malloc function), fill
// them with the GrB_Matrix or GrB_Vector data, and then return the newly
// allocated arrays to the user.
// Eight different formats are provided for import/export. For each format,
// the Ax array has a C-type <type> corresponding to one of the 13 built-in
// types in GraphBLAS (bool, int*_t, uint*_t, float, double, float complex, or
// double complex), or a user-defined type.
// On import/pack, the required user arrays Ah, Ap, Ab, Ai, Aj, and/or Ax must
// be non-NULL pointers to memory space allocated by the ANSI C malloc (or
// calloc, or realloc), unless nzmax is zero (in which case the Ab, Ai, Aj, Ax,
// vb, vi, and vx arrays may all be NULL). For the import, A (or GrB_Vector v)
// is undefined on input, just like GrB_*_new. If the import
// is successful, the GrB_Matrix A or GrB_Vector v is created, and the pointers
// to the user input arrays have been set to NULL. These user arrays have
// either been incorporated directly into the GrB_Matrix A or GrB_Vector v, in
// which case the user input arrays will eventually be freed by GrB_free (&A),
// or their contents have been copied and the arrays freed. This decision is
// made by the GraphBLAS library itself, and the user application has no
// control over this decision.
// If any of the arrays Ab, Aj, Ai, Ax, vb, vi, or vx have zero size (with
// nzmax of zero), they are allowed to be NULL pointers on input.
// A matrix or vector may be "iso", where all entries present in the pattern
// have the same value. In this case, the boolean iso flag is true, and the
// corresponding numerical array (Ax for matrices, vx for vectors, below) need
// be only large enough to hold a single value.
// No error checking is performed on the content of the user input arrays. If
// the user input arrays do not conform to the precise specifications above,
// results are undefined. No typecasting of the values of the matrix or vector
// entries is performed on import or export.
// SuiteSparse:GraphBLAS supports all eight formats natively (CSR, CSC,
// HyperCSR, and HyperCSC, BitmapR, BitmapC, FullR, FullC). For vectors, only
// CSC, BitmapC, and FullC formats are used. On import, all eight formats
// take O(1) time and memory to import. On export, if the GrB_Matrix or
// GrB_Vector is already in this particular format, then the export takes O(1)
// time and no memory copying is performed.
// If the import is not successful, the GxB_Matrix_import_* functions return A
// as NULL, GxB_Vector_import returns v as NULL, and the user input arrays are
// neither modified nor freed. They are still owned by the user application.
// If the input data is untrusted, use the following descriptor setting for
// GxB_Matrix_import* and GxB_Matrix_pack*. The import/pack will be slower,
// but secure. GrB_Matrix_import uses the slow, secure method, since it has
// no descriptor input.
//
// GxB_set (desc, GxB_IMPORT, GxB_SECURE_IMPORT) ;
// As of v5.2.0, GxB_*import* and GxB_*export* are declared historical. Use
// GxB_*pack* and GxB_*unpack* instead. The GxB import/export will be kept
// but only documented here, not in the User Guide.
//------------------------------------------------------------------------------
// GxB_Matrix_pack_CSR: pack a CSR matrix
//------------------------------------------------------------------------------
// On success, ownership of the Ap, Aj, and Ax arrays transfers to GraphBLAS
// and the user's pointers are set to NULL (see the import/export notes
// above).  The import form creates A; the pack form fills an existing A.
GB_PUBLIC
GrB_Info GxB_Matrix_import_CSR // historical: use GxB_Matrix_pack_CSR
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
GrB_Index **Ap, // row "pointers", Ap_size >= (nrows+1)* sizeof(int64_t)
GrB_Index **Aj, // column indices, Aj_size >= nvals(A) * sizeof(int64_t)
void **Ax, // values, Ax_size >= nvals(A) * (type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ap_size, // size of Ap in bytes
GrB_Index Aj_size, // size of Aj in bytes
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
bool jumbled, // if true, indices in each row may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_pack_CSR // pack a CSR matrix
(
GrB_Matrix A, // matrix to create (type, nrows, ncols unchanged)
GrB_Index **Ap, // row "pointers", Ap_size >= (nrows+1)* sizeof(int64_t)
GrB_Index **Aj, // column indices, Aj_size >= nvals(A) * sizeof(int64_t)
void **Ax, // values, Ax_size >= nvals(A) * (type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ap_size, // size of Ap in bytes
GrB_Index Aj_size, // size of Aj in bytes
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
bool jumbled, // if true, indices in each row may be unsorted
const GrB_Descriptor desc
) ;
// CSR: an nrows-by-ncols matrix with nvals entries in CSR format consists
// of 3 arrays, where nvals = Ap [nrows]:
//
// GrB_Index Ap [nrows+1], Aj [nvals] ; <type> Ax [nvals] ;
//
// The column indices of entries in the ith row of the matrix are held
// in Aj [Ap [i] ... Ap[i+1]], and the corresponding values are held
// in the same positions in Ax. Column indices must be in the range 0
// to ncols-1. If jumbled is false, the column indices must appear in
// sorted order within each row. No duplicate column indices may
// appear in any row. Ap [0] must equal zero, and Ap [nrows] must
// equal nvals. The Ap array must be of size nrows+1 (or larger), and
// the Aj and Ax arrays must have size at least nvals. If nvals is
// zero, then the Aj and Ax arrays need not be present and can be
// NULL.
//------------------------------------------------------------------------------
// GxB_Matrix_pack_CSC: pack a CSC matrix
//------------------------------------------------------------------------------
// On success, ownership of the Ap, Ai, and Ax arrays transfers to GraphBLAS
// and the user's pointers are set to NULL (see the import/export notes
// above).  The import form creates A; the pack form fills an existing A.
GB_PUBLIC
GrB_Info GxB_Matrix_import_CSC // historical: use GxB_Matrix_pack_CSC
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
GrB_Index **Ap, // col "pointers", Ap_size >= (ncols+1)*sizeof(int64_t)
GrB_Index **Ai, // row indices, Ai_size >= nvals(A)*sizeof(int64_t)
void **Ax, // values, Ax_size >= nvals(A) * (type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ap_size, // size of Ap in bytes
GrB_Index Ai_size, // size of Ai in bytes
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
bool jumbled, // if true, indices in each column may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_pack_CSC // pack a CSC matrix
(
GrB_Matrix A, // matrix to create (type, nrows, ncols unchanged)
GrB_Index **Ap, // col "pointers", Ap_size >= (ncols+1)*sizeof(int64_t)
GrB_Index **Ai, // row indices, Ai_size >= nvals(A)*sizeof(int64_t)
void **Ax, // values, Ax_size >= nvals(A) * (type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ap_size, // size of Ap in bytes
GrB_Index Ai_size, // size of Ai in bytes
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
bool jumbled, // if true, indices in each column may be unsorted
const GrB_Descriptor desc
) ;
// CSC: an nrows-by-ncols matrix with nvals entries in CSC format consists
// of 3 arrays, where nvals = Ap [ncols]:
//
// GrB_Index Ap [ncols+1], Ai [nvals] ; <type> Ax [nvals] ;
//
// The row indices of entries in the jth column of the matrix are held
// in Ai [Ap [j] ... Ap[j+1]], and the corresponding values are held
// in the same positions in Ax. Row indices must be in the range 0 to
// nrows-1. If jumbled is false, the row indices must appear in
// sorted order within each column. No duplicate row indices may
// appear in any column. Ap [0] must equal zero, and Ap [ncols] must
// equal nvals. The Ap array must be of size ncols+1 (or larger), and
// the Ai and Ax arrays must have size at least nvals. If nvals is
// zero, then the Ai and Ax arrays need not be present and can be
// NULL.
//------------------------------------------------------------------------------
// GxB_Matrix_pack_HyperCSR: pack a hypersparse CSR matrix
//------------------------------------------------------------------------------
// On success, ownership of the Ap, Ah, Aj, and Ax arrays transfers to
// GraphBLAS and the user's pointers are set to NULL (see the import/export
// notes above).  The import form creates A; the pack form fills an existing A.
GB_PUBLIC
GrB_Info GxB_Matrix_import_HyperCSR // historical: use GxB_Matrix_pack_HyperCSR
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
GrB_Index **Ap, // row "pointers", Ap_size >= (nvec+1)*sizeof(int64_t)
GrB_Index **Ah, // row indices, Ah_size >= nvec*sizeof(int64_t)
GrB_Index **Aj, // column indices, Aj_size >= nvals(A)*sizeof(int64_t)
void **Ax, // values, Ax_size >= nvals(A) * (type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ap_size, // size of Ap in bytes
GrB_Index Ah_size, // size of Ah in bytes
GrB_Index Aj_size, // size of Aj in bytes
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
GrB_Index nvec, // number of rows that appear in Ah
bool jumbled, // if true, indices in each row may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_pack_HyperCSR // pack a hypersparse CSR matrix
(
GrB_Matrix A, // matrix to create (type, nrows, ncols unchanged)
GrB_Index **Ap, // row "pointers", Ap_size >= (nvec+1)*sizeof(int64_t)
GrB_Index **Ah, // row indices, Ah_size >= nvec*sizeof(int64_t)
GrB_Index **Aj, // column indices, Aj_size >= nvals(A)*sizeof(int64_t)
void **Ax, // values, Ax_size >= nvals(A) * (type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ap_size, // size of Ap in bytes
GrB_Index Ah_size, // size of Ah in bytes
GrB_Index Aj_size, // size of Aj in bytes
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
GrB_Index nvec, // number of rows that appear in Ah
bool jumbled, // if true, indices in each row may be unsorted
const GrB_Descriptor desc
) ;
// HyperCSR: an nrows-by-ncols matrix with nvals entries and nvec
// rows that may have entries in HyperCSR format consists of 4 arrays,
// where nvals = Ap [nvec]:
//
// GrB_Index Ah [nvec], Ap [nvec+1], Aj [nvals] ;
// <type> Ax [nvals] ;
//
// The Aj and Ax arrays are the same for a matrix in CSR or HyperCSR
// format. Only Ap and Ah differ.
//
// The Ah array is a list of the row indices of rows that appear in
// the matrix. It
// must appear in sorted order, and no duplicates may appear. If i =
// Ah [k] is the kth row, then the column indices of the ith
// row appear in Aj [Ap [k] ... Ap [k+1]], and the corresponding
// values appear in the same locations in Ax. Column indices must be
// in the range 0 to ncols-1, and must appear in sorted order within
// each row. No duplicate column indices may appear in any row. nvec
// may be zero, to denote an array with no entries. The Ah array must
// be of size at least nvec, Ap must be of size at least nvec+1, and
// Aj and Ax must be at least of size nvals. If nvals is zero, then
// the Aj and Ax arrays need not be present and can be NULL.
//------------------------------------------------------------------------------
// GxB_Matrix_pack_HyperCSC: pack a hypersparse CSC matrix
//------------------------------------------------------------------------------
// On success, ownership of the Ap, Ah, Ai, and Ax arrays transfers to
// GraphBLAS and the user's pointers are set to NULL (see the import/export
// notes above).  The import form creates A; the pack form fills an existing A.
GB_PUBLIC
GrB_Info GxB_Matrix_import_HyperCSC // historical: use GxB_Matrix_pack_HyperCSC
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
GrB_Index **Ap, // col "pointers", Ap_size >= (nvec+1)*sizeof(int64_t)
GrB_Index **Ah, // column indices, Ah_size >= nvec*sizeof(int64_t)
GrB_Index **Ai, // row indices, Ai_size >= nvals(A)*sizeof(int64_t)
void **Ax, // values, Ax_size >= nvals(A)*(type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ap_size, // size of Ap in bytes
GrB_Index Ah_size, // size of Ah in bytes
GrB_Index Ai_size, // size of Ai in bytes
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
GrB_Index nvec, // number of columns that appear in Ah
bool jumbled, // if true, indices in each column may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_pack_HyperCSC // pack a hypersparse CSC matrix
(
GrB_Matrix A, // matrix to create (type, nrows, ncols unchanged)
GrB_Index **Ap, // col "pointers", Ap_size >= (nvec+1)*sizeof(int64_t)
GrB_Index **Ah, // column indices, Ah_size >= nvec*sizeof(int64_t)
GrB_Index **Ai, // row indices, Ai_size >= nvals(A)*sizeof(int64_t)
void **Ax, // values, Ax_size >= nvals(A)*(type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ap_size, // size of Ap in bytes
GrB_Index Ah_size, // size of Ah in bytes
GrB_Index Ai_size, // size of Ai in bytes
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
GrB_Index nvec, // number of columns that appear in Ah
bool jumbled, // if true, indices in each column may be unsorted
const GrB_Descriptor desc
) ;
// HyperCSC: an nrows-by-ncols matrix with nvals entries and nvec
// columns that may have entries in HyperCSC format consists of 4 arrays,
// where nvals = Ap [nvec]:
//
//
// GrB_Index Ah [nvec], Ap [nvec+1], Ai [nvals] ;
// <type> Ax [nvals] ;
//
// The Ai and Ax arrays are the same for a matrix in CSC or HyperCSC
// format. Only Ap and Ah differ.
//
// The Ah array is a list of the column indices of non-empty columns.
// It must appear in sorted order, and no duplicates may appear. If j
// = Ah [k] is the kth non-empty column, then the row indices of the
// jth column appear in Ai [Ap [k] ... Ap [k+1]-1], and the
// corresponding values appear in the same locations in Ax. Row
// indices must be in the range 0 to nrows-1, and must appear in
// sorted order within each column. No duplicate row indices may
// appear in any column. nvec may be zero, to denote an array with no
// entries. The Ah array must be of size at least nvec, Ap must be of
// size at least nvec+1, and Ai and Ax must be at least of size nvals.
// If nvals is zero, then the Ai and Ax arrays need not be present and
// can be NULL.
//------------------------------------------------------------------------------
// GxB_Matrix_pack_BitmapR: pack a bitmap matrix, held by row
//------------------------------------------------------------------------------
GB_PUBLIC
GrB_Info GxB_Matrix_import_BitmapR // historical: use GxB_Matrix_pack_BitmapR
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
int8_t **Ab, // bitmap, Ab_size >= nrows*ncols
void **Ax, // values, Ax_size >= nrows*ncols * (type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ab_size, // size of Ab in bytes
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
GrB_Index nvals, // # of entries in bitmap
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_pack_BitmapR // pack a bitmap matrix, held by row
(
GrB_Matrix A, // matrix to create (type, nrows, ncols unchanged)
int8_t **Ab, // bitmap, Ab_size >= nrows*ncols
void **Ax, // values, Ax_size >= nrows*ncols * (type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ab_size, // size of Ab in bytes
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
GrB_Index nvals, // # of entries in bitmap
const GrB_Descriptor desc
) ;
// BitmapR: a dense format, but able to represent sparsity structure of A.
//
// int8_t Ab [nrows*ncols] ;
// <type> Ax [nrows*ncols] ;
//
// Ab and Ax are both of size nrows*ncols. Ab [i*ncols+j] = 1 if the
// A(i,j) entry is present with value Ax [i*ncols+j], or 0 if A(i,j)
// is not present. nvals must equal the number of 1's in the Ab
// array.
//------------------------------------------------------------------------------
// GxB_Matrix_pack_BitmapC: pack a bitmap matrix, held by column
//------------------------------------------------------------------------------
GB_PUBLIC
GrB_Info GxB_Matrix_import_BitmapC // historical: use GxB_Matrix_pack_BitmapC
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
int8_t **Ab, // bitmap, Ab_size >= nrows*ncols
void **Ax, // values, Ax_size >= nrows*ncols * (type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ab_size, // size of Ab in bytes
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
GrB_Index nvals, // # of entries in bitmap
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_pack_BitmapC // pack a bitmap matrix, held by column
(
GrB_Matrix A, // matrix to create (type, nrows, ncols unchanged)
int8_t **Ab, // bitmap, Ab_size >= nrows*ncols
void **Ax, // values, Ax_size >= nrows*ncols * (type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ab_size, // size of Ab in bytes
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
GrB_Index nvals, // # of entries in bitmap
const GrB_Descriptor desc
) ;
// BitmapC: a dense format, but able to represent sparsity structure of A.
//
// int8_t Ab [nrows*ncols] ;
// <type> Ax [nrows*ncols] ;
//
// Ab and Ax are both of size nrows*ncols. Ab [i+j*nrows] = 1 if the
// A(i,j) entry is present with value Ax [i+j*nrows], or 0 if A(i,j)
// is not present. nvals must equal the number of 1's in the Ab
// array.
//------------------------------------------------------------------------------
// GxB_Matrix_pack_FullR: pack a full matrix, held by row
//------------------------------------------------------------------------------
GB_PUBLIC
GrB_Info GxB_Matrix_import_FullR // historical: use GxB_Matrix_pack_FullR
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
void **Ax, // values, Ax_size >= nrows*ncols * (type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_pack_FullR // pack a full matrix, held by row
(
GrB_Matrix A, // matrix to create (type, nrows, ncols unchanged)
void **Ax, // values, Ax_size >= nrows*ncols * (type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
const GrB_Descriptor desc
) ;
// FullR: an nrows-by-ncols full matrix held in row-major order:
//
// <type> Ax [nrows*ncols] ;
//
// Ax is an array of size nrows*ncols, where A(i,j) is held in
// Ax [i*ncols+j]. All entries in A are present.
//------------------------------------------------------------------------------
// GxB_Matrix_pack_FullC: pack a full matrix, held by column
//------------------------------------------------------------------------------
GB_PUBLIC
GrB_Info GxB_Matrix_import_FullC // historical: use GxB_Matrix_pack_FullC
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
void **Ax, // values, Ax_size >= nrows*ncols * (type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_pack_FullC // pack a full matrix, held by column
(
GrB_Matrix A, // matrix to create (type, nrows, ncols unchanged)
void **Ax, // values, Ax_size >= nrows*ncols * (type size)
// or Ax_size >= (type size), if iso is true
GrB_Index Ax_size, // size of Ax in bytes
bool iso, // if true, A is iso
const GrB_Descriptor desc
) ;
// FullC: an nrows-by-ncols full matrix held in column-major order:
//
// <type> Ax [nrows*ncols] ;
//
// Ax is an array of size nrows*ncols, where A(i,j) is held in
// Ax [i+j*nrows]. All entries in A are present.
//------------------------------------------------------------------------------
// GxB_Vector_pack_CSC: import/pack a vector in CSC format
//------------------------------------------------------------------------------
GB_PUBLIC
GrB_Info GxB_Vector_import_CSC // historical: use GxB_Vector_pack_CSC
(
GrB_Vector *v, // handle of vector to create
GrB_Type type, // type of vector to create
GrB_Index n, // vector length
GrB_Index **vi, // indices, vi_size >= nvals(v) * sizeof(int64_t)
void **vx, // values, vx_size >= nvals(v) * (type size)
// or vx_size >= (type size), if iso is true
GrB_Index vi_size, // size of vi in bytes
GrB_Index vx_size, // size of vx in bytes
bool iso, // if true, v is iso
GrB_Index nvals, // # of entries in vector
bool jumbled, // if true, indices may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Vector_pack_CSC // pack a vector in CSC format
(
GrB_Vector v, // vector to create (type and length unchanged)
GrB_Index **vi, // indices, vi_size >= nvals(v) * sizeof(int64_t)
void **vx, // values, vx_size >= nvals(v) * (type size)
// or vx_size >= (type size), if iso is true
GrB_Index vi_size, // size of vi in bytes
GrB_Index vx_size, // size of vx in bytes
bool iso, // if true, v is iso
GrB_Index nvals, // # of entries in vector
bool jumbled, // if true, indices may be unsorted
const GrB_Descriptor desc
) ;
// The GrB_Vector is treated as if it was a single column of an n-by-1
// matrix in CSC format, except that no vp array is required. If nvals is
// zero, then the vi and vx arrays need not be present and can be NULL.
//------------------------------------------------------------------------------
// GxB_Vector_pack_Bitmap: pack a vector in bitmap format
//------------------------------------------------------------------------------
GB_PUBLIC
GrB_Info GxB_Vector_import_Bitmap // historical: use GxB_Vector_pack_Bitmap
(
GrB_Vector *v, // handle of vector to create
GrB_Type type, // type of vector to create
GrB_Index n, // vector length
int8_t **vb, // bitmap, vb_size >= n
void **vx, // values, vx_size >= n * (type size)
// or vx_size >= (type size), if iso is true
GrB_Index vb_size, // size of vb in bytes
GrB_Index vx_size, // size of vx in bytes
bool iso, // if true, v is iso
GrB_Index nvals, // # of entries in bitmap
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Vector_pack_Bitmap // pack a bitmap vector
(
GrB_Vector v, // vector to create (type and length unchanged)
int8_t **vb, // bitmap, vb_size >= n
void **vx, // values, vx_size >= n * (type size)
// or vx_size >= (type size), if iso is true
GrB_Index vb_size, // size of vb in bytes
GrB_Index vx_size, // size of vx in bytes
bool iso, // if true, v is iso
GrB_Index nvals, // # of entries in bitmap
const GrB_Descriptor desc
) ;
// The GrB_Vector is treated as if it was a single column of an n-by-1
// matrix in BitmapC format.
//------------------------------------------------------------------------------
// GxB_Vector_pack_Full: pack a vector in full format
//------------------------------------------------------------------------------
GB_PUBLIC
GrB_Info GxB_Vector_import_Full // historical: use GxB_Vector_pack_Full
(
GrB_Vector *v, // handle of vector to create
GrB_Type type, // type of vector to create
GrB_Index n, // vector length
void **vx, // values, vx_size >= nvals(v) * (type size)
// or vx_size >= (type size), if iso is true
GrB_Index vx_size, // size of vx in bytes
bool iso, // if true, v is iso
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Vector_pack_Full // pack a full vector
(
GrB_Vector v, // vector to create (type and length unchanged)
void **vx, // values, vx_size >= nvals(v) * (type size)
// or vx_size >= (type size), if iso is true
GrB_Index vx_size, // size of vx in bytes
bool iso, // if true, v is iso
const GrB_Descriptor desc
) ;
// The GrB_Vector is treated as if it was a single column of an n-by-1
// matrix in FullC format.
//------------------------------------------------------------------------------
// GxB* export/unpack
//------------------------------------------------------------------------------
// The GxB_*_export/unpack functions are symmetric with the GxB_*_import/pack
// functions. The export/unpack functions force completion of any pending
// operations, prior to the export, except if the only pending operation is to
// unjumble the matrix.
//
// If there are no entries in the matrix or vector, then the index arrays (Ai,
// Aj, or vi) and value arrays (Ax or vx) are returned as NULL. This is not an
// error condition.
//
// A GrB_Matrix may be exported/unpacked in any one of four different formats.
// On successful export, the input GrB_Matrix A is freed, and the output arrays
// Ah, Ap, Ai, Aj, and/or Ax are returned to the user application as arrays
// allocated by the ANSI C malloc function. The four formats are the same as
// the import formats for GxB_Matrix_import/pack.
//
// If jumbled is NULL on input, this indicates to GxB_*export/unpack* that the
// exported/unpacked matrix cannot be returned in a jumbled format. In this
// case, if the matrix is jumbled, it is sorted before exporting it to the
// caller.
//
// If iso is NULL on input, this indicates to the export/unpack methods that
// the exported/unpacked matrix cannot be returned in an iso format, with an
// Ax array with just one entry. In this case, if the matrix is iso, it is
// expanded before exporting/unpacking it to the caller.
//
// For the export/unpack*Full* methods, all entries in the matrix or vector
// must be present. That is, GrB_*_nvals must report nvals equal to
// nrows*ncols for a matrix, or to the vector length for a vector. If this
// condition does not hold, the matrix/vector is not exported, and
// GrB_INVALID_VALUE is returned.
//
// If the export/unpack is not successful, the export/unpack functions do not
// modify the matrix or vector, and the user arrays are returned as NULL.
GB_PUBLIC
GrB_Info GxB_Matrix_export_CSR // historical: use GxB_Matrix_unpack_CSR
(
GrB_Matrix *A, // handle of matrix to export and free
GrB_Type *type, // type of matrix exported
GrB_Index *nrows, // number of rows of the matrix
GrB_Index *ncols, // number of columns of the matrix
GrB_Index **Ap, // row "pointers"
GrB_Index **Aj, // column indices
void **Ax, // values
GrB_Index *Ap_size, // size of Ap in bytes
GrB_Index *Aj_size, // size of Aj in bytes
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
bool *jumbled, // if true, indices in each row may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_unpack_CSR // unpack a CSR matrix
(
GrB_Matrix A, // matrix to unpack (type, nrows, ncols unchanged)
GrB_Index **Ap, // row "pointers"
GrB_Index **Aj, // column indices
void **Ax, // values
GrB_Index *Ap_size, // size of Ap in bytes
GrB_Index *Aj_size, // size of Aj in bytes
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
bool *jumbled, // if true, indices in each row may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_export_CSC // historical: use GxB_Matrix_unpack_CSC
(
GrB_Matrix *A, // handle of matrix to export and free
GrB_Type *type, // type of matrix exported
GrB_Index *nrows, // number of rows of the matrix
GrB_Index *ncols, // number of columns of the matrix
GrB_Index **Ap, // column "pointers"
GrB_Index **Ai, // row indices
void **Ax, // values
GrB_Index *Ap_size, // size of Ap in bytes
GrB_Index *Ai_size, // size of Ai in bytes
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
bool *jumbled, // if true, indices in each column may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_unpack_CSC // unpack a CSC matrix
(
GrB_Matrix A, // matrix to unpack (type, nrows, ncols unchanged)
GrB_Index **Ap, // column "pointers"
GrB_Index **Ai, // row indices
void **Ax, // values
GrB_Index *Ap_size, // size of Ap in bytes
GrB_Index *Ai_size, // size of Ai in bytes
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
bool *jumbled, // if true, indices in each column may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_export_HyperCSR // historical: use GxB_Matrix_unpack_HyperCSR
(
GrB_Matrix *A, // handle of matrix to export and free
GrB_Type *type, // type of matrix exported
GrB_Index *nrows, // number of rows of the matrix
GrB_Index *ncols, // number of columns of the matrix
GrB_Index **Ap, // row "pointers"
GrB_Index **Ah, // row indices
GrB_Index **Aj, // column indices
void **Ax, // values
GrB_Index *Ap_size, // size of Ap in bytes
GrB_Index *Ah_size, // size of Ah in bytes
GrB_Index *Aj_size, // size of Aj in bytes
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
GrB_Index *nvec, // number of rows that appear in Ah
bool *jumbled, // if true, indices in each row may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_unpack_HyperCSR // unpack a hypersparse CSR matrix
(
GrB_Matrix A, // matrix to unpack (type, nrows, ncols unchanged)
GrB_Index **Ap, // row "pointers"
GrB_Index **Ah, // row indices
GrB_Index **Aj, // column indices
void **Ax, // values
GrB_Index *Ap_size, // size of Ap in bytes
GrB_Index *Ah_size, // size of Ah in bytes
GrB_Index *Aj_size, // size of Aj in bytes
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
GrB_Index *nvec, // number of rows that appear in Ah
bool *jumbled, // if true, indices in each row may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_export_HyperCSC // historical: use GxB_Matrix_unpack_HyperCSC
(
GrB_Matrix *A, // handle of matrix to export and free
GrB_Type *type, // type of matrix exported
GrB_Index *nrows, // number of rows of the matrix
GrB_Index *ncols, // number of columns of the matrix
GrB_Index **Ap, // column "pointers"
GrB_Index **Ah, // column indices
GrB_Index **Ai, // row indices
void **Ax, // values
GrB_Index *Ap_size, // size of Ap in bytes
GrB_Index *Ah_size, // size of Ah in bytes
GrB_Index *Ai_size, // size of Ai in bytes
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
GrB_Index *nvec, // number of columns that appear in Ah
bool *jumbled, // if true, indices in each column may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_unpack_HyperCSC // unpack a hypersparse CSC matrix
(
GrB_Matrix A, // matrix to unpack (type, nrows, ncols unchanged)
GrB_Index **Ap, // column "pointers"
GrB_Index **Ah, // column indices
GrB_Index **Ai, // row indices
void **Ax, // values
GrB_Index *Ap_size, // size of Ap in bytes
GrB_Index *Ah_size, // size of Ah in bytes
GrB_Index *Ai_size, // size of Ai in bytes
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
GrB_Index *nvec, // number of columns that appear in Ah
bool *jumbled, // if true, indices in each column may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_export_BitmapR // historical: use GxB_Matrix_unpack_BitmapR
(
GrB_Matrix *A, // handle of matrix to export and free
GrB_Type *type, // type of matrix exported
GrB_Index *nrows, // number of rows of the matrix
GrB_Index *ncols, // number of columns of the matrix
int8_t **Ab, // bitmap
void **Ax, // values
GrB_Index *Ab_size, // size of Ab in bytes
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
GrB_Index *nvals, // # of entries in bitmap
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_unpack_BitmapR // unpack a bitmap matrix, by row
(
GrB_Matrix A, // matrix to unpack (type, nrows, ncols unchanged)
int8_t **Ab, // bitmap
void **Ax, // values
GrB_Index *Ab_size, // size of Ab in bytes
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
GrB_Index *nvals, // # of entries in bitmap
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_export_BitmapC // historical: use GxB_Matrix_unpack_BitmapC
(
GrB_Matrix *A, // handle of matrix to export and free
GrB_Type *type, // type of matrix exported
GrB_Index *nrows, // number of rows of the matrix
GrB_Index *ncols, // number of columns of the matrix
int8_t **Ab, // bitmap
void **Ax, // values
GrB_Index *Ab_size, // size of Ab in bytes
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
GrB_Index *nvals, // # of entries in bitmap
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_unpack_BitmapC // unpack a bitmap matrix, by col
(
GrB_Matrix A, // matrix to unpack (type, nrows, ncols unchanged)
int8_t **Ab, // bitmap
void **Ax, // values
GrB_Index *Ab_size, // size of Ab in bytes
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
GrB_Index *nvals, // # of entries in bitmap
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_export_FullR // historical: use GxB_Matrix_unpack_FullR
(
GrB_Matrix *A, // handle of matrix to export and free
GrB_Type *type, // type of matrix exported
GrB_Index *nrows, // number of rows of the matrix
GrB_Index *ncols, // number of columns of the matrix
void **Ax, // values
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_unpack_FullR // unpack a full matrix, by row
(
GrB_Matrix A, // matrix to unpack (type, nrows, ncols unchanged)
void **Ax, // values
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_export_FullC // historical: use GxB_Matrix_unpack_FullC
(
GrB_Matrix *A, // handle of matrix to export and free
GrB_Type *type, // type of matrix exported
GrB_Index *nrows, // number of rows of the matrix
GrB_Index *ncols, // number of columns of the matrix
void **Ax, // values
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_unpack_FullC // unpack a full matrix, by column
(
GrB_Matrix A, // matrix to unpack (type, nrows, ncols unchanged)
void **Ax, // values
GrB_Index *Ax_size, // size of Ax in bytes
bool *iso, // if true, A is iso
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Vector_export_CSC // historical: use GxB_Vector_unpack_CSC
(
GrB_Vector *v, // handle of vector to export and free
GrB_Type *type, // type of vector exported
GrB_Index *n, // length of the vector
GrB_Index **vi, // indices
void **vx, // values
GrB_Index *vi_size, // size of vi in bytes
GrB_Index *vx_size, // size of vx in bytes
bool *iso, // if true, v is iso
GrB_Index *nvals, // # of entries in vector
bool *jumbled, // if true, indices may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Vector_unpack_CSC // unpack a CSC vector
(
GrB_Vector v, // vector to unpack (type and length unchanged)
GrB_Index **vi, // indices
void **vx, // values
GrB_Index *vi_size, // size of vi in bytes
GrB_Index *vx_size, // size of vx in bytes
bool *iso, // if true, v is iso
GrB_Index *nvals, // # of entries in vector
bool *jumbled, // if true, indices may be unsorted
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Vector_export_Bitmap // historical: use GxB_Vector_unpack_Bitmap
(
GrB_Vector *v, // handle of vector to export and free
GrB_Type *type, // type of vector exported
GrB_Index *n, // length of the vector
int8_t **vb, // bitmap
void **vx, // values
GrB_Index *vb_size, // size of vb in bytes
GrB_Index *vx_size, // size of vx in bytes
bool *iso, // if true, v is iso
GrB_Index *nvals, // # of entries in bitmap
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Vector_unpack_Bitmap // unpack a bitmap vector
(
GrB_Vector v, // vector to unpack (type and length unchanged)
int8_t **vb, // bitmap
void **vx, // values
GrB_Index *vb_size, // size of vb in bytes
GrB_Index *vx_size, // size of vx in bytes
bool *iso, // if true, v is iso
GrB_Index *nvals, // # of entries in bitmap
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Vector_export_Full // historical: use GxB_Vector_unpack_Full
(
GrB_Vector *v, // handle of vector to export and free
GrB_Type *type, // type of vector exported
GrB_Index *n, // length of the vector
void **vx, // values
GrB_Index *vx_size, // size of vx in bytes
bool *iso, // if true, v is iso
const GrB_Descriptor desc
) ;
GB_PUBLIC
GrB_Info GxB_Vector_unpack_Full // unpack a full vector
(
GrB_Vector v, // vector to unpack (type and length unchanged)
void **vx, // values
GrB_Index *vx_size, // size of vx in bytes
bool *iso, // if true, v is iso
const GrB_Descriptor desc
) ;
//==============================================================================
// GrB import/export
//==============================================================================
// The GrB_Matrix_import method copies from user-provided arrays into an
// opaque GrB_Matrix and GrB_Matrix_export copies data out, from an opaque
// GrB_Matrix into user-provided arrays. Unlike the GxB pack/unpack methods,
// memory is not handed off between the user application and GraphBLAS.
// These methods are much slower than the GxB pack/unpack methods, since they
// require a copy of the data to be made. GrB_Matrix_import also must assume
// its input data cannot be trusted, and so it does extensive checks. The GxB
// pack takes O(1) time in all cases (unless it is told the input data is
// untrusted, via the descriptor). GxB unpack takes O(1) time unless the
// matrix is exported in a different format than it currently has.
// No typecasting of the values is done on import or export.
// The GrB C API specification supports 3 formats:
// All three formats share the same Ap/Ai/Ax parameter lists in the
// GrB_Matrix_import/export methods below; the meaning of each array depends
// on the format chosen (see the per-parameter comments on those methods).
typedef enum
{
GrB_CSR_FORMAT = 0, // CSR format (equiv to GxB_SPARSE with GxB_BY_ROW)
GrB_CSC_FORMAT = 1, // CSC format (equiv to GxB_SPARSE with GxB_BY_COL)
GrB_COO_FORMAT = 2 // triplet format (like input to GrB*build)
}
GrB_Format ;
GB_PUBLIC
GrB_Info GrB_Matrix_import_BOOL // import a GrB_BOOL matrix
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create (must be GrB_BOOL)
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
const GrB_Index *Ai, // row indices for CSR, CSC
const bool *Ax, // values
GrB_Index Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format // import format
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_import_INT8 // import a GrB_INT8 matrix
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create (must be GrB_INT8)
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
const GrB_Index *Ai, // row indices for CSR, CSC
const int8_t *Ax, // values
GrB_Index Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format // import format
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_import_INT16 // import a GrB_INT16 matrix
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create (must be GrB_INT16)
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
const GrB_Index *Ai, // row indices for CSR, CSC
const int16_t *Ax, // values
GrB_Index Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format // import format
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_import_INT32 // import a GrB_INT32 matrix
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create (must be GrB_INT32)
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
const GrB_Index *Ai, // row indices for CSR, CSC
const int32_t *Ax, // values
GrB_Index Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format // import format
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_import_INT64 // import a GrB_INT64 matrix
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create (must be GrB_INT64)
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
const GrB_Index *Ai, // row indices for CSR, CSC
const int64_t *Ax, // values
GrB_Index Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format // import format
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_import_UINT8 // import a GrB_UINT8 matrix
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create (must be GrB_UINT8)
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
const GrB_Index *Ai, // row indices for CSR, CSC
const uint8_t *Ax, // values
GrB_Index Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format // import format
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_import_UINT16 // import a GrB_UINT16 matrix
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create (must be GrB_UINT16)
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
const GrB_Index *Ai, // row indices for CSR, CSC
const uint16_t *Ax, // values
GrB_Index Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format // import format
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_import_UINT32 // import a GrB_UINT32 matrix
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create (must be GrB_UINT32)
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
const GrB_Index *Ai, // row indices for CSR, CSC
const uint32_t *Ax, // values
GrB_Index Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format // import format
) ;
// GrB_Matrix_import_*: build a new matrix from user-provided Ap/Ai/Ax arrays.
// The user arrays are read-only inputs (all const) and remain owned by the
// caller; the *_len arguments count entries, not bytes.
GB_PUBLIC
GrB_Info GrB_Matrix_import_UINT64 // import a GrB_UINT64 matrix
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create (must be GrB_UINT64)
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
const GrB_Index *Ai, // row indices for CSR, CSC
const uint64_t *Ax, // values
GrB_Index Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format // import format
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_import_FP32 // import a GrB_FP32 matrix
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create (must be GrB_FP32)
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
const GrB_Index *Ai, // row indices for CSR, CSC
const float *Ax, // values
GrB_Index Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format // import format
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_import_FP64 // import a GrB_FP64 matrix
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create (must be GrB_FP64)
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
const GrB_Index *Ai, // row indices for CSR, CSC
const double *Ax, // values
GrB_Index Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format // import format
) ;
// the two complex types are GxB extensions, hence GxB_Matrix_import_FC*:
GB_PUBLIC
GrB_Info GxB_Matrix_import_FC32 // import a GxB_FC32 matrix
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create (must be GxB_FC32)
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
const GrB_Index *Ai, // row indices for CSR, CSC
const GxB_FC32_t *Ax, // values
GrB_Index Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format // import format
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_import_FC64 // import a GxB_FC64 matrix
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create (must be GxB_FC64)
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
const GrB_Index *Ai, // row indices for CSR, CSC
const GxB_FC64_t *Ax, // values
GrB_Index Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format // import format
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_import_UDT // import a matrix with a user-defined type
(
GrB_Matrix *A, // handle of matrix to create
GrB_Type type, // type of matrix to create
GrB_Index nrows, // number of rows of the matrix
GrB_Index ncols, // number of columns of the matrix
const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
const GrB_Index *Ai, // row indices for CSR, CSC
const void *Ax, // values (must match the type parameter)
GrB_Index Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format // import format
) ;
// polymorphic form: dispatches on the C type of Ax via C11 _Generic
#if GxB_STDC_VERSION >= 201112L
#define GrB_Matrix_import(A,type,nrows,ncols,Ap,Ai,Ax,Ap_len,Ai_len,Ax_len,fmt)\
_Generic \
( \
(Ax), \
GB_CASES (*, GrB, Matrix_import) \
) \
(A, type, nrows, ncols, Ap, Ai, Ax, Ap_len, Ai_len, Ax_len, fmt)
#endif
// For GrB_Matrix_export_T: on input, Ap_len, Ai_len, and Ax_len are
// the size of the 3 arrays Ap, Ai, and Ax, in terms of the # of entries.
// On output, these 3 values are modified to be the # of entries copied
// into those 3 arrays.
// GrB_Matrix_export_*: copy a matrix into caller-allocated Ap/Ai/Ax arrays.
// Per the note above, the *_len arguments are in/out: array capacities on
// input (in # of entries), # of entries written on output.
GB_PUBLIC
GrB_Info GrB_Matrix_export_BOOL // export a GrB_BOOL matrix
(
GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC
bool *Ax, // values (must match the type of A)
GrB_Index *Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index *Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index *Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export (must be of type GrB_BOOL)
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_export_INT8 // export a GrB_INT8 matrix
(
GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC
int8_t *Ax, // values (must match the type of A)
GrB_Index *Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index *Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index *Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export (must be of type GrB_INT8)
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_export_INT16 // export a GrB_INT16 matrix
(
GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC
int16_t *Ax, // values (must match the type of A)
GrB_Index *Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index *Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index *Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export (must be of type GrB_INT16)
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_export_INT32 // export a GrB_INT32 matrix
(
GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC
int32_t *Ax, // values (must match the type of A)
GrB_Index *Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index *Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index *Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export (must be of type GrB_INT32)
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_export_INT64 // export a GrB_INT64 matrix
(
GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC
int64_t *Ax, // values (must match the type of A)
GrB_Index *Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index *Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index *Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export (must be of type GrB_INT64)
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_export_UINT8 // export a GrB_UINT8 matrix
(
GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC
uint8_t *Ax, // values (must match the type of A)
GrB_Index *Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index *Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index *Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export (must be of type GrB_UINT8)
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_export_UINT16 // export a GrB_UINT16 matrix
(
GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC
uint16_t *Ax, // values (must match the type of A)
GrB_Index *Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index *Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index *Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export (must be of type GrB_UINT16)
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_export_UINT32 // export a GrB_UINT32 matrix
(
GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC
uint32_t *Ax, // values (must match the type of A)
GrB_Index *Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index *Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index *Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export (must be of type GrB_UINT32)
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_export_UINT64 // export a GrB_UINT64 matrix
(
GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC
uint64_t *Ax, // values (must match the type of A)
GrB_Index *Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index *Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index *Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export (must be of type GrB_UINT64)
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_export_FP32 // export a GrB_FP32 matrix
(
GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC
float *Ax, // values (must match the type of A)
GrB_Index *Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index *Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index *Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export (must be of type GrB_FP32)
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_export_FP64 // export a GrB_FP64 matrix
(
GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC
double *Ax, // values (must match the type of A)
GrB_Index *Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index *Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index *Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export (must be of type GrB_FP64)
) ;
// complex-typed exports are GxB extensions (the matrix type is GxB_FC32 or
// GxB_FC64, not a GrB_* type)
GB_PUBLIC
GrB_Info GxB_Matrix_export_FC32 // export a GxB_FC32 matrix
(
GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC
GxB_FC32_t *Ax, // values (must match the type of A)
GrB_Index *Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index *Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index *Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export (must be of type GxB_FC32)
) ;
GB_PUBLIC
GrB_Info GxB_Matrix_export_FC64 // export a GxB_FC64 matrix
(
GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC
GxB_FC64_t *Ax, // values (must match the type of A)
GrB_Index *Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index *Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index *Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export (must be of type GxB_FC64)
) ;
GB_PUBLIC
GrB_Info GrB_Matrix_export_UDT // export a matrix with a user-defined type
(
GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO
GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC
void *Ax, // values (must match the type of A)
GrB_Index *Ap_len, // number of entries in Ap (not # of bytes)
GrB_Index *Ai_len, // number of entries in Ai (not # of bytes)
GrB_Index *Ax_len, // number of entries in Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export
) ;
// polymorphic form: dispatches on the C type of Ax via C11 _Generic
#if GxB_STDC_VERSION >= 201112L
#define GrB_Matrix_export(Ap,Ai,Ax,Ap_len,Ai_len,Ax_len,fmt,A) \
_Generic \
( \
(Ax), \
GB_CASES (*, GrB, Matrix_export) \
) \
(Ap, Ai, Ax, Ap_len, Ai_len, Ax_len, fmt, A)
#endif
// GrB_Matrix_exportSize: query the capacities the caller must allocate for
// Ap, Ai, and Ax before calling GrB_Matrix_export_* with the given format.
GB_PUBLIC
GrB_Info GrB_Matrix_exportSize // determine sizes of user arrays for export
(
GrB_Index *Ap_len, // # of entries required for Ap (not # of bytes)
GrB_Index *Ai_len, // # of entries required for Ai (not # of bytes)
GrB_Index *Ax_len, // # of entries required for Ax (not # of bytes)
GrB_Format format, // export format
GrB_Matrix A // matrix to export
) ;
// GrB_Matrix_exportHint: ask the library which GrB_Format it recommends for
// exporting the matrix A (an output parameter, not a user choice).
GB_PUBLIC
GrB_Info GrB_Matrix_exportHint // suggest the best export format
(
GrB_Format *format, // export format
GrB_Matrix A // matrix to export
) ;
//==============================================================================
// serialize/deserialize
//==============================================================================
// GxB_Matrix_serialize copies the contents of a GrB_Matrix into a single array
// of bytes (the "blob"). The contents of the blob are implementation
// dependent. The blob can be saved to a file, or sent across a communication
// channel, and then a GrB_Matrix can be reconstructed from the blob, even on
// another process or another machine, using the same version of
// SuiteSparse:GraphBLAS (v5.2.0 or later). The goal is that future versions
// of SuiteSparse:GraphBLAS should be able to read in the blob as well, and
// reconstruct a matrix. The matrix can be reconstructed from the blob using
// GxB_Matrix_deserialize. The blob is compressed, by default, and
// uncompressed by GxB_Matrix_deserialize.
// GrB_Matrix_serialize/deserialize are slightly different from their GxB*
// counterparts. The blob is allocated by GxB_Matrix_serialize, and must be
// freed by GxB_serialize_free (which calls the ANSI C11 free if GrB_init was
// used). By contrast, the GrB* methods require the user application to pass
// in a preallocated blob to GrB_Matrix_serialize, whose size can be given by
// GrB_Matrix_serializeSize (as a loose upper bound).
// The GrB* and GxB* methods can be mixed. GrB_Matrix_serialize and
// GxB_Matrix_serialize construct the same blob (assuming they are given the
// same # of threads to do the work). Both GrB_Matrix_deserialize and
// GxB_Matrix_deserialize can deserialize a blob coming from either
// GrB_Matrix_serialize or GxB_Matrix_serialize.
// Deserialization of untrusted data is a common security problem; see
// https://cwe.mitre.org/data/definitions/502.html. The deserialization methods
// below do a few basic checks so that no out-of-bounds access occurs during
// deserialization, but the output matrix itself may still be corrupted. If
// the data is untrusted, use this to check the matrix:
// GxB_Matrix_fprint (A, "A deserialized", GxB_SILENT, NULL)
// Example usage:
/*
//--------------------------------------------------------------------------
// using GxB serialize/deserialize
//--------------------------------------------------------------------------
// Given a GrB_Matrix A: assuming a user-defined type:
void *blob ;
GrB_Index blob_size ;
GxB_Matrix_serialize (&blob, &blob_size, A, NULL) ;
FILE *f = fopen ("myblob", "w") ;
fwrite (&blob_size, sizeof (size_t), 1, f) ;
fwrite (blob, sizeof (uint8_t), blob_size, f) ;
fclose (f) ;
GrB_Matrix_free (&A) ;
// B is a copy of A
GxB_Matrix_deserialize (&B, MyQtype, blob, blob_size, NULL) ;
GrB_Matrix_free (&B) ;
free (blob) ;
GrB_finalize ( ) ;
// --- in another process, to recreate the GrB_Matrix A:
GrB_init (GrB_NONBLOCKING) ;
FILE *f = fopen ("myblob", "r") ;
fread (&blob_size, sizeof (size_t), 1, f) ;
blob = malloc (blob_size) ;
fread (blob, sizeof (uint8_t), blob_size, f) ;
fclose (f) ;
char type_name [GxB_MAX_NAME_LEN] ;
GxB_deserialize_type_name (type_name, blob, blob_size) ;
printf ("blob type is: %s\n", type_name) ;
GrB_Type user_type = NULL ;
if (strncmp (type_name, "myquaternion", GxB_MAX_NAME_LEN) == 0)
user_type = MyQtype ;
GxB_Matrix_deserialize (&A, user_type, blob, blob_size, NULL) ;
free (blob) ; // note, freed by the user, not GraphBLAS
//--------------------------------------------------------------------------
// using GrB serialize/deserialize
//--------------------------------------------------------------------------
// Given a GrB_Matrix A: assuming a user-defined type, MyQtype:
void *blob = NULL ;
GrB_Index blob_size = 0 ;
GrB_Matrix A, B = NULL ;
// construct a matrix A, then serialized it:
GrB_Matrix_serializeSize (&blob_size, A) ; // loose upper bound
blob = malloc (blob_size) ;
GrB_Matrix_serialize (blob, &blob_size, A) ; // returns actual size
blob = realloc (blob, blob_size) ; // user can shrink the blob
FILE *f = fopen ("myblob", "w") ;
fwrite (&blob_size, sizeof (size_t), 1, f) ;
fwrite (blob, sizeof (uint8_t), blob_size, f) ;
fclose (f) ;
GrB_Matrix_free (&A) ;
// B is a copy of A:
GrB_Matrix_deserialize (&B, MyQtype, blob, blob_size) ;
GrB_Matrix_free (&B) ;
free (blob) ;
GrB_finalize ( ) ;
// --- in another process, to recreate the GrB_Matrix A:
GrB_init (GrB_NONBLOCKING) ;
FILE *f = fopen ("myblob", "r") ;
fread (&blob_size, sizeof (size_t), 1, f) ;
blob = malloc (blob_size) ;
fread (blob, sizeof (uint8_t), blob_size, f) ;
fclose (f) ;
// the user must know the type of A is MyQType
GrB_Matrix_deserialize (&A, MyQtype, blob, blob_size) ;
free (blob) ;
*/
// Three methods are currently implemented: no compression, LZ4, and LZ4HC
// (a level may be added to a method, e.g. GxB_COMPRESSION_LZ4HC + 5; see the
// notes following these definitions)
#define GxB_COMPRESSION_NONE -1 // no compression
#define GxB_COMPRESSION_DEFAULT 0 // LZ4
#define GxB_COMPRESSION_LZ4 1000 // LZ4
#define GxB_COMPRESSION_LZ4HC 2000 // LZ4HC, with default level 9
// possible future methods that could be added:
// #define GxB_COMPRESSION_ZLIB 3000 // ZLIB, with default level 6
// #define GxB_COMPRESSION_LZO 4000 // LZO, with default level 2
// #define GxB_COMPRESSION_BZIP2 5000 // BZIP2, with default level 9
// #define GxB_COMPRESSION_LZSS 6000 // LZSS
// using the Intel IPP versions, if available (not yet supported);
#define GxB_COMPRESSION_INTEL 1000000
// Most of the above methods have a level parameter that controls the tradeoff
// between run time and the amount of compression obtained. Higher levels
// result in a more compact result, at the cost of higher run time:
// LZ4 no level setting
// LZ4HC 1: fast, 9: default, 9: max
// these methods are not yet supported but may be added in the future:
// ZLIB 1: fast, 6: default, 9: max
// LZO 1: fast (X1ST), 2: default (XST)
// BZIP2 1: fast, 9: default, 9: max
// LZSS no level setting
// For all methods, a level of zero results in the default level setting.
// These settings can be added, so to use LZ4HC at level 5, use method =
// GxB_COMPRESSION_LZ4HC + 5.
// If the Intel IPPS compression methods are available, they can be selected
// by adding GxB_COMPRESSION_INTEL. For example, to use the Intel IPPS
// implementation of LZ4HC at level 9, use method = GxB_COMPRESSION_INTEL +
// GxB_COMPRESSION_LZ4HC + 9 = 1,002,009. If the Intel methods are requested
// but not available, this setting is ignored and the non-Intel methods are
// used instead.
// If the level setting is out of range, the default is used for that method.
// If the method is negative, no compression is performed. If the method is
// positive but unrecognized, the default is used (GxB_COMPRESSION_LZ4, with no
// level setting, and the non-Intel version).
// If a method is not implemented, LZ4 is used instead, and the level setting
// is ignored.
// GxB_Matrix_serialize: the blob is allocated by this method (contrast with
// GrB_Matrix_serialize below, which writes into a caller-allocated blob);
// the caller is responsible for freeing it, as described in the notes above.
GB_PUBLIC
GrB_Info GxB_Matrix_serialize // serialize a GrB_Matrix to a blob
(
// output:
void **blob_handle, // the blob, allocated on output
GrB_Index *blob_size_handle, // size of the blob on output
// input:
GrB_Matrix A, // matrix to serialize
const GrB_Descriptor desc // descriptor to select compression method
// and to control # of threads used
) ;
// GrB_Matrix_serialize: the caller preallocates the blob; use
// GrB_Matrix_serializeSize to obtain a sufficient (loose upper bound) size.
GB_PUBLIC
GrB_Info GrB_Matrix_serialize // serialize a GrB_Matrix to a blob
(
// output:
void *blob, // the blob, already allocated in input
// input/output:
GrB_Index *blob_size_handle, // size of the blob on input. On output,
// the # of bytes used in the blob.
// input:
GrB_Matrix A // matrix to serialize
) ;
// GxB_Vector_serialize: vector counterpart of GxB_Matrix_serialize; the blob
// is allocated by the method.
GB_PUBLIC
GrB_Info GxB_Vector_serialize // serialize a GrB_Vector to a blob
(
// output:
void **blob_handle, // the blob, allocated on output
GrB_Index *blob_size_handle, // size of the blob on output
// input:
GrB_Vector u, // vector to serialize
const GrB_Descriptor desc // descriptor to select compression method
// and to control # of threads used
) ;
// GrB_Matrix_serializeSize: returns an upper bound only; GrB_Matrix_serialize
// reports the actual number of bytes used.
GB_PUBLIC
GrB_Info GrB_Matrix_serializeSize // estimate the size of a blob
(
// output:
GrB_Index *blob_size_handle, // upper bound on the required size of the
// blob on output.
// input:
GrB_Matrix A // matrix to serialize
) ;
// The GrB* and GxB* deserialize methods are nearly identical. The GxB*
// deserialize methods simply add the descriptor, which allows for optional
// control of the # of threads used to deserialize the blob.
GB_PUBLIC
GrB_Info GxB_Matrix_deserialize // deserialize blob into a GrB_Matrix
(
// output:
GrB_Matrix *C, // output matrix created from the blob
// input:
GrB_Type type, // type of the matrix C. Required if the blob holds a
// matrix of user-defined type. May be NULL if blob
// holds a built-in type; otherwise must match the
// type of C.
const void *blob, // the blob
GrB_Index blob_size, // size of the blob
const GrB_Descriptor desc // to control # of threads used
) ;
// GrB_Matrix_deserialize: same as GxB_Matrix_deserialize but without the
// descriptor (see the note above; the GrB* and GxB* blobs are interchangeable)
GB_PUBLIC
GrB_Info GrB_Matrix_deserialize // deserialize blob into a GrB_Matrix
(
// output:
GrB_Matrix *C, // output matrix created from the blob
// input:
GrB_Type type, // type of the matrix C. Required if the blob holds a
// matrix of user-defined type. May be NULL if blob
// holds a built-in type; otherwise must match the
// type of C.
const void *blob, // the blob
GrB_Index blob_size // size of the blob
) ;
GB_PUBLIC
GrB_Info GxB_Vector_deserialize // deserialize blob into a GrB_Vector
(
// output:
GrB_Vector *w, // output vector created from the blob
// input:
GrB_Type type, // type of the vector w. Required if the blob holds a
// vector of user-defined type. May be NULL if blob
// holds a built-in type; otherwise must match the
// type of w.
const void *blob, // the blob
GrB_Index blob_size, // size of the blob
const GrB_Descriptor desc // to control # of threads used
) ;
// GxB_deserialize_type_name extracts the type_name of the GrB_Type of the
// GrB_Matrix or GrB_Vector held in a serialized blob. On input, type_name
// must point to a user-owned char array of size at least GxB_MAX_NAME_LEN (it
// must not point into the blob itself). On output, type_name will contain a
// null-terminated string with the corresponding C type name. If the blob
// holds a matrix of a built-in type, the name is returned as "bool" for
// GrB_BOOL, "uint8_t" for GrB_UINT8, "float complex" for GxB_FC32, etc.
// See GxB_Type_name to convert this name into a GrB_Type.
// GxB_deserialize_type_name: peek at a blob's type without deserializing it
// (see the usage notes above; pair with GxB_Type_name to recover a GrB_Type).
GB_PUBLIC
GrB_Info GxB_deserialize_type_name // return the type name of a blob
(
// output:
char *type_name, // name of the type (char array of size at least
// GxB_MAX_NAME_LEN, owned by the user application).
// input, not modified:
const void *blob, // the blob
GrB_Index blob_size // size of the blob
) ;
//==============================================================================
// GxB_Vector_sort and GxB_Matrix_sort: sort a matrix or vector
//==============================================================================
// GxB_Vector_sort: w holds the sorted values of u, and p the permutation
// that produces them, as ordered by the comparator op.
GB_PUBLIC
GrB_Info GxB_Vector_sort
(
// output:
GrB_Vector w, // vector of sorted values
GrB_Vector p, // vector containing the permutation
// input
GrB_BinaryOp op, // comparator op
GrB_Vector u, // vector to sort
const GrB_Descriptor desc
) ;
// GxB_Matrix_sort: matrix counterpart of GxB_Vector_sort; C holds the sorted
// values of A and P the corresponding permutations.
GB_PUBLIC
GrB_Info GxB_Matrix_sort
(
// output:
GrB_Matrix C, // matrix of sorted values
GrB_Matrix P, // matrix containing the permutations
// input
GrB_BinaryOp op, // comparator op
GrB_Matrix A, // matrix to sort
const GrB_Descriptor desc
) ;
// GxB_sort: polymorphic wrapper; dispatches on whether arg1 is a GrB_Vector
// or a GrB_Matrix via C11 _Generic
#define GxB_sort(arg1,...) \
_Generic \
( \
(arg1), \
GrB_Vector : GxB_Vector_sort , \
GrB_Matrix : GxB_Matrix_sort \
) \
(arg1, __VA_ARGS__)
//==============================================================================
// GxB_Iterator: an object that iterates over the entries of a matrix or vector
//==============================================================================
/* Example usage:
single thread iteration of a whole matrix, one row at a time (in the
outer loop), and one entry at a time within the row (in the inner loop):
// create an iterator
GxB_Iterator iterator ;
GxB_Iterator_new (&iterator) ;
// attach it to the matrix A, known to be type GrB_FP64
GrB_Info info = GxB_rowIterator_attach (iterator, A, NULL) ;
if (info < 0) { handle the failure ... }
// seek to A(0,:)
info = GxB_rowIterator_seekRow (iterator, 0) ;
while (info != GxB_EXHAUSTED)
{
// iterate over entries in A(i,:)
GrB_Index i = GxB_rowIterator_getRowIndex (iterator) ;
while (info == GrB_SUCCESS)
{
// get the entry A(i,j)
GrB_Index j = GxB_rowIterator_getColIndex (iterator) ;
double aij = GxB_Iterator_get_FP64 (iterator) ;
// move to the next entry in A(i,:)
info = GxB_rowIterator_nextCol (iterator) ;
}
// move to the next row, A(i+1,:)
info = GxB_rowIterator_nextRow (iterator) ;
}
GrB_free (&iterator) ;
parallel iteration using 4 threads (work may be imbalanced however):
GrB_Index nrows ;
GrB_wait (A, GrB_MATERIALIZE) ; // this is essential
GrB_Matrix_nrows (&nrows, A) ;
#pragma omp parallel for num_threads(4)
for (int tid = 0 ; tid < 4 ; tid++)
{
// thread tid operates on A(row1:row2-1,:)
GrB_Index row1 = tid * (nrows / 4) ;
GrB_Index row2 = (tid == 3) ? nrows : ((tid+1) * (nrows / 4)) ;
GxB_Iterator iterator ;
GxB_Iterator_new (&iterator) ;
GrB_Info info = GxB_rowIterator_attach (iterator, A, NULL) ;
if (info < 0) { handle the failure ... }
// seek to A(row1,:)
info = GxB_rowIterator_seekRow (iterator, row1) ;
while (info != GxB_EXHAUSTED)
{
// iterate over entries in A(i,:)
GrB_Index i = GxB_rowIterator_getRowIndex (iterator) ;
if (i >= row2) break ;
while (info == GrB_SUCCESS)
{
// get the entry A(i,j)
GrB_Index j = GxB_rowIterator_getColIndex (iterator) ;
double aij = GxB_Iterator_get_FP64 (iterator) ;
// move to the next entry in A(i,:)
info = GxB_rowIterator_nextCol (iterator) ;
}
// move to the next row, A(i+1,:)
info = GxB_rowIterator_nextRow (iterator) ;
}
GrB_free (&iterator) ;
}
In the parallel example above, a more balanced work distribution can be
obtained by first computing the row degree via GrB_mxv (see LAGraph), and
then compute the cumulative sum (ideally in parallel). Next, partition the
cumulative sum into one part per thread via binary search, and divide the
rows into parts accordingly.
*/
//------------------------------------------------------------------------------
// GxB_Iterator: definition and new/free methods
//------------------------------------------------------------------------------
// The contents of an iterator must not be directly accessed by the user
// application. Only the functions and macros provided here may access
// "iterator->..." contents. The iterator is defined here only so that macros
// can be used to speed up the use of the iterator methods. User applications
// must not use "iterator->..." directly.
// Opaque iterator state. Exposed here only so the GxB_*Iterator_* macros can
// expand inline; user code must never touch these fields directly (see the
// note above).
struct GB_Iterator_opaque
{
// these components change as the iterator moves (via seek or next):
int64_t pstart ; // the start of the current vector
int64_t pend ; // the end of the current vector
int64_t p ; // position of the current entry
int64_t k ; // the current vector
// only changes when the iterator is created:
size_t header_size ; // size of this iterator object
// these components only change when the iterator is attached:
int64_t pmax ; // avlen*avdim for bitmap; nvals(A) otherwise
int64_t avlen ; // length of each vector in the matrix
int64_t avdim ; // number of vectors in the matrix dimension
int64_t anvec ; // # of vectors present in the matrix
const int64_t *GB_restrict Ap ; // pointers for sparse and hypersparse
const int64_t *GB_restrict Ah ; // vector names for hypersparse
const int8_t *GB_restrict Ab ; // bitmap
const int64_t *GB_restrict Ai ; // indices for sparse and hypersparse
const void *GB_restrict Ax ; // values for all 4 data structures
size_t type_size ; // size of the type of A
int A_sparsity ; // sparse, hyper, bitmap, or full
bool iso ; // true if A is iso-valued, false otherwise
bool by_col ; // true if A is held by column, false if by row
} ;
typedef struct GB_Iterator_opaque *GxB_Iterator ;
// GxB_Iterator_new: create a new iterator, not attached to any matrix/vector
GB_PUBLIC GrB_Info GxB_Iterator_new (GxB_Iterator *iterator) ;
// GxB_Iterator_free: free an iterator (sets *iterator to a freed state)
GB_PUBLIC GrB_Info GxB_Iterator_free (GxB_Iterator *iterator) ;
//==============================================================================
// GB_Iterator_*: implements user-callable GxB_*Iterator_* methods
//==============================================================================
// GB_* methods are not user-callable. These methods appear here so that the
// iterator methods can be done via macros.
//------------------------------------------------------------------------------
// GB_Iterator_attach: attach a row/col/entry iterator to a matrix
//------------------------------------------------------------------------------
// GB_Iterator_attach: internal worker behind GxB_rowIterator_attach and its
// col/entry counterparts; not intended to be called directly by users.
GB_PUBLIC GrB_Info GB_Iterator_attach
(
GxB_Iterator iterator, // iterator to attach to the matrix A
GrB_Matrix A, // matrix to attach
GxB_Format_Value format, // by row, by col, or by entry (GxB_NO_FORMAT)
GrB_Descriptor desc
) ;
//------------------------------------------------------------------------------
// GB_Iterator_rc_seek: seek a row/col iterator to a particular vector
//------------------------------------------------------------------------------
// GB_Iterator_rc_seek: internal worker; positions a row/col iterator at a
// vector. If jth_vector is true, j is interpreted as the kth vector held in
// the data structure rather than a row/col index (NOTE(review): inferred from
// the parameter name — confirm against the implementation).
GB_PUBLIC GrB_Info GB_Iterator_rc_seek
(
GxB_Iterator iterator,
GrB_Index j,
bool jth_vector
) ;
//------------------------------------------------------------------------------
// GB_Iterator_rc_bitmap_next: move a row/col iterator to next entry in bitmap
//------------------------------------------------------------------------------
// internal worker used by the rc_knext/rc_inext macros below when the matrix
// is held in bitmap form
GB_PUBLIC GrB_Info GB_Iterator_rc_bitmap_next (GxB_Iterator iterator) ;
//------------------------------------------------------------------------------
// GB_Iterator_rc_knext: move a row/col iterator to the next vector
//------------------------------------------------------------------------------
// GB_Iterator_rc_knext is a single comma-expression so it can be used where
// an expression is required; it evaluates to GxB_EXHAUSTED (past the last
// vector), GrB_NO_VALUE (vector present but empty), or GrB_SUCCESS (iterator
// now at the first entry of the next vector). For bitmap matrices the result
// comes from GB_Iterator_rc_bitmap_next.
#define GB_Iterator_rc_knext(iterator) \
( \
/* move to the next vector, and check if iterator is exhausted */ \
(++(iterator->k) >= iterator->anvec) ? \
( \
/* iterator is at the end of the matrix; pin k at anvec */ \
iterator->pstart = 0, \
iterator->pend = 0, \
iterator->p = 0, \
iterator->k = iterator->anvec, \
GxB_EXHAUSTED \
) \
: \
( \
/* find first entry in vector, and pstart/pend for this vector */ \
(iterator->A_sparsity <= GxB_SPARSE) ? \
( \
/* matrix is sparse or hypersparse: pstart/pend come from Ap */ \
iterator->pstart = iterator->Ap [iterator->k], \
iterator->pend = iterator->Ap [iterator->k+1], \
iterator->p = iterator->pstart, \
((iterator->p >= iterator->pend) ? GrB_NO_VALUE : GrB_SUCCESS) \
) \
: \
( \
/* matrix is bitmap or full: each vector spans avlen positions */ \
iterator->pstart += iterator->avlen, \
iterator->pend += iterator->avlen, \
iterator->p = iterator->pstart, \
(iterator->A_sparsity <= GxB_BITMAP) ? \
( \
/* matrix is bitmap: skip positions not present in the bitmap */ \
GB_Iterator_rc_bitmap_next (iterator) \
) \
: \
( \
/* matrix is full */ \
((iterator->p >= iterator->pend) ? GrB_NO_VALUE : GrB_SUCCESS) \
) \
) \
) \
)
//------------------------------------------------------------------------------
// GB_Iterator_rc_inext: move a row/col iterator the next entry in the vector
//------------------------------------------------------------------------------
// GB_Iterator_rc_inext evaluates to GrB_NO_VALUE when the current vector has
// no more entries, and GrB_SUCCESS otherwise; for bitmap matrices the result
// comes from GB_Iterator_rc_bitmap_next, which skips absent positions.
#define GB_Iterator_rc_inext(iterator) \
( \
/* move to the next entry in the vector */ \
(++(iterator->p) >= iterator->pend) ? \
( \
/* no more entries in the current vector */ \
GrB_NO_VALUE \
) \
: \
( \
(iterator->A_sparsity == GxB_BITMAP) ? \
( \
/* the matrix is in bitmap form */ \
GB_Iterator_rc_bitmap_next (iterator) \
) \
: \
( \
GrB_SUCCESS \
) \
) \
)
//------------------------------------------------------------------------------
// GB_Iterator_rc_getj: get index of current vector for row/col iterator
//------------------------------------------------------------------------------
// GB_Iterator_rc_getj evaluates to the index of the current vector, or to
// avdim (one past the valid range) when the iterator is exhausted.
#define GB_Iterator_rc_getj(iterator) \
( \
(iterator->k >= iterator->anvec) ? \
( \
/* iterator is past the end of the matrix */ \
iterator->avdim \
) \
: \
( \
(iterator->A_sparsity == GxB_HYPERSPARSE) ? \
( \
/* return the name of kth vector: j = Ah [k] if it appears */ \
iterator->Ah [iterator->k] \
) \
: \
( \
/* return the kth vector: j = k */ \
iterator->k \
) \
) \
)
//------------------------------------------------------------------------------
// GB_Iterator_rc_geti: return index of current entry for row/col iterator
//------------------------------------------------------------------------------
// GB_Iterator_rc_geti evaluates to the index of the current entry within its
// vector: from Ai when indices are stored explicitly (sparse/hypersparse),
// or computed as an offset from pstart otherwise (bitmap/full).
#define GB_Iterator_rc_geti(iterator) \
( \
(iterator->Ai != NULL) ? \
( \
iterator->Ai [iterator->p] \
) \
: \
( \
(iterator->p - iterator->pstart) \
) \
)
//==============================================================================
// GxB_rowIterator_*: iterate over the rows of a matrix
//==============================================================================
#undef GxB_rowIterator_attach
#undef GxB_rowIterator_kount
#undef GxB_rowIterator_seekRow
#undef GxB_rowIterator_kseek
#undef GxB_rowIterator_nextRow
#undef GxB_rowIterator_nextCol
#undef GxB_rowIterator_getRowIndex
#undef GxB_rowIterator_getColIndex
//------------------------------------------------------------------------------
// GxB_rowIterator_attach: attach a row iterator to a matrix
//------------------------------------------------------------------------------
// On input, the iterator must already exist, having been created by
// GxB_Iterator_new.
// GxB_rowIterator_attach attaches a row iterator to a matrix.  If the iterator
// is already attached to a matrix, it is detached and then attached to the
// given matrix A.
// The following error conditions are returned:
//  GrB_NULL_POINTER:    if the iterator or A are NULL.
//  GrB_INVALID_OBJECT:  if the matrix A is invalid.
//  GrB_NOT_IMPLEMENTED: if the matrix A cannot be iterated by row.
//  GrB_OUT_OF_MEMORY:   if the method runs out of memory.
// If successful, the row iterator is attached to the matrix, but not to any
// specific row.  Use GxB_rowIterator_*seek* to move the iterator to a row.
GB_PUBLIC
GrB_Info GxB_rowIterator_attach
(
    GxB_Iterator iterator,
    GrB_Matrix A,
    GrB_Descriptor desc
) ;
// The prototype above is overridden by this macro, which forwards to the
// internal attach method with the by-row orientation:
#define GxB_rowIterator_attach(iterator, A, desc) \
( \
    GB_Iterator_attach (iterator, A, GxB_BY_ROW, desc) \
)
//------------------------------------------------------------------------------
// GxB_rowIterator_kount: upper bound on the # of nonempty rows of a matrix
//------------------------------------------------------------------------------
// On input, the row iterator must be attached to a matrix, but need not be at
// any specific row; results are undefined if this condition is not met.
// GxB_rowIterator_kount returns an upper bound on the # of non-empty rows of a
// matrix.  A GraphBLAS library may always return this as simply nrows(A), but
// in some libraries, it may be a value between the # of rows with at least one
// entry, and nrows(A), inclusive.  Any value in this range is a valid return
// value from this function.
// For SuiteSparse:GraphBLAS: If A is m-by-n, and sparse, bitmap, or full, then
// kount == m.  If A is hypersparse, kount is the # of vectors held in the data
// structure for the matrix, some of which may be empty, and kount <= m.
GB_PUBLIC
GrB_Index GxB_rowIterator_kount (GxB_Iterator iterator) ;
#define GxB_rowIterator_kount(iterator) \
( \
    (iterator)->anvec \
)
//------------------------------------------------------------------------------
// GxB_rowIterator_seekRow: move a row iterator to a different row of a matrix
//------------------------------------------------------------------------------
// On input, the row iterator must be attached to a matrix, but need not be at
// any specific row; results are undefined if this condition is not met.
// GxB_rowIterator_seekRow moves a row iterator to the first entry of A(row,:).
// If A(row,:) has no entries, the iterator may move to the first entry of next
// nonempty row i for some i > row.  The row index can be determined by
// GxB_rowIterator_getRowIndex.
// For SuiteSparse:GraphBLAS: If the matrix is hypersparse, and the row
// does not appear in the hyperlist, then the iterator is moved to the first
// row after the given row that does appear in the hyperlist.
// The method is always successful; the following conditions are returned:
//  GxB_EXHAUSTED:  if the row index is >= nrows(A); the row iterator is
//                  exhausted, but is still attached to the matrix.
//  GrB_NO_VALUE:   if the row index is valid but A(row,:) has no entries; the
//                  row iterator is positioned at A(row,:).
//  GrB_SUCCESS:    if the row index is valid and A(row,:) has at least one
//                  entry.  The row iterator is positioned at A(row,:).
//                  GxB_rowIterator_get* can be used to return the indices of
//                  the first entry in A(row,:), and GxB_Iterator_get* can
//                  return its value.
GB_PUBLIC
GrB_Info GxB_rowIterator_seekRow (GxB_Iterator iterator, GrB_Index row) ;
// false: 'row' is interpreted as a row index, not a position in the hyperlist
#define GxB_rowIterator_seekRow(iterator, row) \
( \
    GB_Iterator_rc_seek (iterator, row, false) \
)
//------------------------------------------------------------------------------
// GxB_rowIterator_kseek: move a row iterator to a different row of a matrix
//------------------------------------------------------------------------------
// On input, the row iterator must be attached to a matrix, but need not be at
// any specific row; results are undefined if this condition is not met.
// GxB_rowIterator_kseek is identical to GxB_rowIterator_seekRow, except for
// how the row index is specified.  The row is the kth non-empty row of A.
// More precisely, k is in the range 0 to kount-1, where kount is the value
// returned by GxB_rowIterator_kount.
GB_PUBLIC
GrB_Info GxB_rowIterator_kseek (GxB_Iterator iterator, GrB_Index k) ;
// true: 'k' is interpreted as a position among the stored vectors
#define GxB_rowIterator_kseek(iterator, k) \
( \
    GB_Iterator_rc_seek (iterator, k, true) \
)
//------------------------------------------------------------------------------
// GxB_rowIterator_nextRow: move a row iterator to the next row of a matrix
//------------------------------------------------------------------------------
// On input, the row iterator must already be attached to a matrix via a prior
// call to GxB_rowIterator_attach, and the iterator must be at a specific row,
// via a prior call to GxB_rowIterator_*seek* or GxB_rowIterator_nextRow;
// results are undefined if this condition is not met.
// If the row iterator is currently at A(row,:), it is moved to A(row+1,:),
// or to the first non-empty row after A(row,:), at the discretion of this
// method.  That is, empty rows may be skipped.
// The method is always successful, and the return conditions are identical to
// the return conditions of GxB_rowIterator_seekRow.
GB_PUBLIC
GrB_Info GxB_rowIterator_nextRow (GxB_Iterator iterator) ;
// "knext": advance to the next vector (row) of the matrix
#define GxB_rowIterator_nextRow(iterator) \
( \
    GB_Iterator_rc_knext (iterator) \
)
//------------------------------------------------------------------------------
// GxB_rowIterator_nextCol: move a row iterator to the next entry in A(row,:)
//------------------------------------------------------------------------------
// On input, the row iterator must already be attached to a matrix via a prior
// call to GxB_rowIterator_attach, and the iterator must be at a specific row,
// via a prior call to GxB_rowIterator_*seek* or GxB_rowIterator_nextRow;
// results are undefined if this condition is not met.
// The method is always successful, and returns the following conditions:
//  GrB_NO_VALUE:   If the iterator is already exhausted, or if there is no
//                  entry in the current A(row,:).
//  GrB_SUCCESS:    If the row iterator has been moved to the next entry in
//                  A(row,:).
GB_PUBLIC
GrB_Info GxB_rowIterator_nextCol (GxB_Iterator iterator) ;
// "inext": advance to the next entry within the current vector (row)
#define GxB_rowIterator_nextCol(iterator) \
( \
    GB_Iterator_rc_inext ((iterator)) \
)
//------------------------------------------------------------------------------
// GxB_rowIterator_getRowIndex: get current row index of a row iterator
//------------------------------------------------------------------------------
// On input, the iterator must be already successfully attached to matrix as a
// row iterator; results are undefined if this condition is not met.
// The method returns nrows(A) if the iterator is exhausted, or the current
// row index otherwise.  There need not be any entry in the current row.
// Zero is returned if the iterator is attached to the matrix but
// GxB_rowIterator_*seek* has not been called, but this does not mean the
// iterator is positioned at row zero.
GB_PUBLIC
GrB_Index GxB_rowIterator_getRowIndex (GxB_Iterator iterator) ;
// by row, the vector index j is the row index
#define GxB_rowIterator_getRowIndex(iterator) \
( \
    GB_Iterator_rc_getj ((iterator)) \
)
//------------------------------------------------------------------------------
// GxB_rowIterator_getColIndex: get current column index of a row iterator
//------------------------------------------------------------------------------
// On input, the iterator must be already successfully attached to matrix as a
// row iterator, and in addition, the row iterator must be positioned at a
// valid entry present in the matrix.  That is, the last call to
// GxB_rowIterator_*seek* or GxB_rowIterator_*next*, must have returned
// GrB_SUCCESS.  Results are undefined if this condition is not met.
GB_PUBLIC
GrB_Index GxB_rowIterator_getColIndex (GxB_Iterator iterator) ;
// by row, the entry index i is the column index
#define GxB_rowIterator_getColIndex(iterator) \
( \
    GB_Iterator_rc_geti ((iterator)) \
)
//==============================================================================
// GxB_colIterator_*: iterate over columns of a matrix
//==============================================================================
// The column iterator is analogous to the row iterator; each method below
// mirrors its GxB_rowIterator_* counterpart, with rows and columns swapped,
// and forwards to the same internal GB_Iterator_rc_* methods.
#undef GxB_colIterator_attach
#undef GxB_colIterator_kount
#undef GxB_colIterator_seekCol
#undef GxB_colIterator_kseek
#undef GxB_colIterator_nextCol
#undef GxB_colIterator_nextRow
#undef GxB_colIterator_getColIndex
#undef GxB_colIterator_getRowIndex
// GxB_colIterator_attach: attach a column iterator to a matrix
GB_PUBLIC
GrB_Info GxB_colIterator_attach
(
    GxB_Iterator iterator,
    GrB_Matrix A,
    GrB_Descriptor desc
) ;
#define GxB_colIterator_attach(iterator, A, desc) \
( \
    GB_Iterator_attach (iterator, A, GxB_BY_COL, desc) \
)
// GxB_colIterator_kount: return # of nonempty columns of the matrix
GB_PUBLIC
GrB_Index GxB_colIterator_kount (GxB_Iterator iterator) ;
#define GxB_colIterator_kount(iterator) \
( \
    (iterator)->anvec \
)
// GxB_colIterator_seekCol: move a column iterator to A(:,col)
GB_PUBLIC
GrB_Info GxB_colIterator_seekCol (GxB_Iterator iterator, GrB_Index col) ;
#define GxB_colIterator_seekCol(iterator, col) \
( \
    GB_Iterator_rc_seek (iterator, col, false) \
)
// GxB_colIterator_kseek: move a column iterator to kth non-empty column of A
GB_PUBLIC
GrB_Info GxB_colIterator_kseek (GxB_Iterator iterator, GrB_Index k) ;
#define GxB_colIterator_kseek(iterator, k) \
( \
    GB_Iterator_rc_seek (iterator, k, true) \
)
// GxB_colIterator_nextCol: move a column iterator to first entry of next column
GB_PUBLIC
GrB_Info GxB_colIterator_nextCol (GxB_Iterator iterator) ;
#define GxB_colIterator_nextCol(iterator) \
( \
    GB_Iterator_rc_knext ((iterator)) \
)
// GxB_colIterator_nextRow: move a column iterator to next entry in column
GB_PUBLIC
GrB_Info GxB_colIterator_nextRow (GxB_Iterator iterator) ;
#define GxB_colIterator_nextRow(iterator) \
( \
    GB_Iterator_rc_inext ((iterator)) \
)
// GxB_colIterator_getColIndex: return the column index of current entry
GB_PUBLIC
GrB_Index GxB_colIterator_getColIndex (GxB_Iterator iterator) ;
#define GxB_colIterator_getColIndex(iterator) \
( \
    GB_Iterator_rc_getj ((iterator)) \
)
// GxB_colIterator_getRowIndex: return the row index of current entry
GB_PUBLIC
GrB_Index GxB_colIterator_getRowIndex (GxB_Iterator iterator) ;
#define GxB_colIterator_getRowIndex(iterator) \
( \
    GB_Iterator_rc_geti ((iterator)) \
)
//==============================================================================
// GxB_Matrix_Iterator_*: iterate over the entries of a matrix
//==============================================================================
// Example usage:
// single-threaded iteration over a whole matrix, one entry at a time
/*
// create an iterator
GxB_Iterator iterator ;
GxB_Iterator_new (&iterator) ;
// attach it to the matrix A, known to be type GrB_FP64
GrB_Info info = GxB_Matrix_Iterator_attach (iterator, A, NULL) ;
if (info < 0) { handle the failure ... }
// seek to the first entry
info = GxB_Matrix_Iterator_seek (iterator, 0) ;
while (info != GxB_EXHAUSTED)
{
// get the entry A(i,j)
GrB_Index i, j ;
GxB_Matrix_Iterator_getIndex (iterator, &i, &j) ;
double aij = GxB_Iterator_get_FP64 (iterator) ;
// move to the next entry in A
info = GxB_Matrix_Iterator_next (iterator) ;
}
GrB_free (&iterator) ;
*/
//------------------------------------------------------------------------------
// GxB_Matrix_Iterator_attach: attach an entry iterator to a matrix
//------------------------------------------------------------------------------
// On input, the iterator must already exist, having been created by
// GxB_Iterator_new.
// GxB_Matrix_Iterator_attach attaches an entry iterator to a matrix. If the
// iterator is already attached to a matrix, it is detached and then attached
// to the given matrix A.
// The following error conditions are returned:
// GrB_NULL_POINTER: if the iterator or A are NULL.
// GrB_INVALID_OBJECT: if the matrix A is invalid.
// GrB_OUT_OF_MEMORY: if the method runs out of memory.
// If successful, the entry iterator is attached to the matrix, but not to any
// specific entry. Use GxB_Matrix_Iterator_*seek* to move the iterator to a
// particular entry.
GB_PUBLIC
GrB_Info GxB_Matrix_Iterator_attach
(
GxB_Iterator iterator,
GrB_Matrix A,
GrB_Descriptor desc
) ;
//------------------------------------------------------------------------------
// GxB_Matrix_Iterator_getpmax: return the range of the iterator
//------------------------------------------------------------------------------
// On input, the entry iterator must be already attached to a matrix via
// GxB_Matrix_Iterator_attach; results are undefined if this condition is not
// met.
// Entries in a matrix are given an index p, ranging from 0 to pmax-1, where
// pmax >= nvals(A). For sparse, hypersparse, and full matrices, pmax is equal
// to nvals(A). For an m-by-n bitmap matrix, pmax=m*n, or pmax=0 if the
// matrix has no entries.
GB_PUBLIC
GrB_Index GxB_Matrix_Iterator_getpmax (GxB_Iterator iterator) ;
//------------------------------------------------------------------------------
// GxB_Matrix_Iterator_seek: seek to a specific entry
//------------------------------------------------------------------------------
// On input, the entry iterator must be already attached to a matrix via
// GxB_Matrix_Iterator_attach; results are undefined if this condition is not
// met.
// The input p is in range 0 to pmax-1, which points to an entry in the matrix,
// or p >= pmax if the iterator is exhausted, where pmax is the return value
// from GxB_Matrix_Iterator_getpmax.
// Returns GrB_SUCCESS if the iterator is at an entry that exists in the
// matrix, or GxB_EXHAUSTED if the iterator is exhausted.
GB_PUBLIC
GrB_Info GxB_Matrix_Iterator_seek (GxB_Iterator iterator, GrB_Index p) ;
//------------------------------------------------------------------------------
// GxB_Matrix_Iterator_next: move to the next entry of a matrix
//------------------------------------------------------------------------------
// On input, the entry iterator must be already attached to a matrix via
// GxB_Matrix_Iterator_attach, and the position of the iterator must also have
// been defined by a prior call to GxB_Matrix_Iterator_seek or
// GxB_Matrix_Iterator_next. Results are undefined if these conditions are not
// met.
// Returns GrB_SUCCESS if the iterator is at an entry that exists in the
// matrix, or GxB_EXHAUSTED if the iterator is exhausted.
GB_PUBLIC
GrB_Info GxB_Matrix_Iterator_next (GxB_Iterator iterator) ;
//------------------------------------------------------------------------------
// GxB_Matrix_Iterator_getp: get the current position of a matrix iterator
//------------------------------------------------------------------------------
// On input, the entry iterator must be already attached to a matrix via
// GxB_Matrix_Iterator_attach, and the position of the iterator must also have
// been defined by a prior call to GxB_Matrix_Iterator_seek or
// GxB_Matrix_Iterator_next. Results are undefined if these conditions are not
// met.
GB_PUBLIC
GrB_Index GxB_Matrix_Iterator_getp (GxB_Iterator iterator) ;
//------------------------------------------------------------------------------
// GxB_Matrix_Iterator_getIndex: get the row and column index of a matrix entry
//------------------------------------------------------------------------------
// On input, the entry iterator must be already attached to a matrix via
// GxB_Matrix_Iterator_attach, and the position of the iterator must also have
// been defined by a prior call to GxB_Matrix_Iterator_seek or
// GxB_Matrix_Iterator_next, with a return value of GrB_SUCCESS. Results are
// undefined if these conditions are not met.
GB_PUBLIC
void GxB_Matrix_Iterator_getIndex
(
GxB_Iterator iterator,
GrB_Index *row,
GrB_Index *col
) ;
//==============================================================================
// GxB_Vector_Iterator_*: iterate over the entries of a vector
//==============================================================================
/* Example usage:
single-threaded iteration over a whole vector, one entry at a time
// create an iterator
GxB_Iterator iterator ;
GxB_Iterator_new (&iterator) ;
// attach it to the vector v, known to be type GrB_FP64
GrB_Info info = GxB_Vector_Iterator_attach (iterator, v, NULL) ;
if (info < 0) { handle the failure ... }
// seek to the first entry
info = GxB_Vector_Iterator_seek (iterator, 0) ;
while (info != GxB_EXHAUSTED)
{
// get the entry v(i)
GrB_Index i = GxB_Vector_Iterator_getIndex (iterator) ;
double vi = GxB_Iterator_get_FP64 (iterator) ;
// move to the next entry in v
info = GxB_Vector_Iterator_next (iterator) ;
}
GrB_free (&iterator) ;
*/
#undef GxB_Vector_Iterator_getpmax
#undef GxB_Vector_Iterator_seek
#undef GxB_Vector_Iterator_next
#undef GxB_Vector_Iterator_getp
#undef GxB_Vector_Iterator_getIndex
//------------------------------------------------------------------------------
// GxB_Vector_Iterator_attach: attach an iterator to a vector
//------------------------------------------------------------------------------
// On input, the iterator must already exist, having been created by
// GxB_Iterator_new.
// GxB_Vector_Iterator_attach attaches an iterator to a vector. If the
// iterator is already attached to a vector or matrix, it is detached and then
// attached to the given vector v.
// The following error conditions are returned:
// GrB_NULL_POINTER: if the iterator or v are NULL.
// GrB_INVALID_OBJECT: if the vector v is invalid.
// GrB_OUT_OF_MEMORY: if the method runs out of memory.
// If successful, the iterator is attached to the vector, but not to any
// specific entry. Use GxB_Vector_Iterator_seek to move the iterator to a
// particular entry.
GB_PUBLIC GrB_Info GxB_Vector_Iterator_attach
(
GxB_Iterator iterator,
GrB_Vector v,
GrB_Descriptor desc
) ;
//------------------------------------------------------------------------------
// GxB_Vector_Iterator_getpmax: return the range of the vector iterator
//------------------------------------------------------------------------------
// On input, the iterator must be already attached to a vector via
// GxB_Vector_Iterator_attach; results are undefined if this condition is not
// met.
// Entries in a vector are given an index p, ranging from 0 to pmax-1, where
// pmax >= nvals(v). For sparse and full vectors, pmax is equal to nvals(v).
// For a size-m bitmap vector, pmax=m, or pmax=0 if the vector has no entries.
GB_PUBLIC
GrB_Index GxB_Vector_Iterator_getpmax (GxB_Iterator iterator) ;
// GxB_Vector_Iterator_getpmax: return pmax, the range of valid entry
// positions; positions p satisfy 0 <= p < pmax (see the comments above).
// Fix: the macro argument is now parenthesized, matching GB_Iterator_get and
// the other macros in this file, so any expression can be passed safely.
#define GxB_Vector_Iterator_getpmax(iterator) \
( \
    ((iterator)->pmax) \
)
//------------------------------------------------------------------------------
// GxB_Vector_Iterator_seek: seek to a specific entry in the vector
//------------------------------------------------------------------------------
// On input, the iterator must be already attached to a vector via
// GxB_Vector_Iterator_attach; results are undefined if this condition is not
// met.
// The input p is in range 0 to pmax-1, which points to an entry in the vector,
// or p >= pmax if the iterator is exhausted, where pmax is the return value
// from GxB_Vector_Iterator_getpmax.
// Returns GrB_SUCCESS if the iterator is at an entry that exists in the
// vector, or GxB_EXHAUSTED if the iterator is exhausted.
GB_PUBLIC
GrB_Info GB_Vector_Iterator_bitmap_seek (GxB_Iterator iterator,
    GrB_Index unused) ; // unused parameter to be removed in v8.x
GB_PUBLIC
GrB_Info GxB_Vector_Iterator_seek (GxB_Iterator iterator, GrB_Index p) ;
// NOTE: the macro parameter is deliberately named 'q', not 'p'.  Macro
// expansion replaces every occurrence of a parameter token in the body, so a
// parameter named 'p' would also rewrite the member accesses 'iterator->p'
// below.
#define GB_Vector_Iterator_seek(iterator, q) \
( \
    (q >= iterator->pmax) ? \
    ( \
        /* the iterator is exhausted */ \
        iterator->p = iterator->pmax, \
        GxB_EXHAUSTED \
    ) \
    : \
    ( \
        /* seek to an arbitrary position in the vector */ \
        iterator->p = q, \
        (iterator->A_sparsity == GxB_BITMAP) ? \
        ( \
            GB_Vector_Iterator_bitmap_seek (iterator, 0) \
        ) \
        : \
        ( \
            GrB_SUCCESS \
        ) \
    ) \
)
// public entry point; forwards to the internal macro above
#define GxB_Vector_Iterator_seek(iterator, p) \
( \
    GB_Vector_Iterator_seek (iterator, p) \
)
//------------------------------------------------------------------------------
// GxB_Vector_Iterator_next: move to the next entry of a vector
//------------------------------------------------------------------------------
// On input, the iterator must be already attached to a vector via
// GxB_Vector_Iterator_attach, and the position of the iterator must also have
// been defined by a prior call to GxB_Vector_Iterator_seek or
// GxB_Vector_Iterator_next.  Results are undefined if these conditions are
// not met.
// Returns GrB_SUCCESS if the iterator is at an entry that exists in the
// vector, or GxB_EXHAUSTED if the iterator is exhausted.
GB_PUBLIC
GrB_Info GxB_Vector_Iterator_next (GxB_Iterator iterator) ;
// internal implementation; the GxB_* macro below forwards to it
#define GB_Vector_Iterator_next(iterator) \
( \
    /* move to the next entry */ \
    (++(iterator->p) >= iterator->pmax) ? \
    ( \
        /* the iterator is exhausted */ \
        iterator->p = iterator->pmax, \
        GxB_EXHAUSTED \
    ) \
    : \
    ( \
        (iterator->A_sparsity == GxB_BITMAP) ? \
        ( \
            /* bitmap: seek to the next entry present in the bitmap */ \
            GB_Vector_Iterator_bitmap_seek (iterator, 0) \
        ) \
        : \
        ( \
            /* other formats: already at the next entry */ \
            GrB_SUCCESS \
        ) \
    ) \
)
// public entry point
#define GxB_Vector_Iterator_next(iterator) \
( \
    GB_Vector_Iterator_next (iterator) \
)
//------------------------------------------------------------------------------
// GxB_Vector_Iterator_getp: get the current position of a vector iterator
//------------------------------------------------------------------------------
// On input, the iterator must be already attached to a vector via
// GxB_Vector_Iterator_attach, and the position of the iterator must also have
// been defined by a prior call to GxB_Vector_Iterator_seek or
// GxB_Vector_Iterator_next. Results are undefined if these conditions are not
// met.
GB_PUBLIC
GrB_Index GxB_Vector_Iterator_getp (GxB_Iterator iterator) ;
// GxB_Vector_Iterator_getp: return the current position p of the iterator,
// in the range 0 to pmax (pmax when exhausted; see the comments above).
// Fix: the macro argument is now parenthesized, matching GB_Iterator_get and
// the other macros in this file, so any expression can be passed safely.
#define GxB_Vector_Iterator_getp(iterator) \
( \
    ((iterator)->p) \
)
//------------------------------------------------------------------------------
// GxB_Vector_Iterator_getIndex: get the index of a vector entry
//------------------------------------------------------------------------------
// On input, the iterator must be already attached to a vector via
// GxB_Vector_Iterator_attach, and the position of the iterator must also have
// been defined by a prior call to GxB_Vector_Iterator_seek or
// GxB_Vector_Iterator_next, with a return value of GrB_SUCCESS. Results are
// undefined if these conditions are not met.
GB_PUBLIC
GrB_Index GxB_Vector_Iterator_getIndex (GxB_Iterator iterator) ;
// GxB_Vector_Iterator_getIndex: return the index of the current entry.
// Sparse vectors store indices explicitly in Ai; bitmap and full vectors
// have an implicit index equal to the position p itself.
// Fix: the macro argument is now parenthesized, matching GB_Iterator_get and
// the other macros in this file, so any expression can be passed safely.
#define GxB_Vector_Iterator_getIndex(iterator) \
( \
    (((iterator)->Ai != NULL) ? (iterator)->Ai [(iterator)->p] : (iterator)->p) \
)
//==============================================================================
// GxB_Iterator_get_TYPE: get value of the current entry for any iterator
//==============================================================================
// On input, the prior call to GxB_*Iterator_*seek*, or GxB_*Iterator_*next*
// must have returned GrB_SUCCESS, indicating that the iterator is at a valid
// current entry for either a matrix or vector.
// Returns the value of the current entry at the position determined by the
// iterator.  No typecasting is permitted; the method name must match the
// type of the matrix or vector.
#undef GxB_Iterator_get_BOOL
#undef GxB_Iterator_get_INT8
#undef GxB_Iterator_get_INT16
#undef GxB_Iterator_get_INT32
#undef GxB_Iterator_get_INT64
#undef GxB_Iterator_get_UINT8
#undef GxB_Iterator_get_UINT16
#undef GxB_Iterator_get_UINT32
#undef GxB_Iterator_get_UINT64
#undef GxB_Iterator_get_FP32
#undef GxB_Iterator_get_FP64
#undef GxB_Iterator_get_FC32
#undef GxB_Iterator_get_FC64
#undef GxB_Iterator_get_UDT
GB_PUBLIC bool       GxB_Iterator_get_BOOL (GxB_Iterator iterator) ;
GB_PUBLIC int8_t     GxB_Iterator_get_INT8 (GxB_Iterator iterator) ;
GB_PUBLIC int16_t    GxB_Iterator_get_INT16 (GxB_Iterator iterator) ;
GB_PUBLIC int32_t    GxB_Iterator_get_INT32 (GxB_Iterator iterator) ;
GB_PUBLIC int64_t    GxB_Iterator_get_INT64 (GxB_Iterator iterator) ;
GB_PUBLIC uint8_t    GxB_Iterator_get_UINT8 (GxB_Iterator iterator) ;
GB_PUBLIC uint16_t   GxB_Iterator_get_UINT16 (GxB_Iterator iterator) ;
GB_PUBLIC uint32_t   GxB_Iterator_get_UINT32 (GxB_Iterator iterator) ;
GB_PUBLIC uint64_t   GxB_Iterator_get_UINT64 (GxB_Iterator iterator) ;
GB_PUBLIC float      GxB_Iterator_get_FP32 (GxB_Iterator iterator) ;
GB_PUBLIC double     GxB_Iterator_get_FP64 (GxB_Iterator iterator) ;
GB_PUBLIC GxB_FC32_t GxB_Iterator_get_FC32 (GxB_Iterator iterator) ;
GB_PUBLIC GxB_FC64_t GxB_Iterator_get_FC64 (GxB_Iterator iterator) ;
GB_PUBLIC void       GxB_Iterator_get_UDT (GxB_Iterator iterator,
    void *value) ;
// GB_Iterator_get: read entry p from the value array Ax.  When iso is set,
// a single shared value is stored for all entries, so index 0 is read.
#define GB_Iterator_get(iterator, type) \
( \
    (((type *) (iterator)->Ax) [(iterator)->iso ? 0 : (iterator)->p]) \
)
#define GxB_Iterator_get_BOOL(iterator)   GB_Iterator_get (iterator, bool)
#define GxB_Iterator_get_INT8(iterator)   GB_Iterator_get (iterator, int8_t)
#define GxB_Iterator_get_INT16(iterator)  GB_Iterator_get (iterator, int16_t)
#define GxB_Iterator_get_INT32(iterator)  GB_Iterator_get (iterator, int32_t)
#define GxB_Iterator_get_INT64(iterator)  GB_Iterator_get (iterator, int64_t)
#define GxB_Iterator_get_UINT8(iterator)  GB_Iterator_get (iterator, uint8_t)
#define GxB_Iterator_get_UINT16(iterator) GB_Iterator_get (iterator, uint16_t)
#define GxB_Iterator_get_UINT32(iterator) GB_Iterator_get (iterator, uint32_t)
#define GxB_Iterator_get_UINT64(iterator) GB_Iterator_get (iterator, uint64_t)
#define GxB_Iterator_get_FP32(iterator)   GB_Iterator_get (iterator, float)
#define GxB_Iterator_get_FP64(iterator)   GB_Iterator_get (iterator, double)
#define GxB_Iterator_get_FC32(iterator)   GB_Iterator_get (iterator, GxB_FC32_t)
#define GxB_Iterator_get_FC64(iterator)   GB_Iterator_get (iterator, GxB_FC64_t)
// GxB_Iterator_get_UDT: copy a user-defined-type entry into the caller's
// buffer 'value', which must hold at least type_size bytes.
#define GxB_Iterator_get_UDT(iterator, value) \
( \
    (void) memcpy ((void *) value, ((const uint8_t *) ((iterator)->Ax)) + \
        ((iterator)->iso ? 0 : ((iterator)->type_size * (iterator)->p)), \
        (iterator)->type_size) \
)
//------------------------------------------------------------------------------
// Rapids Memory Manager wrappers for SuiteSparse:GraphBLAS
//------------------------------------------------------------------------------
// C-callable wrappers around the Rapids Memory Manager (RMM), so GraphBLAS
// can route all of its allocations through an RMM pool (see example below).
#ifndef RMM_WRAP_H
#define RMM_WRAP_H
#include <stddef.h>
#include <stdio.h>
#ifdef __cplusplus
extern "C" {
#endif
// TODO describe the modes
// NOTE(review): the names suggest which memory resource backs the pool:
// ordinary host memory, pinned host memory, device memory, or CUDA managed
// (unified) memory -- confirm against the rmm_wrap implementation.
typedef enum { rmm_wrap_host=0, rmm_wrap_host_pinned=1, rmm_wrap_device=2, rmm_wrap_managed=3 } RMM_MODE ;
// tear down the pool created by rmm_wrap_initialize
void rmm_wrap_finalize (void) ;
// create the pool (sizes presumably in bytes -- confirm); see example below
int rmm_wrap_initialize (RMM_MODE mode, size_t init_pool_size, size_t max_pool_size) ;
// example usage:
//  rmm_wrap_initialize (rmm_wrap_managed, INT32_MAX, INT64_MAX) ;
//  GxB_init (GrB_NONBLOCKING, rmm_wrap_malloc, rmm_wrap_calloc, rmm_wrap_realloc, rmm_wrap_free) ;
//  use GraphBLAS ...
//  GrB_finalize ( ) ;
//  rmm_wrap_finalize ( ) ;
// The two PMR-based allocate/deallocate signatures (C-style):
void *rmm_wrap_allocate (size_t *size) ;
void rmm_wrap_deallocate (void *p, size_t size) ;
// The four malloc/calloc/realloc/free signatures:
void *rmm_wrap_malloc (size_t size) ;
void *rmm_wrap_calloc (size_t n, size_t size) ;
void *rmm_wrap_realloc (void *p, size_t newsize) ;
void rmm_wrap_free (void *p) ;
#ifdef __cplusplus
}
#endif
#endif
#endif
|
protosplittest.c | #include "protosplittest.h"
// parallelize: run a three-way split computation concurrently.  The input
// array is treated as three consecutive chunks of 'leneach' elements each;
// 'fn' is applied to each chunk, writing into output1/output2/output3
// respectively.  Chunk 0 runs on the OpenMP master thread with a NULL final
// argument; chunks 1 and 2 run as OpenMP tasks and receive pd1 and pd2
// (presumably per-worker protocol/private data -- confirm against the
// split_fn implementations).
void parallelize(split_fn fn, void * output1, void * output2, void * output3, uint32_t * input, int leneach, void * pd1, void* pd2) {
    // exactly three threads: one for the master-thread chunk, two for tasks
    #pragma omp parallel num_threads(3)
    {
        //OpenMP seems to get along with obliv-c just fine, so long as obliv-c only uses the master thread.
        #pragma omp master
        {
            // chunk 0 must run on the master thread (obliv-c restriction above)
            fn(output1, input, leneach, NULL);
        }
        #pragma omp single
        {
            // one thread creates both tasks; the remaining threads execute them
            #pragma omp task
            fn(output2, &input[leneach], leneach, pd1);
            #pragma omp task
            fn(output3, &input[2*leneach], leneach, pd2);
        }
    }
}
trsm_x_sky_n_hi_col.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#include <memory.h>
#ifdef _OPENMP
#include <omp.h>
#endif
// Sparse triangular solve with multiple dense right-hand sides (TRSM) for a
// matrix stored in skyline (SKY) format: upper triangular, non-transposed,
// non-unit diagonal.  x and y are dense, column-major, with leading
// dimensions ldx and ldy.  For each right-hand-side column j, this computes
// by back substitution:
//      y(:,j) = inv(A) * (alpha * x(:,j))
// referencing only the upper-triangular skyline entries of A.
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_SKY *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy)
{
    ALPHA_INT m = A->rows;
    // Gather the diagonal: in this skyline layout each column segment
    // [pointers[r], pointers[r+1]) ends at the diagonal entry A(r,r).
    // NOTE(review): 'diag' is a variable-length array on the stack; a very
    // large m could overflow the stack -- consider heap allocation.
    ALPHA_Number diag[m];
    memset(diag, '\0', m * sizeof(ALPHA_Number));
    int num_thread = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
    for (ALPHA_INT r = 0; r < m; r++)
    {
        const ALPHA_INT indx = A->pointers[r + 1] - 1;
        diag[r] = A->values[indx];
    }
    // Right-hand-side columns are independent, so they are solved in parallel.
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
    for(ALPHA_INT out_y_col = 0; out_y_col < columns; out_y_col++)
    {
        // Back substitution: solve for unknowns from the last row upward.
        for (ALPHA_INT c = A->cols - 1; c >= 0; c--)
        {
            ALPHA_Number temp;
            alpha_setzero(temp);
            // temp = sum over ic > c of A(c,ic) * y(ic,j), using the
            // strictly-upper entries stored in each skyline column ic.
            for (ALPHA_INT ic = A->cols - 1; ic > c; ic--)
            {
                ALPHA_INT start = A->pointers[ic];
                ALPHA_INT end = A->pointers[ic + 1];
                ALPHA_INT eles_num = ic - c;
                // A(c,ic) sits eles_num slots before the diagonal at end-1;
                // skip it if row c lies above the top of this column's band.
                if(end - eles_num - 1 >= start)
                    alpha_madde(temp, A->values[end - eles_num - 1], y[out_y_col * ldy + ic]);
            }
            ALPHA_Number t;
            alpha_mul(t, alpha, x[out_y_col * ldx + c]);
            alpha_sub(t, t, temp);
            // y(c,j) = (alpha * x(c,j) - temp) / A(c,c)
            alpha_div(y[out_y_col * ldy + c], t, diag[c]);
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
Stmt.h | //===- Stmt.h - Classes for representing statements -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Stmt interface and subclasses.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_STMT_H
#define LLVM_CLANG_AST_STMT_H
#include "clang/AST/DeclGroup.h"
#include "clang/AST/StmtIterator.h"
#include "clang/Basic/CapturedStmt.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <string>
namespace llvm {
class FoldingSetNodeID;
} // namespace llvm
namespace clang {
class ASTContext;
class Attr;
class CapturedDecl;
class Decl;
class Expr;
class LabelDecl;
class ODRHash;
class PrinterHelper;
struct PrintingPolicy;
class RecordDecl;
class SourceManager;
class StringLiteral;
class Token;
class VarDecl;
//===----------------------------------------------------------------------===//
// AST classes for statements.
//===----------------------------------------------------------------------===//
/// Stmt - This represents one statement.
///
/// Stmt - This represents one statement.
///
/// The bulk of this class is a set of per-subclass bitfield structs, all
/// overlaid in a single anonymous union at the end of the class. Each
/// subclass keeps its compact state in its dedicated *Bitfields member;
/// the static_asserts in Stmt::Stmt() pin sizeof(Stmt) so that growing a
/// bitfield past the budget is caught at compile time.
class alignas(void *) Stmt {
public:
  enum StmtClass {
    NoStmtClass = 0,
#define STMT(CLASS, PARENT) CLASS##Class,
#define STMT_RANGE(BASE, FIRST, LAST) \
  first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class,
#define LAST_STMT_RANGE(BASE, FIRST, LAST) \
  first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class
#define ABSTRACT_STMT(STMT)
#include "clang/AST/StmtNodes.inc"
  };

  // Make vanilla 'new' and 'delete' illegal for Stmts. Statements must be
  // allocated through the ASTContext-aware operator new declared below.
protected:
  friend class ASTStmtReader;
  friend class ASTStmtWriter;

  void *operator new(size_t bytes) noexcept {
    llvm_unreachable("Stmts cannot be allocated with regular 'new'.");
  }

  void operator delete(void *data) noexcept {
    llvm_unreachable("Stmts cannot be released with regular 'delete'.");
  }

  //===--- Statement bitfields classes ---===//

  class StmtBitfields {
    friend class ASTStmtReader;
    friend class ASTStmtWriter;
    friend class Stmt;

    /// The statement class.
    unsigned sClass : 8;

    /// This bit is set only for the Stmts that are the structured-block of
    /// OpenMP executable directives. Directives that have a structured block
    /// are called "non-standalone" directives.
    /// I.e. those returned by OMPExecutableDirective::getStructuredBlock().
    unsigned IsOMPStructuredBlock : 1;
  };
  // 8 bits for sClass + 1 bit for IsOMPStructuredBlock.
  enum { NumStmtBits = 9 };

  class NullStmtBitfields {
    friend class ASTStmtReader;
    friend class ASTStmtWriter;
    friend class NullStmt;

    unsigned : NumStmtBits;

    /// True if the null statement was preceded by an empty macro, e.g:
    /// @code
    ///   #define CALL(x)
    ///   CALL(0);
    /// @endcode
    unsigned HasLeadingEmptyMacro : 1;

    /// The location of the semi-colon.
    SourceLocation SemiLoc;
  };

  class CompoundStmtBitfields {
    friend class ASTStmtReader;
    friend class CompoundStmt;

    unsigned : NumStmtBits;

    /// The number of statements in the compound statement.
    unsigned NumStmts : 32 - NumStmtBits;

    /// The location of the opening "{".
    SourceLocation LBraceLoc;
  };

  class LabelStmtBitfields {
    friend class LabelStmt;

    unsigned : NumStmtBits;

    /// The location of the label identifier.
    SourceLocation IdentLoc;
  };

  class AttributedStmtBitfields {
    friend class ASTStmtReader;
    friend class AttributedStmt;

    unsigned : NumStmtBits;

    /// Number of attributes.
    unsigned NumAttrs : 32 - NumStmtBits;

    /// The location of the attribute.
    SourceLocation AttrLoc;
  };

  class IfStmtBitfields {
    friend class ASTStmtReader;
    friend class IfStmt;

    unsigned : NumStmtBits;

    /// True if this if statement is a constexpr if.
    unsigned IsConstexpr : 1;

    /// True if this if statement has storage for an else statement.
    unsigned HasElse : 1;

    /// True if this if statement has storage for a variable declaration.
    unsigned HasVar : 1;

    /// True if this if statement has storage for an init statement.
    unsigned HasInit : 1;

    /// The location of the "if".
    SourceLocation IfLoc;
  };

  class SwitchStmtBitfields {
    friend class SwitchStmt;

    unsigned : NumStmtBits;

    /// True if the SwitchStmt has storage for an init statement.
    unsigned HasInit : 1;

    /// True if the SwitchStmt has storage for a condition variable.
    unsigned HasVar : 1;

    /// If the SwitchStmt is a switch on an enum value, records whether all
    /// the enum values were covered by CaseStmts.  The coverage information
    /// value is meant to be a hint for possible clients.
    unsigned AllEnumCasesCovered : 1;

    /// The location of the "switch".
    SourceLocation SwitchLoc;
  };

  class WhileStmtBitfields {
    friend class ASTStmtReader;
    friend class WhileStmt;

    unsigned : NumStmtBits;

    /// True if the WhileStmt has storage for a condition variable.
    unsigned HasVar : 1;

    /// The location of the "while".
    SourceLocation WhileLoc;
  };

  class DoStmtBitfields {
    friend class DoStmt;

    unsigned : NumStmtBits;

    /// The location of the "do".
    SourceLocation DoLoc;
  };

  class ForStmtBitfields {
    friend class ForStmt;

    unsigned : NumStmtBits;

    /// The location of the "for".
    SourceLocation ForLoc;
  };

  class GotoStmtBitfields {
    friend class GotoStmt;
    friend class IndirectGotoStmt;

    unsigned : NumStmtBits;

    /// The location of the "goto".
    SourceLocation GotoLoc;
  };

  class ContinueStmtBitfields {
    friend class ContinueStmt;

    unsigned : NumStmtBits;

    /// The location of the "continue".
    SourceLocation ContinueLoc;
  };

  class BreakStmtBitfields {
    friend class BreakStmt;

    unsigned : NumStmtBits;

    /// The location of the "break".
    SourceLocation BreakLoc;
  };

  class ReturnStmtBitfields {
    friend class ReturnStmt;

    unsigned : NumStmtBits;

    /// True if this ReturnStmt has storage for an NRVO candidate.
    unsigned HasNRVOCandidate : 1;

    /// The location of the "return".
    SourceLocation RetLoc;
  };

  class SwitchCaseBitfields {
    friend class SwitchCase;
    friend class CaseStmt;

    unsigned : NumStmtBits;

    /// Used by CaseStmt to store whether it is a case statement
    /// of the form case LHS ... RHS (a GNU extension).
    unsigned CaseStmtIsGNURange : 1;

    /// The location of the "case" or "default" keyword.
    SourceLocation KeywordLoc;
  };

  //===--- Expression bitfields classes ---===//

  class ExprBitfields {
    friend class ASTStmtReader; // deserialization
    friend class AtomicExpr; // ctor
    friend class BlockDeclRefExpr; // ctor
    friend class CallExpr; // ctor
    friend class CXXConstructExpr; // ctor
    friend class CXXDependentScopeMemberExpr; // ctor
    friend class CXXNewExpr; // ctor
    friend class CXXUnresolvedConstructExpr; // ctor
    friend class DeclRefExpr; // computeDependence
    friend class DependentScopeDeclRefExpr; // ctor
    friend class DesignatedInitExpr; // ctor
    friend class Expr;
    friend class InitListExpr; // ctor
    friend class ObjCArrayLiteral; // ctor
    friend class ObjCDictionaryLiteral; // ctor
    friend class ObjCMessageExpr; // ctor
    friend class OffsetOfExpr; // ctor
    friend class OpaqueValueExpr; // ctor
    friend class OverloadExpr; // ctor
    friend class ParenListExpr; // ctor
    friend class PseudoObjectExpr; // ctor
    friend class ShuffleVectorExpr; // ctor

    unsigned : NumStmtBits;

    unsigned ValueKind : 2;
    unsigned ObjectKind : 3;
    unsigned TypeDependent : 1;
    unsigned ValueDependent : 1;
    unsigned InstantiationDependent : 1;
    unsigned ContainsUnexpandedParameterPack : 1;
  };
  // NumStmtBits plus the 9 bits declared in ExprBitfields above
  // (2 + 3 + 1 + 1 + 1 + 1).
  enum { NumExprBits = NumStmtBits + 9 };

  class PredefinedExprBitfields {
    friend class ASTStmtReader;
    friend class PredefinedExpr;

    unsigned : NumExprBits;

    /// The kind of this PredefinedExpr. One of the enumeration values
    /// in PredefinedExpr::IdentKind.
    unsigned Kind : 4;

    /// True if this PredefinedExpr has a trailing "StringLiteral *"
    /// for the predefined identifier.
    unsigned HasFunctionName : 1;

    /// The location of this PredefinedExpr.
    SourceLocation Loc;
  };

  class DeclRefExprBitfields {
    friend class ASTStmtReader; // deserialization
    friend class DeclRefExpr;

    unsigned : NumExprBits;

    unsigned HasQualifier : 1;
    unsigned HasTemplateKWAndArgsInfo : 1;
    unsigned HasFoundDecl : 1;
    unsigned HadMultipleCandidates : 1;
    unsigned RefersToEnclosingVariableOrCapture : 1;

    /// The location of the declaration name itself.
    SourceLocation Loc;
  };

  enum APFloatSemantics {
    IEEEhalf,
    IEEEsingle,
    IEEEdouble,
    x87DoubleExtended,
    IEEEquad,
    PPCDoubleDouble
  };

  class FloatingLiteralBitfields {
    friend class FloatingLiteral;

    unsigned : NumExprBits;

    unsigned Semantics : 3; // Provides semantics for APFloat construction
    unsigned IsExact : 1;
  };

  class StringLiteralBitfields {
    friend class ASTStmtReader;
    friend class StringLiteral;

    unsigned : NumExprBits;

    /// The kind of this string literal.
    /// One of the enumeration values of StringLiteral::StringKind.
    unsigned Kind : 3;

    /// The width of a single character in bytes. Only values of 1, 2,
    /// and 4 bytes are supported. StringLiteral::mapCharByteWidth maps
    /// the target + string kind to the appropriate CharByteWidth.
    unsigned CharByteWidth : 3;

    unsigned IsPascal : 1;

    /// The number of concatenated tokens this string is made of.
    /// This is the number of trailing SourceLocation.
    unsigned NumConcatenated;
  };

  class CharacterLiteralBitfields {
    friend class CharacterLiteral;

    unsigned : NumExprBits;

    unsigned Kind : 3;
  };

  class UnaryOperatorBitfields {
    friend class UnaryOperator;

    unsigned : NumExprBits;

    unsigned Opc : 5;
    unsigned CanOverflow : 1;

    SourceLocation Loc;
  };

  class UnaryExprOrTypeTraitExprBitfields {
    friend class UnaryExprOrTypeTraitExpr;

    unsigned : NumExprBits;

    unsigned Kind : 3;
    unsigned IsType : 1; // true if operand is a type, false if an expression.
  };

  class ArraySubscriptExprBitfields {
    friend class ArraySubscriptExpr;

    unsigned : NumExprBits;

    SourceLocation RBracketLoc;
  };

  class CallExprBitfields {
    friend class CallExpr;

    unsigned : NumExprBits;

    unsigned NumPreArgs : 1;

    /// True if the callee of the call expression was found using ADL.
    unsigned UsesADL : 1;

    /// Padding used to align OffsetToTrailingObjects to a byte multiple.
    unsigned : 24 - 2 - NumExprBits;

    /// The offset in bytes from the this pointer to the start of the
    /// trailing objects belonging to CallExpr. Intentionally byte sized
    /// for faster access.
    unsigned OffsetToTrailingObjects : 8;
  };
  enum { NumCallExprBits = 32 };

  class MemberExprBitfields {
    friend class MemberExpr;

    unsigned : NumExprBits;

    /// IsArrow - True if this is "X->F", false if this is "X.F".
    unsigned IsArrow : 1;

    /// True if this member expression used a nested-name-specifier to
    /// refer to the member, e.g., "x->Base::f", or found its member via
    /// a using declaration.  When true, a MemberExprNameQualifier
    /// structure is allocated immediately after the MemberExpr.
    unsigned HasQualifierOrFoundDecl : 1;

    /// True if this member expression specified a template keyword
    /// and/or a template argument list explicitly, e.g., x->f<int>,
    /// x->template f, x->template f<int>.
    /// When true, an ASTTemplateKWAndArgsInfo structure and its
    /// TemplateArguments (if any) are present.
    unsigned HasTemplateKWAndArgsInfo : 1;

    /// True if this member expression refers to a method that
    /// was resolved from an overloaded set having size greater than 1.
    unsigned HadMultipleCandidates : 1;

    /// This is the location of the -> or . in the expression.
    SourceLocation OperatorLoc;
  };

  class CastExprBitfields {
    friend class CastExpr;
    friend class ImplicitCastExpr;

    unsigned : NumExprBits;

    unsigned Kind : 6;
    unsigned PartOfExplicitCast : 1; // Only set for ImplicitCastExpr.

    /// The number of CXXBaseSpecifiers in the cast. 14 bits would be enough
    /// here. ([implimits] Direct and indirect base classes [16384]).
    unsigned BasePathSize;
  };

  class BinaryOperatorBitfields {
    friend class BinaryOperator;

    unsigned : NumExprBits;

    unsigned Opc : 6;

    /// This is only meaningful for operations on floating point
    /// types and 0 otherwise.
    unsigned FPFeatures : 3;

    SourceLocation OpLoc;
  };

  class InitListExprBitfields {
    friend class InitListExpr;

    unsigned : NumExprBits;

    /// Whether this initializer list originally had a GNU array-range
    /// designator in it. This is a temporary marker used by CodeGen.
    unsigned HadArrayRangeDesignator : 1;
  };

  class ParenListExprBitfields {
    friend class ASTStmtReader;
    friend class ParenListExpr;

    unsigned : NumExprBits;

    /// The number of expressions in the paren list.
    unsigned NumExprs;
  };

  class GenericSelectionExprBitfields {
    friend class ASTStmtReader;
    friend class GenericSelectionExpr;

    unsigned : NumExprBits;

    /// The location of the "_Generic".
    SourceLocation GenericLoc;
  };

  class PseudoObjectExprBitfields {
    friend class ASTStmtReader; // deserialization
    friend class PseudoObjectExpr;

    unsigned : NumExprBits;

    // These don't need to be particularly wide, because they're
    // strictly limited by the forms of expressions we permit.
    unsigned NumSubExprs : 8;
    unsigned ResultIndex : 32 - 8 - NumExprBits;
  };

  class SourceLocExprBitfields {
    friend class ASTStmtReader;
    friend class SourceLocExpr;

    unsigned : NumExprBits;

    /// The kind of source location builtin represented by the SourceLocExpr.
    /// Ex. __builtin_LINE, __builtin_FUNCTION, etc.
    unsigned Kind : 2;
  };

  //===--- C++ Expression bitfields classes ---===//

  class CXXOperatorCallExprBitfields {
    friend class ASTStmtReader;
    friend class CXXOperatorCallExpr;

    unsigned : NumCallExprBits;

    /// The kind of this overloaded operator. One of the enumerator
    /// values of OverloadedOperatorKind.
    unsigned OperatorKind : 6;

    // Only meaningful for floating point types.
    unsigned FPFeatures : 3;
  };

  class CXXBoolLiteralExprBitfields {
    friend class CXXBoolLiteralExpr;

    unsigned : NumExprBits;

    /// The value of the boolean literal.
    unsigned Value : 1;

    /// The location of the boolean literal.
    SourceLocation Loc;
  };

  class CXXNullPtrLiteralExprBitfields {
    friend class CXXNullPtrLiteralExpr;

    unsigned : NumExprBits;

    /// The location of the null pointer literal.
    SourceLocation Loc;
  };

  class CXXThisExprBitfields {
    friend class CXXThisExpr;

    unsigned : NumExprBits;

    /// Whether this is an implicit "this".
    unsigned IsImplicit : 1;

    /// The location of the "this".
    SourceLocation Loc;
  };

  class CXXThrowExprBitfields {
    friend class ASTStmtReader;
    friend class CXXThrowExpr;

    unsigned : NumExprBits;

    /// Whether the thrown variable (if any) is in scope.
    unsigned IsThrownVariableInScope : 1;

    /// The location of the "throw".
    SourceLocation ThrowLoc;
  };

  class CXXDefaultArgExprBitfields {
    friend class ASTStmtReader;
    friend class CXXDefaultArgExpr;

    unsigned : NumExprBits;

    /// The location where the default argument expression was used.
    SourceLocation Loc;
  };

  class CXXDefaultInitExprBitfields {
    friend class ASTStmtReader;
    friend class CXXDefaultInitExpr;

    unsigned : NumExprBits;

    /// The location where the default initializer expression was used.
    SourceLocation Loc;
  };

  class CXXScalarValueInitExprBitfields {
    friend class ASTStmtReader;
    friend class CXXScalarValueInitExpr;

    unsigned : NumExprBits;

    SourceLocation RParenLoc;
  };

  class CXXNewExprBitfields {
    friend class ASTStmtReader;
    friend class ASTStmtWriter;
    friend class CXXNewExpr;

    unsigned : NumExprBits;

    /// Was the usage ::new, i.e. is the global new to be used?
    unsigned IsGlobalNew : 1;

    /// Do we allocate an array? If so, the first trailing "Stmt *" is the
    /// size expression.
    unsigned IsArray : 1;

    /// Should the alignment be passed to the allocation function?
    unsigned ShouldPassAlignment : 1;

    /// If this is an array allocation, does the usual deallocation
    /// function for the allocated type want to know the allocated size?
    unsigned UsualArrayDeleteWantsSize : 1;

    /// What kind of initializer do we have? Could be none, parens, or braces.
    /// In storage, we distinguish between "none, and no initializer expr", and
    /// "none, but an implicit initializer expr".
    unsigned StoredInitializationStyle : 2;

    /// True if the allocated type was expressed as a parenthesized type-id.
    unsigned IsParenTypeId : 1;

    /// The number of placement new arguments.
    unsigned NumPlacementArgs;
  };

  class CXXDeleteExprBitfields {
    friend class ASTStmtReader;
    friend class CXXDeleteExpr;

    unsigned : NumExprBits;

    /// Is this a forced global delete, i.e. "::delete"?
    unsigned GlobalDelete : 1;

    /// Is this the array form of delete, i.e. "delete[]"?
    unsigned ArrayForm : 1;

    /// ArrayFormAsWritten can be different from ArrayForm if 'delete' is
    /// applied to pointer-to-array type (ArrayFormAsWritten will be false
    /// while ArrayForm will be true).
    unsigned ArrayFormAsWritten : 1;

    /// Does the usual deallocation function for the element type require
    /// a size_t argument?
    unsigned UsualArrayDeleteWantsSize : 1;

    /// Location of the expression.
    SourceLocation Loc;
  };

  class TypeTraitExprBitfields {
    friend class ASTStmtReader;
    friend class ASTStmtWriter;
    friend class TypeTraitExpr;

    unsigned : NumExprBits;

    /// The kind of type trait, which is a value of a TypeTrait enumerator.
    unsigned Kind : 8;

    /// If this expression is not value-dependent, this indicates whether
    /// the trait evaluated true or false.
    unsigned Value : 1;

    /// The number of arguments to this type trait.
    unsigned NumArgs : 32 - 8 - 1 - NumExprBits;
  };

  class DependentScopeDeclRefExprBitfields {
    friend class ASTStmtReader;
    friend class ASTStmtWriter;
    friend class DependentScopeDeclRefExpr;

    unsigned : NumExprBits;

    /// Whether the name includes info for explicit template
    /// keyword and arguments.
    unsigned HasTemplateKWAndArgsInfo : 1;
  };

  class CXXConstructExprBitfields {
    friend class ASTStmtReader;
    friend class CXXConstructExpr;

    unsigned : NumExprBits;

    unsigned Elidable : 1;
    unsigned HadMultipleCandidates : 1;
    unsigned ListInitialization : 1;
    unsigned StdInitListInitialization : 1;
    unsigned ZeroInitialization : 1;
    unsigned ConstructionKind : 3;

    SourceLocation Loc;
  };

  class ExprWithCleanupsBitfields {
    friend class ASTStmtReader; // deserialization
    friend class ExprWithCleanups;

    unsigned : NumExprBits;

    // When false, it must not have side effects.
    unsigned CleanupsHaveSideEffects : 1;

    unsigned NumObjects : 32 - 1 - NumExprBits;
  };

  class CXXUnresolvedConstructExprBitfields {
    friend class ASTStmtReader;
    friend class CXXUnresolvedConstructExpr;

    unsigned : NumExprBits;

    /// The number of arguments used to construct the type.
    unsigned NumArgs;
  };

  class CXXDependentScopeMemberExprBitfields {
    friend class ASTStmtReader;
    friend class CXXDependentScopeMemberExpr;

    unsigned : NumExprBits;

    /// Whether this member expression used the '->' operator or
    /// the '.' operator.
    unsigned IsArrow : 1;

    /// Whether this member expression has info for explicit template
    /// keyword and arguments.
    unsigned HasTemplateKWAndArgsInfo : 1;

    /// See getFirstQualifierFoundInScope() and the comment listing
    /// the trailing objects.
    unsigned HasFirstQualifierFoundInScope : 1;

    /// The location of the '->' or '.' operator.
    SourceLocation OperatorLoc;
  };

  class OverloadExprBitfields {
    friend class ASTStmtReader;
    friend class OverloadExpr;

    unsigned : NumExprBits;

    /// Whether the name includes info for explicit template
    /// keyword and arguments.
    unsigned HasTemplateKWAndArgsInfo : 1;

    /// Padding used by the derived classes to store various bits. If you
    /// need to add some data here, shrink this padding and add your data
    /// above. NumOverloadExprBits also needs to be updated.
    unsigned : 32 - NumExprBits - 1;

    /// The number of results.
    unsigned NumResults;
  };
  enum { NumOverloadExprBits = NumExprBits + 1 };

  class UnresolvedLookupExprBitfields {
    friend class ASTStmtReader;
    friend class UnresolvedLookupExpr;

    unsigned : NumOverloadExprBits;

    /// True if these lookup results should be extended by
    /// argument-dependent lookup if this is the operand of a function call.
    unsigned RequiresADL : 1;

    /// True if these lookup results are overloaded.  This is pretty trivially
    /// rederivable if we urgently need to kill this field.
    unsigned Overloaded : 1;
  };
  // Fixed: the adjacent string literals previously lacked a separating
  // space, yielding the garbled message "...4 bytes toavoid trashing...".
  static_assert(sizeof(UnresolvedLookupExprBitfields) <= 4,
                "UnresolvedLookupExprBitfields must be <= 4 bytes to "
                "avoid trashing OverloadExprBitfields::NumResults!");

  class UnresolvedMemberExprBitfields {
    friend class ASTStmtReader;
    friend class UnresolvedMemberExpr;

    unsigned : NumOverloadExprBits;

    /// Whether this member expression used the '->' operator or
    /// the '.' operator.
    unsigned IsArrow : 1;

    /// Whether the lookup results contain an unresolved using declaration.
    unsigned HasUnresolvedUsing : 1;
  };
  static_assert(sizeof(UnresolvedMemberExprBitfields) <= 4,
                "UnresolvedMemberExprBitfields must be <= 4 bytes to "
                "avoid trashing OverloadExprBitfields::NumResults!");

  class CXXNoexceptExprBitfields {
    friend class ASTStmtReader;
    friend class CXXNoexceptExpr;

    unsigned : NumExprBits;

    unsigned Value : 1;
  };

  class SubstNonTypeTemplateParmExprBitfields {
    friend class ASTStmtReader;
    friend class SubstNonTypeTemplateParmExpr;

    unsigned : NumExprBits;

    /// The location of the non-type template parameter reference.
    SourceLocation NameLoc;
  };

  //===--- C++ Coroutines TS bitfields classes ---===//

  class CoawaitExprBitfields {
    friend class CoawaitExpr;

    unsigned : NumExprBits;

    unsigned IsImplicit : 1;
  };

  //===--- Obj-C Expression bitfields classes ---===//

  class ObjCIndirectCopyRestoreExprBitfields {
    friend class ObjCIndirectCopyRestoreExpr;

    unsigned : NumExprBits;

    unsigned ShouldCopy : 1;
  };

  //===--- Clang Extensions bitfields classes ---===//

  class OpaqueValueExprBitfields {
    friend class ASTStmtReader;
    friend class OpaqueValueExpr;

    unsigned : NumExprBits;

    /// The OVE is a unique semantic reference to its source expression if this
    /// bit is set to true.
    unsigned IsUnique : 1;

    SourceLocation Loc;
  };

  // All of the per-subclass bitfield structs overlap here; a Stmt only ever
  // reads/writes the member matching its dynamic class.
  union {
    // Same order as in StmtNodes.td.
    // Statements
    StmtBitfields StmtBits;
    NullStmtBitfields NullStmtBits;
    CompoundStmtBitfields CompoundStmtBits;
    LabelStmtBitfields LabelStmtBits;
    AttributedStmtBitfields AttributedStmtBits;
    IfStmtBitfields IfStmtBits;
    SwitchStmtBitfields SwitchStmtBits;
    WhileStmtBitfields WhileStmtBits;
    DoStmtBitfields DoStmtBits;
    ForStmtBitfields ForStmtBits;
    GotoStmtBitfields GotoStmtBits;
    ContinueStmtBitfields ContinueStmtBits;
    BreakStmtBitfields BreakStmtBits;
    ReturnStmtBitfields ReturnStmtBits;
    SwitchCaseBitfields SwitchCaseBits;

    // Expressions
    ExprBitfields ExprBits;
    PredefinedExprBitfields PredefinedExprBits;
    DeclRefExprBitfields DeclRefExprBits;
    FloatingLiteralBitfields FloatingLiteralBits;
    StringLiteralBitfields StringLiteralBits;
    CharacterLiteralBitfields CharacterLiteralBits;
    UnaryOperatorBitfields UnaryOperatorBits;
    UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits;
    ArraySubscriptExprBitfields ArraySubscriptExprBits;
    CallExprBitfields CallExprBits;
    MemberExprBitfields MemberExprBits;
    CastExprBitfields CastExprBits;
    BinaryOperatorBitfields BinaryOperatorBits;
    InitListExprBitfields InitListExprBits;
    ParenListExprBitfields ParenListExprBits;
    GenericSelectionExprBitfields GenericSelectionExprBits;
    PseudoObjectExprBitfields PseudoObjectExprBits;
    SourceLocExprBitfields SourceLocExprBits;

    // C++ Expressions
    CXXOperatorCallExprBitfields CXXOperatorCallExprBits;
    CXXBoolLiteralExprBitfields CXXBoolLiteralExprBits;
    CXXNullPtrLiteralExprBitfields CXXNullPtrLiteralExprBits;
    CXXThisExprBitfields CXXThisExprBits;
    CXXThrowExprBitfields CXXThrowExprBits;
    CXXDefaultArgExprBitfields CXXDefaultArgExprBits;
    CXXDefaultInitExprBitfields CXXDefaultInitExprBits;
    CXXScalarValueInitExprBitfields CXXScalarValueInitExprBits;
    CXXNewExprBitfields CXXNewExprBits;
    CXXDeleteExprBitfields CXXDeleteExprBits;
    TypeTraitExprBitfields TypeTraitExprBits;
    DependentScopeDeclRefExprBitfields DependentScopeDeclRefExprBits;
    CXXConstructExprBitfields CXXConstructExprBits;
    ExprWithCleanupsBitfields ExprWithCleanupsBits;
    CXXUnresolvedConstructExprBitfields CXXUnresolvedConstructExprBits;
    CXXDependentScopeMemberExprBitfields CXXDependentScopeMemberExprBits;
    OverloadExprBitfields OverloadExprBits;
    UnresolvedLookupExprBitfields UnresolvedLookupExprBits;
    UnresolvedMemberExprBitfields UnresolvedMemberExprBits;
    CXXNoexceptExprBitfields CXXNoexceptExprBits;
    SubstNonTypeTemplateParmExprBitfields SubstNonTypeTemplateParmExprBits;

    // C++ Coroutines TS expressions
    CoawaitExprBitfields CoawaitBits;

    // Obj-C Expressions
    ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits;

    // Clang Extensions
    OpaqueValueExprBitfields OpaqueValueExprBits;
  };

public:
  // Only allow allocation of Stmts using the allocator in ASTContext
  // or by doing a placement new.
  void* operator new(size_t bytes, const ASTContext& C,
                     unsigned alignment = 8);

  void* operator new(size_t bytes, const ASTContext* C,
                     unsigned alignment = 8) {
    return operator new(bytes, *C, alignment);
  }

  void *operator new(size_t bytes, void *mem) noexcept { return mem; }

  void operator delete(void *, const ASTContext &, unsigned) noexcept {}
  void operator delete(void *, const ASTContext *, unsigned) noexcept {}
  void operator delete(void *, size_t) noexcept {}
  void operator delete(void *, void *) noexcept {}

public:
  /// A placeholder type used to construct an empty shell of a
  /// type, that will be filled in later (e.g., by some
  /// de-serialization).
  struct EmptyShell {};

protected:
  /// Iterator for iterating over Stmt * arrays that contain only T *.
  ///
  /// This is needed because AST nodes use Stmt* arrays to store
  /// references to children (to be compatible with StmtIterator).
  template<typename T, typename TPtr = T *, typename StmtPtr = Stmt *>
  struct CastIterator
      : llvm::iterator_adaptor_base<CastIterator<T, TPtr, StmtPtr>, StmtPtr *,
                                    std::random_access_iterator_tag, TPtr> {
    using Base = typename CastIterator::iterator_adaptor_base;

    CastIterator() : Base(nullptr) {}
    CastIterator(StmtPtr *I) : Base(I) {}

    typename Base::value_type operator*() const {
      return cast_or_null<T>(*this->I);
    }
  };

  /// Const iterator for iterating over Stmt * arrays that contain only T *.
  template <typename T>
  using ConstCastIterator = CastIterator<T, const T *const, const Stmt *const>;

  using ExprIterator = CastIterator<Expr>;
  using ConstExprIterator = ConstCastIterator<Expr>;

private:
  /// Whether statistic collection is enabled.
  static bool StatisticsEnabled;

protected:
  /// Construct an empty statement.
  explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {}

public:
  Stmt() = delete;
  Stmt(const Stmt &) = delete;
  Stmt(Stmt &&) = delete;
  Stmt &operator=(const Stmt &) = delete;
  Stmt &operator=(Stmt &&) = delete;

  Stmt(StmtClass SC) {
    static_assert(sizeof(*this) <= 8,
                  "changing bitfields changed sizeof(Stmt)");
    static_assert(sizeof(*this) % alignof(void *) == 0,
                  "Insufficient alignment!");
    StmtBits.sClass = SC;
    StmtBits.IsOMPStructuredBlock = false;
    if (StatisticsEnabled) Stmt::addStmtClass(SC);
  }

  StmtClass getStmtClass() const {
    return static_cast<StmtClass>(StmtBits.sClass);
  }

  const char *getStmtClassName() const;

  bool isOMPStructuredBlock() const { return StmtBits.IsOMPStructuredBlock; }
  void setIsOMPStructuredBlock(bool IsOMPStructuredBlock) {
    StmtBits.IsOMPStructuredBlock = IsOMPStructuredBlock;
  }

  /// SourceLocation tokens are not useful in isolation - they are low level
  /// value objects created/interpreted by SourceManager. We assume AST
  /// clients will have a pointer to the respective SourceManager.
  SourceRange getSourceRange() const LLVM_READONLY;
  SourceLocation getBeginLoc() const LLVM_READONLY;
  SourceLocation getEndLoc() const LLVM_READONLY;

  // global temp stats (until we have a per-module visitor)
  static void addStmtClass(const StmtClass s);
  static void EnableStatistics();
  static void PrintStats();

  /// Dumps the specified AST fragment and all subtrees to
  /// \c llvm::errs().
  void dump() const;
  void dump(SourceManager &SM) const;
  void dump(raw_ostream &OS, SourceManager &SM) const;
  void dump(raw_ostream &OS) const;

  /// \return Unique reproducible object identifier
  int64_t getID(const ASTContext &Context) const;

  /// dumpColor - same as dump(), but forces color highlighting.
  void dumpColor() const;

  /// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
  /// back to its original source language syntax.
  void dumpPretty(const ASTContext &Context) const;
  void printPretty(raw_ostream &OS, PrinterHelper *Helper,
                   const PrintingPolicy &Policy, unsigned Indentation = 0,
                   StringRef NewlineSymbol = "\n",
                   const ASTContext *Context = nullptr) const;

  /// Pretty-prints in JSON format.
  void printJson(raw_ostream &Out, PrinterHelper *Helper,
                 const PrintingPolicy &Policy, bool AddQuotes) const;

  /// viewAST - Visualize an AST rooted at this Stmt* using GraphViz.  Only
  /// works on systems with GraphViz (Mac OS X) or dot+gv installed.
  void viewAST() const;

  /// Skip no-op (attributed, compound) container stmts and skip captured
  /// stmt at the top, if \a IgnoreCaptured is true.
  Stmt *IgnoreContainers(bool IgnoreCaptured = false);
  const Stmt *IgnoreContainers(bool IgnoreCaptured = false) const {
    return const_cast<Stmt *>(this)->IgnoreContainers(IgnoreCaptured);
  }

  const Stmt *stripLabelLikeStatements() const;
  Stmt *stripLabelLikeStatements() {
    return const_cast<Stmt*>(
      const_cast<const Stmt*>(this)->stripLabelLikeStatements());
  }

  /// Child Iterators: All subclasses must implement 'children'
  /// to permit easy iteration over the substatements/subexpressions of an
  /// AST node. This permits easy iteration over all nodes in the AST.
  using child_iterator = StmtIterator;
  using const_child_iterator = ConstStmtIterator;

  using child_range = llvm::iterator_range<child_iterator>;
  using const_child_range = llvm::iterator_range<const_child_iterator>;

  child_range children();

  const_child_range children() const {
    auto Children = const_cast<Stmt *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_iterator child_begin() { return children().begin(); }
  child_iterator child_end() { return children().end(); }

  const_child_iterator child_begin() const { return children().begin(); }
  const_child_iterator child_end() const { return children().end(); }

  /// Produce a unique representation of the given statement.
  ///
  /// \param ID once the profiling operation is complete, will contain
  /// the unique representation of the given statement.
  ///
  /// \param Context the AST context in which the statement resides
  ///
  /// \param Canonical whether the profile should be based on the canonical
  /// representation of this statement (e.g., where non-type template
  /// parameters are identified by index/level rather than their
  /// declaration pointers) or the exact representation of the statement as
  /// written in the source.
  void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
               bool Canonical) const;

  /// Calculate a unique representation for a statement that is
  /// stable across compiler invocations.
  ///
  /// \param ID profile information will be stored in ID.
  ///
  /// \param Hash an ODRHash object which will be called where pointers would
  /// have been used in the Profile function.
  void ProcessODRHash(llvm::FoldingSetNodeID &ID, ODRHash& Hash) const;
};
/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
class DeclStmt : public Stmt {
  /// The group of declarations this statement carries (may hold one or
  /// several declarations; see DeclGroupRef).
  DeclGroupRef DG;

  /// Source range of the statement: StartLoc is the beginning of the
  /// declaration, EndLoc is past the end (returned by getEndLoc()).
  SourceLocation StartLoc, EndLoc;

public:
  /// Build a declaration statement for the given declaration group and
  /// source range.
  DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc)
      : Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {}

  /// Build an empty declaration statement.
  explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) {}

  /// isSingleDecl - This method returns true if this DeclStmt refers
  /// to a single Decl.
  bool isSingleDecl() const { return DG.isSingleDecl(); }

  /// Return the single declaration of this statement.
  /// NOTE(review): presumably only meaningful when isSingleDecl() is true —
  /// see DeclGroupRef::getSingleDecl for the exact precondition.
  const Decl *getSingleDecl() const { return DG.getSingleDecl(); }
  Decl *getSingleDecl() { return DG.getSingleDecl(); }

  /// Access the underlying declaration group.
  const DeclGroupRef getDeclGroup() const { return DG; }
  DeclGroupRef getDeclGroup() { return DG; }
  void setDeclGroup(DeclGroupRef DGR) { DG = DGR; }

  // Source-location accessors used by getSourceRange().
  void setStartLoc(SourceLocation L) { StartLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  SourceLocation getBeginLoc() const LLVM_READONLY { return StartLoc; }

  /// LLVM-style RTTI: true if \p T is a DeclStmt.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DeclStmtClass;
  }

  // Iterators over subexpressions.
  child_range children() {
    return child_range(child_iterator(DG.begin(), DG.end()),
                       child_iterator(DG.end(), DG.end()));
  }

  const_child_range children() const {
    auto Children = const_cast<DeclStmt *>(this)->children();
    return const_child_range(Children);
  }

  // Iterators over the declarations themselves (as opposed to the
  // child statements exposed above).
  using decl_iterator = DeclGroupRef::iterator;
  using const_decl_iterator = DeclGroupRef::const_iterator;
  using decl_range = llvm::iterator_range<decl_iterator>;
  using decl_const_range = llvm::iterator_range<const_decl_iterator>;

  decl_range decls() { return decl_range(decl_begin(), decl_end()); }

  decl_const_range decls() const {
    return decl_const_range(decl_begin(), decl_end());
  }

  decl_iterator decl_begin() { return DG.begin(); }
  decl_iterator decl_end() { return DG.end(); }
  const_decl_iterator decl_begin() const { return DG.begin(); }
  const_decl_iterator decl_end() const { return DG.end(); }

  // Reverse iteration over the declarations.
  using reverse_decl_iterator = std::reverse_iterator<decl_iterator>;

  reverse_decl_iterator decl_rbegin() {
    return reverse_decl_iterator(decl_end());
  }

  reverse_decl_iterator decl_rend() {
    return reverse_decl_iterator(decl_begin());
  }
};
/// NullStmt - This is the null statement ";": C99 6.8.3p3.
///
class NullStmt : public Stmt {
public:
  /// Build a null statement located at the given semicolon.
  /// \param L the location of the ";".
  /// \param hasLeadingEmptyMacro true when the ";" directly follows an
  ///        empty macro expansion (see hasLeadingEmptyMacro()).
  NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false)
      : Stmt(NullStmtClass) {
    NullStmtBits.HasLeadingEmptyMacro = hasLeadingEmptyMacro;
    setSemiLoc(L);
  }

  /// Build an empty null statement.
  explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty) {}

  // The semicolon location is stored in the NullStmtBits bitfield struct.
  SourceLocation getSemiLoc() const { return NullStmtBits.SemiLoc; }
  void setSemiLoc(SourceLocation L) { NullStmtBits.SemiLoc = L; }

  /// True if this ";" was preceded by an empty macro expansion
  /// (e.g. "#define CALL(x)" followed by "CALL(0);").
  bool hasLeadingEmptyMacro() const {
    return NullStmtBits.HasLeadingEmptyMacro;
  }

  // A null statement occupies exactly the semicolon token.
  SourceLocation getBeginLoc() const { return getSemiLoc(); }
  SourceLocation getEndLoc() const { return getSemiLoc(); }

  /// LLVM-style RTTI: true if \p T is a NullStmt.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == NullStmtClass;
  }

  // A null statement has no children; return empty ranges.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// CompoundStmt - This represents a group of statements like { stmt stmt }.
class CompoundStmt final : public Stmt,
                           private llvm::TrailingObjects<CompoundStmt, Stmt *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  /// The location of the closing "}". LBraceLoc is stored in CompoundStmtBits.
  SourceLocation RBraceLoc;

  // Private: clients go through Create()/CreateEmpty() so the trailing
  // statement array is allocated with the object.
  CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB);
  explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty) {}

  // Defined out of line; copies Stmts into the trailing storage.
  void setStmts(ArrayRef<Stmt *> Stmts);

public:
  static CompoundStmt *Create(const ASTContext &C, ArrayRef<Stmt *> Stmts,
                              SourceLocation LB, SourceLocation RB);

  // Build an empty compound statement with a location.
  explicit CompoundStmt(SourceLocation Loc)
      : Stmt(CompoundStmtClass), RBraceLoc(Loc) {
    CompoundStmtBits.NumStmts = 0;
    CompoundStmtBits.LBraceLoc = Loc;
  }

  // Build an empty compound statement.
  static CompoundStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts);

  // The statement count is packed into the Stmt bit-fields.
  bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
  unsigned size() const { return CompoundStmtBits.NumStmts; }

  using body_iterator = Stmt **;
  using body_range = llvm::iterator_range<body_iterator>;

  body_range body() { return body_range(body_begin(), body_end()); }
  // The statements themselves are the trailing objects.
  body_iterator body_begin() { return getTrailingObjects<Stmt *>(); }
  body_iterator body_end() { return body_begin() + size(); }
  Stmt *body_front() { return !body_empty() ? body_begin()[0] : nullptr; }

  Stmt *body_back() {
    return !body_empty() ? body_begin()[size() - 1] : nullptr;
  }

  /// Overwrite the last statement; the body must be non-empty.
  void setLastStmt(Stmt *S) {
    assert(!body_empty() && "setLastStmt");
    body_begin()[size() - 1] = S;
  }

  using const_body_iterator = Stmt *const *;
  using body_const_range = llvm::iterator_range<const_body_iterator>;

  body_const_range body() const {
    return body_const_range(body_begin(), body_end());
  }

  const_body_iterator body_begin() const {
    return getTrailingObjects<Stmt *>();
  }

  const_body_iterator body_end() const { return body_begin() + size(); }

  const Stmt *body_front() const {
    return !body_empty() ? body_begin()[0] : nullptr;
  }

  const Stmt *body_back() const {
    return !body_empty() ? body_begin()[size() - 1] : nullptr;
  }

  using reverse_body_iterator = std::reverse_iterator<body_iterator>;

  reverse_body_iterator body_rbegin() {
    return reverse_body_iterator(body_end());
  }

  reverse_body_iterator body_rend() {
    return reverse_body_iterator(body_begin());
  }

  using const_reverse_body_iterator =
      std::reverse_iterator<const_body_iterator>;

  const_reverse_body_iterator body_rbegin() const {
    return const_reverse_body_iterator(body_end());
  }

  const_reverse_body_iterator body_rend() const {
    return const_reverse_body_iterator(body_begin());
  }

  SourceLocation getBeginLoc() const { return CompoundStmtBits.LBraceLoc; }
  SourceLocation getEndLoc() const { return RBraceLoc; }

  SourceLocation getLBracLoc() const { return CompoundStmtBits.LBraceLoc; }
  SourceLocation getRBracLoc() const { return RBraceLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CompoundStmtClass;
  }

  // Iterators
  child_range children() { return child_range(body_begin(), body_end()); }

  const_child_range children() const {
    return const_child_range(body_begin(), body_end());
  }
};
// SwitchCase is the base class for CaseStmt and DefaultStmt,
class SwitchCase : public Stmt {
protected:
  /// The location of the ":".
  SourceLocation ColonLoc;

  // The location of the "case" or "default" keyword. Stored in SwitchCaseBits.
  // SourceLocation KeywordLoc;

  /// A pointer to the following CaseStmt or DefaultStmt class,
  /// used by SwitchStmt.
  SwitchCase *NextSwitchCase = nullptr;

  // Protected: only the two concrete subclasses construct a SwitchCase.
  SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
      : Stmt(SC), ColonLoc(ColonLoc) {
    setKeywordLoc(KWLoc);
  }

  /// Build an empty switch case; locations are set later.
  SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC) {}

public:
  const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }

  SwitchCase *getNextSwitchCase() { return NextSwitchCase; }

  void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }

  SourceLocation getKeywordLoc() const { return SwitchCaseBits.KeywordLoc; }
  void setKeywordLoc(SourceLocation L) { SwitchCaseBits.KeywordLoc = L; }
  SourceLocation getColonLoc() const { return ColonLoc; }
  void setColonLoc(SourceLocation L) { ColonLoc = L; }

  // Defined out of line, after CaseStmt and DefaultStmt are complete.
  inline Stmt *getSubStmt();
  const Stmt *getSubStmt() const {
    return const_cast<SwitchCase *>(this)->getSubStmt();
  }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  inline SourceLocation getEndLoc() const LLVM_READONLY;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass ||
           T->getStmtClass() == DefaultStmtClass;
  }
};
/// CaseStmt - Represent a case statement. It can optionally be a GNU case
/// statement of the form LHS ... RHS representing a range of cases.
class CaseStmt final
    : public SwitchCase,
      private llvm::TrailingObjects<CaseStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;

  // CaseStmt is followed by several trailing objects, some of which optional.
  // Note that it would be more convenient to put the optional trailing objects
  // at the end but this would impact children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the LHS of the case statement. Always present.
  //
  // * A "Stmt *" for the RHS of the case statement. This is a GNU extension
  //   which allow ranges in cases statement of the form LHS ... RHS.
  //   Present if and only if caseStmtIsGNURange() is true.
  //
  // * A "Stmt *" for the substatement of the case statement. Always present.
  //
  // * A SourceLocation for the location of the ... if this is a case statement
  //   with a range. Present if and only if caseStmtIsGNURange() is true.
  enum { LhsOffset = 0, SubStmtOffsetFromRhs = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  // Two mandatory "Stmt *" (LHS + substatement) plus the optional RHS.
  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + caseStmtIsGNURange();
  }

  // The "..." location is stored only for GNU case ranges.
  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return caseStmtIsGNURange();
  }

  // Indices into the trailing "Stmt *" array; the RHS slot (if present)
  // shifts the substatement slot by one.
  unsigned lhsOffset() const { return LhsOffset; }
  unsigned rhsOffset() const { return LhsOffset + caseStmtIsGNURange(); }
  unsigned subStmtOffset() const { return rhsOffset() + SubStmtOffsetFromRhs; }

  /// Build a case statement assuming that the storage for the
  /// trailing objects has been properly allocated.
  CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
           SourceLocation ellipsisLoc, SourceLocation colonLoc)
      : SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
    // Handle GNU case statements of the form LHS ... RHS.
    bool IsGNURange = rhs != nullptr;
    SwitchCaseBits.CaseStmtIsGNURange = IsGNURange;
    setLHS(lhs);
    setSubStmt(nullptr);
    if (IsGNURange) {
      setRHS(rhs);
      setEllipsisLoc(ellipsisLoc);
    }
  }

  /// Build an empty switch case statement.
  explicit CaseStmt(EmptyShell Empty, bool CaseStmtIsGNURange)
      : SwitchCase(CaseStmtClass, Empty) {
    SwitchCaseBits.CaseStmtIsGNURange = CaseStmtIsGNURange;
  }

public:
  /// Build a case statement.
  static CaseStmt *Create(const ASTContext &Ctx, Expr *lhs, Expr *rhs,
                          SourceLocation caseLoc, SourceLocation ellipsisLoc,
                          SourceLocation colonLoc);

  /// Build an empty case statement.
  static CaseStmt *CreateEmpty(const ASTContext &Ctx, bool CaseStmtIsGNURange);

  /// True if this case statement is of the form case LHS ... RHS, which
  /// is a GNU extension. In this case the RHS can be obtained with getRHS()
  /// and the location of the ellipsis can be obtained with getEllipsisLoc().
  bool caseStmtIsGNURange() const { return SwitchCaseBits.CaseStmtIsGNURange; }

  SourceLocation getCaseLoc() const { return getKeywordLoc(); }
  void setCaseLoc(SourceLocation L) { setKeywordLoc(L); }

  /// Get the location of the ... in a case statement of the form LHS ... RHS.
  SourceLocation getEllipsisLoc() const {
    return caseStmtIsGNURange() ? *getTrailingObjects<SourceLocation>()
                                : SourceLocation();
  }

  /// Set the location of the ... in a case statement of the form LHS ... RHS.
  /// Assert that this case statement is of this form.
  void setEllipsisLoc(SourceLocation L) {
    assert(
        caseStmtIsGNURange() &&
        "setEllipsisLoc but this is not a case stmt of the form LHS ... RHS!");
    *getTrailingObjects<SourceLocation>() = L;
  }

  // The LHS is stored as a "Stmt *" and cast back to Expr on access.
  Expr *getLHS() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
  }

  const Expr *getLHS() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
  }

  void setLHS(Expr *Val) {
    getTrailingObjects<Stmt *>()[lhsOffset()] = reinterpret_cast<Stmt *>(Val);
  }

  /// Returns the RHS of a GNU case range, or null for an ordinary case.
  Expr *getRHS() {
    return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
                                      getTrailingObjects<Stmt *>()[rhsOffset()])
                                : nullptr;
  }

  const Expr *getRHS() const {
    return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
                                      getTrailingObjects<Stmt *>()[rhsOffset()])
                                : nullptr;
  }

  void setRHS(Expr *Val) {
    assert(caseStmtIsGNURange() &&
           "setRHS but this is not a case stmt of the form LHS ... RHS!");
    getTrailingObjects<Stmt *>()[rhsOffset()] = reinterpret_cast<Stmt *>(Val);
  }

  Stmt *getSubStmt() { return getTrailingObjects<Stmt *>()[subStmtOffset()]; }
  const Stmt *getSubStmt() const {
    return getTrailingObjects<Stmt *>()[subStmtOffset()];
  }

  void setSubStmt(Stmt *S) {
    getTrailingObjects<Stmt *>()[subStmtOffset()] = S;
  }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    // Handle deeply nested case statements with iteration instead of recursion.
    const CaseStmt *CS = this;
    while (const auto *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
      CS = CS2;

    return CS->getSubStmt()->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }
};
/// DefaultStmt - Represents the "default:" label of a switch; keeps its
/// substatement inline rather than as a trailing object.
class DefaultStmt : public SwitchCase {
  Stmt *SubStmt;

public:
  DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt)
      : SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {}

  /// Build an empty default statement.
  explicit DefaultStmt(EmptyShell Empty)
      : SwitchCase(DefaultStmtClass, Empty) {}

  // The "default" keyword location is the SwitchCase keyword location.
  SourceLocation getDefaultLoc() const { return getKeywordLoc(); }
  void setDefaultLoc(SourceLocation L) { setKeywordLoc(L); }

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *S) { SubStmt = S; }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getSubStmt()->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DefaultStmtClass;
  }

  // Iterators: the single child is the substatement.
  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }
};
// Out-of-line so CaseStmt/DefaultStmt are complete; there are exactly two
// concrete subclasses, so dispatch by hand.
SourceLocation SwitchCase::getEndLoc() const {
  if (const auto *Case = dyn_cast<CaseStmt>(this))
    return Case->getEndLoc();
  if (const auto *Default = dyn_cast<DefaultStmt>(this))
    return Default->getEndLoc();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}
// Forward to whichever concrete subclass this actually is.
Stmt *SwitchCase::getSubStmt() {
  if (auto *Case = dyn_cast<CaseStmt>(this))
    return Case->getSubStmt();
  if (auto *Default = dyn_cast<DefaultStmt>(this))
    return Default->getSubStmt();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}
/// Represents a statement that could possibly have a value and type. This
/// covers expression-statements, as well as labels and attributed statements.
///
/// Value statements have a special meaning when they are the last non-null
/// statement in a GNU statement expression, where they determine the value
/// of the statement expression.
class ValueStmt : public Stmt {
protected:
  using Stmt::Stmt;

public:
  const Expr *getExprStmt() const;
  Expr *getExprStmt() {
    // Delegate to the const overload and cast the constness back off.
    return const_cast<Expr *>(
        static_cast<const ValueStmt *>(this)->getExprStmt());
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() >= firstValueStmtConstant &&
           T->getStmtClass() <= lastValueStmtConstant;
  }
};
/// LabelStmt - Represents a label, which has a substatement. For example:
///    foo: return;
class LabelStmt : public ValueStmt {
  LabelDecl *TheDecl;
  Stmt *SubStmt;

public:
  /// Build a label statement.
  LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
      : ValueStmt(LabelStmtClass), TheDecl(D), SubStmt(substmt) {
    setIdentLoc(IL);
  }

  /// Build an empty label statement.
  explicit LabelStmt(EmptyShell Empty) : ValueStmt(LabelStmtClass, Empty) {}

  LabelDecl *getDecl() const { return TheDecl; }
  void setDecl(LabelDecl *D) { TheDecl = D; }

  const char *getName() const;

  // The identifier location is packed into the Stmt bit-fields.
  SourceLocation getIdentLoc() const { return LabelStmtBits.IdentLoc; }
  void setIdentLoc(SourceLocation L) { LabelStmtBits.IdentLoc = L; }

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *SS) { SubStmt = SS; }

  SourceLocation getBeginLoc() const { return getIdentLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getSubStmt()->getEndLoc();
  }

  // Iterators: the single child is the labeled substatement.
  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == LabelStmtClass;
  }
};
/// Represents an attribute applied to a statement.
///
/// Represents an attribute applied to a statement. For example:
///   [[omp::for(...)]] for (...) { ... }
class AttributedStmt final
    : public ValueStmt,
      private llvm::TrailingObjects<AttributedStmt, const Attr *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  Stmt *SubStmt;

  // Private: clients allocate through Create()/CreateEmpty() so the trailing
  // attribute array is sized correctly.
  AttributedStmt(SourceLocation Loc, ArrayRef<const Attr *> Attrs,
                 Stmt *SubStmt)
      : ValueStmt(AttributedStmtClass), SubStmt(SubStmt) {
    AttributedStmtBits.NumAttrs = Attrs.size();
    AttributedStmtBits.AttrLoc = Loc;
    std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr());
  }

  explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
      : ValueStmt(AttributedStmtClass, Empty) {
    AttributedStmtBits.NumAttrs = NumAttrs;
    AttributedStmtBits.AttrLoc = SourceLocation{};
    // Attribute slots are reserved but left null until deserialization.
    std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr);
  }

  // The attributes are the trailing objects.
  const Attr *const *getAttrArrayPtr() const {
    return getTrailingObjects<const Attr *>();
  }
  const Attr **getAttrArrayPtr() { return getTrailingObjects<const Attr *>(); }

public:
  static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc,
                                ArrayRef<const Attr *> Attrs, Stmt *SubStmt);

  // Build an empty attributed statement.
  static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs);

  SourceLocation getAttrLoc() const { return AttributedStmtBits.AttrLoc; }
  ArrayRef<const Attr *> getAttrs() const {
    return llvm::makeArrayRef(getAttrArrayPtr(), AttributedStmtBits.NumAttrs);
  }

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }

  SourceLocation getBeginLoc() const { return getAttrLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}

  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == AttributedStmtClass;
  }
};
/// IfStmt - This represents an if/then/else.
class IfStmt final
    : public Stmt,
      private llvm::TrailingObjects<IfStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;

  // IfStmt is followed by several trailing objects, some of which optional.
  // Note that it would be more convenient to put the optional trailing
  // objects at then end but this would change the order of the children.
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //   Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact a "Expr *".
  //
  // * A "Stmt *" for the then statement.
  //   Always present.
  //
  // * A "Stmt *" for the else statement.
  //   Present if and only if hasElseStorage().
  //
  // * A "SourceLocation" for the location of the "else".
  //   Present if and only if hasElseStorage().
  enum { InitOffset = 0, ThenOffsetFromCond = 1, ElseOffsetFromCond = 2 };
  enum { NumMandatoryStmtPtr = 2 };

  // Two mandatory "Stmt *" (condition + then) plus the optional slots.
  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasElseStorage() + hasVarStorage() +
           hasInitStorage();
  }

  // The "else" keyword location is stored only when there is an else branch.
  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return hasElseStorage();
  }

  // Indices into the trailing "Stmt *" array; each optional slot that is
  // present shifts the later slots.
  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }
  unsigned thenOffset() const { return condOffset() + ThenOffsetFromCond; }
  unsigned elseOffset() const { return condOffset() + ElseOffsetFromCond; }

  /// Build an if/then/else statement.
  IfStmt(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr, Stmt *Init,
         VarDecl *Var, Expr *Cond, Stmt *Then, SourceLocation EL, Stmt *Else);

  /// Build an empty if/then/else statement.
  explicit IfStmt(EmptyShell Empty, bool HasElse, bool HasVar, bool HasInit);

public:
  /// Create an IfStmt.
  static IfStmt *Create(const ASTContext &Ctx, SourceLocation IL,
                        bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond,
                        Stmt *Then, SourceLocation EL = SourceLocation(),
                        Stmt *Else = nullptr);

  /// Create an empty IfStmt optionally with storage for an else statement,
  /// condition variable and init expression.
  static IfStmt *CreateEmpty(const ASTContext &Ctx, bool HasElse, bool HasVar,
                             bool HasInit);

  /// True if this IfStmt has the storage for an init statement.
  bool hasInitStorage() const { return IfStmtBits.HasInit; }

  /// True if this IfStmt has storage for a variable declaration.
  bool hasVarStorage() const { return IfStmtBits.HasVar; }

  /// True if this IfStmt has storage for an else statement.
  bool hasElseStorage() const { return IfStmtBits.HasElse; }

  // The condition is stored as a "Stmt *" and cast back to Expr on access.
  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getThen() { return getTrailingObjects<Stmt *>()[thenOffset()]; }

  const Stmt *getThen() const {
    return getTrailingObjects<Stmt *>()[thenOffset()];
  }

  void setThen(Stmt *Then) {
    getTrailingObjects<Stmt *>()[thenOffset()] = Then;
  }

  /// Returns the else branch, or null when there is no else storage.
  Stmt *getElse() {
    return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                            : nullptr;
  }

  const Stmt *getElse() const {
    return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                            : nullptr;
  }

  void setElse(Stmt *Else) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    getTrailingObjects<Stmt *>()[elseOffset()] = Else;
  }

  /// Retrieve the variable declared in this "if" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// if (int x = foo()) {
  ///   printf("x is %d", x);
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<IfStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable for this if statement.
  /// The if statement must have storage for the condition variable.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

  /// If this IfStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  /// Returns the C++17 init statement, or null when there is no init storage.
  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  const Stmt *getInit() const {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This if statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }

  SourceLocation getIfLoc() const { return IfStmtBits.IfLoc; }
  void setIfLoc(SourceLocation IfLoc) { IfStmtBits.IfLoc = IfLoc; }

  SourceLocation getElseLoc() const {
    return hasElseStorage() ? *getTrailingObjects<SourceLocation>()
                            : SourceLocation();
  }

  void setElseLoc(SourceLocation ElseLoc) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    *getTrailingObjects<SourceLocation>() = ElseLoc;
  }

  bool isConstexpr() const { return IfStmtBits.IsConstexpr; }
  void setConstexpr(bool C) { IfStmtBits.IsConstexpr = C; }

  // Defined out of line.
  bool isObjCAvailabilityCheck() const;

  SourceLocation getBeginLoc() const { return getIfLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    if (getElse())
      return getElse()->getEndLoc();
    return getThen()->getEndLoc();
  }

  // Iterators over subexpressions. The iterators will include iterating
  // over the initialization expression referenced by the condition variable.
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IfStmtClass;
  }
};
/// SwitchStmt - This represents a 'switch' stmt.
class SwitchStmt final : public Stmt,
                         private llvm::TrailingObjects<SwitchStmt, Stmt *> {
  friend TrailingObjects;

  /// Points to a linked list of case and default statements.
  SwitchCase *FirstCase;

  // SwitchStmt is followed by several trailing objects,
  // some of which optional. Note that it would be more convenient to
  // put the optional trailing objects at the end but this would change
  // the order in children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //   Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //   Always present.
  enum { InitOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  // Two mandatory "Stmt *" (condition + body) plus the optional slots.
  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasInitStorage() + hasVarStorage();
  }

  // Indices into the trailing "Stmt *" array; each optional slot that is
  // present shifts the later slots.
  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  /// Build a switch statement.
  SwitchStmt(const ASTContext &Ctx, Stmt *Init, VarDecl *Var, Expr *Cond);

  /// Build a empty switch statement.
  explicit SwitchStmt(EmptyShell Empty, bool HasInit, bool HasVar);

public:
  /// Create a switch statement.
  static SwitchStmt *Create(const ASTContext &Ctx, Stmt *Init, VarDecl *Var,
                            Expr *Cond);

  /// Create an empty switch statement optionally with storage for
  /// an init expression and a condition variable.
  static SwitchStmt *CreateEmpty(const ASTContext &Ctx, bool HasInit,
                                 bool HasVar);

  /// True if this SwitchStmt has storage for an init statement.
  bool hasInitStorage() const { return SwitchStmtBits.HasInit; }

  /// True if this SwitchStmt has storage for a condition variable.
  bool hasVarStorage() const { return SwitchStmtBits.HasVar; }

  // The condition is stored as a "Stmt *" and cast back to Expr on access.
  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }

  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  /// Returns the init statement, or null when there is no init storage.
  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  const Stmt *getInit() const {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This switch statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }

  /// Retrieve the variable declared in this "switch" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// switch (int x = foo()) {
  ///   case 0: break;
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<SwitchStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable in this switch statement.
  /// The switch statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *VD);

  /// If this SwitchStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  SwitchCase *getSwitchCaseList() { return FirstCase; }
  const SwitchCase *getSwitchCaseList() const { return FirstCase; }
  void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; }

  SourceLocation getSwitchLoc() const { return SwitchStmtBits.SwitchLoc; }
  void setSwitchLoc(SourceLocation L) { SwitchStmtBits.SwitchLoc = L; }

  void setBody(Stmt *S, SourceLocation SL) {
    setBody(S);
    setSwitchLoc(SL);
  }

  /// Prepend \p SC to the singly-linked list of cases; \p SC must not
  /// already belong to a switch.
  void addSwitchCase(SwitchCase *SC) {
    assert(!SC->getNextSwitchCase() &&
           "case/default already added to a switch");
    SC->setNextSwitchCase(FirstCase);
    FirstCase = SC;
  }

  /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
  /// switch over an enum value then all cases have been explicitly covered.
  void setAllEnumCasesCovered() { SwitchStmtBits.AllEnumCasesCovered = true; }

  /// Returns true if the SwitchStmt is a switch of an enum value and all cases
  /// have been explicitly covered.
  bool isAllEnumCasesCovered() const {
    return SwitchStmtBits.AllEnumCasesCovered;
  }

  SourceLocation getBeginLoc() const { return getSwitchLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getBody() ? getBody()->getEndLoc()
                     : reinterpret_cast<const Stmt *>(getCond())->getEndLoc();
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SwitchStmtClass;
  }
};
/// WhileStmt - This represents a 'while' stmt.
class WhileStmt final : public Stmt,
                        private llvm::TrailingObjects<WhileStmt, Stmt *> {
  friend TrailingObjects;

  // WhileStmt is followed by several trailing objects,
  // some of which optional. Note that it would be more
  // convenient to put the optional trailing object at the end
  // but this would affect children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //   Always present.
  //
  enum { VarOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  // Indices into the trailing "Stmt *" array; the optional condition
  // variable slot (if present) shifts the later slots.
  unsigned varOffset() const { return VarOffset; }
  unsigned condOffset() const { return VarOffset + hasVarStorage(); }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  // Two mandatory "Stmt *" (condition + body) plus the optional slot.
  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasVarStorage();
  }

  /// Build a while statement.
  WhileStmt(const ASTContext &Ctx, VarDecl *Var, Expr *Cond, Stmt *Body,
            SourceLocation WL);

  /// Build an empty while statement.
  explicit WhileStmt(EmptyShell Empty, bool HasVar);

public:
  /// Create a while statement.
  static WhileStmt *Create(const ASTContext &Ctx, VarDecl *Var, Expr *Cond,
                           Stmt *Body, SourceLocation WL);

  /// Create an empty while statement optionally with storage for
  /// a condition variable.
  static WhileStmt *CreateEmpty(const ASTContext &Ctx, bool HasVar);

  /// True if this WhileStmt has storage for a condition variable.
  bool hasVarStorage() const { return WhileStmtBits.HasVar; }

  // The condition is stored as a "Stmt *" and cast back to Expr on access.
  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }

  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }

  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  /// Retrieve the variable declared in this "while" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// while (int x = random()) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<WhileStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable of this while statement.
  /// The while statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

  /// If this WhileStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  SourceLocation getWhileLoc() const { return WhileStmtBits.WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileStmtBits.WhileLoc = L; }

  SourceLocation getBeginLoc() const { return getWhileLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getBody()->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == WhileStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }
};
/// DoStmt - This represents a 'do/while' stmt.
class DoStmt : public Stmt {
  enum { BODY, COND, END_EXPR };
  Stmt *SubExprs[END_EXPR];
  SourceLocation WhileLoc;
  SourceLocation RParenLoc; // Location of final ')' in do stmt condition.

public:
  /// Build a do/while statement from its body, condition, and the locations
  /// of "do", "while", and the closing ')'.
  DoStmt(Stmt *Body, Expr *Cond, SourceLocation DL, SourceLocation WL,
         SourceLocation RP)
      : Stmt(DoStmtClass), WhileLoc(WL), RParenLoc(RP) {
    setDoLoc(DL);
    setBody(Body);
    setCond(Cond);
  }

  /// Build an empty do-while statement.
  explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) {}

  // The condition is held as a "Stmt *" and cast back to Expr on access.
  Expr *getCond() { return reinterpret_cast<Expr *>(SubExprs[COND]); }
  const Expr *getCond() const {
    return reinterpret_cast<const Expr *>(SubExprs[COND]);
  }
  void setCond(Expr *Cond) { SubExprs[COND] = reinterpret_cast<Stmt *>(Cond); }

  Stmt *getBody() { return SubExprs[BODY]; }
  const Stmt *getBody() const { return SubExprs[BODY]; }
  void setBody(Stmt *Body) { SubExprs[BODY] = Body; }

  // The "do" keyword location is packed into the Stmt bit-fields.
  SourceLocation getDoLoc() const { return DoStmtBits.DoLoc; }
  void setDoLoc(SourceLocation L) { DoStmtBits.DoLoc = L; }
  SourceLocation getWhileLoc() const { return WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getBeginLoc() const { return getDoLoc(); }
  SourceLocation getEndLoc() const { return getRParenLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DoStmtClass;
  }

  // Iterators: both fixed sub-statement slots are children.
  child_range children() {
    return child_range(SubExprs, SubExprs + END_EXPR);
  }

  const_child_range children() const {
    return const_child_range(SubExprs, SubExprs + END_EXPR);
  }
};
/// ForStmt - This represents a 'for (init;cond;inc)' stmt.  Note that any of
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
class ForStmt : public Stmt {
  // Indices into SubExprs; CONDVAR holds the faux DeclStmt for a condition
  // variable (or null), END_EXPR doubles as the array length.
  enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
  Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt.
  SourceLocation LParenLoc, RParenLoc;

public:
  ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
          Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
          SourceLocation RP);

  /// Build an empty for statement.
  explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) {}

  Stmt *getInit() { return SubExprs[INIT]; }

  /// Retrieve the variable declared in this "for" statement, if any.
  ///
  /// In the following example, "y" is the condition variable.
  /// \code
  /// for (int x = random(); int y = mangle(x); ++x) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this ForStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]);
  }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  Expr *getInc()  { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  Stmt *getBody() { return SubExprs[BODY]; }

  const Stmt *getInit() const { return SubExprs[INIT]; }
  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  const Expr *getInc()  const { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  const Stmt *getBody() const { return SubExprs[BODY]; }

  void setInit(Stmt *S) { SubExprs[INIT] = S; }
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
  void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }

  // The 'for' location is packed into the Stmt bit-fields.
  SourceLocation getForLoc() const { return ForStmtBits.ForLoc; }
  void setForLoc(SourceLocation L) { ForStmtBits.ForLoc = L; }
  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation L) { LParenLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getBeginLoc() const { return getForLoc(); }
  SourceLocation getEndLoc() const { return getBody()->getEndLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ForStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }
  const_child_range children() const {
    return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }
};
/// GotoStmt - This represents a direct goto.
class GotoStmt : public Stmt {
  LabelDecl *Label;
  SourceLocation LabelLoc;

public:
  /// Build a goto statement. GL is the location of the 'goto' keyword,
  /// LL the location of the label name.
  GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL)
      : Stmt(GotoStmtClass), Label(label), LabelLoc(LL) {
    setGotoLoc(GL);
  }

  /// Build an empty goto statement.
  explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) {}

  LabelDecl *getLabel() const { return Label; }
  void setLabel(LabelDecl *D) { Label = D; }

  // The 'goto' location is packed into the Stmt bit-fields.
  SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
  void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
  SourceLocation getLabelLoc() const { return LabelLoc; }
  void setLabelLoc(SourceLocation L) { LabelLoc = L; }

  SourceLocation getBeginLoc() const { return getGotoLoc(); }
  SourceLocation getEndLoc() const { return getLabelLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GotoStmtClass;
  }

  // Iterators: a direct goto has no sub-statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// IndirectGotoStmt - This represents an indirect goto.
class IndirectGotoStmt : public Stmt {
  SourceLocation StarLoc;
  // The target address expression, stored as a Stmt* so children() can
  // expose it through a Stmt** range.
  Stmt *Target;

public:
  IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc, Expr *target)
      : Stmt(IndirectGotoStmtClass), StarLoc(starLoc) {
    setTarget(target);
    setGotoLoc(gotoLoc);
  }

  /// Build an empty indirect goto statement.
  explicit IndirectGotoStmt(EmptyShell Empty)
      : Stmt(IndirectGotoStmtClass, Empty) {}

  // The 'goto' location is packed into the Stmt bit-fields.
  void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
  SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
  void setStarLoc(SourceLocation L) { StarLoc = L; }
  SourceLocation getStarLoc() const { return StarLoc; }

  Expr *getTarget() { return reinterpret_cast<Expr *>(Target); }
  const Expr *getTarget() const {
    return reinterpret_cast<const Expr *>(Target);
  }
  void setTarget(Expr *E) { Target = reinterpret_cast<Stmt *>(E); }

  /// getConstantTarget - Returns the fixed target of this indirect
  /// goto, if one exists.
  LabelDecl *getConstantTarget();
  const LabelDecl *getConstantTarget() const {
    return const_cast<IndirectGotoStmt *>(this)->getConstantTarget();
  }

  SourceLocation getBeginLoc() const { return getGotoLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return Target->getEndLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IndirectGotoStmtClass;
  }

  // Iterators: the only child is the target expression.
  child_range children() { return child_range(&Target, &Target + 1); }
  const_child_range children() const {
    return const_child_range(&Target, &Target + 1);
  }
};
/// ContinueStmt - This represents a continue.
class ContinueStmt : public Stmt {
public:
  ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass) {
    setContinueLoc(CL);
  }

  /// Build an empty continue statement.
  explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) {}

  // The only state is the 'continue' location, packed into the Stmt
  // bit-fields.
  SourceLocation getContinueLoc() const { return ContinueStmtBits.ContinueLoc; }
  void setContinueLoc(SourceLocation L) { ContinueStmtBits.ContinueLoc = L; }

  SourceLocation getBeginLoc() const { return getContinueLoc(); }
  SourceLocation getEndLoc() const { return getContinueLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ContinueStmtClass;
  }

  // Iterators: no sub-statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// BreakStmt - This represents a break.
class BreakStmt : public Stmt {
public:
  BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass) {
    setBreakLoc(BL);
  }

  /// Build an empty break statement.
  explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) {}

  // The only state is the 'break' location, packed into the Stmt bit-fields.
  SourceLocation getBreakLoc() const { return BreakStmtBits.BreakLoc; }
  void setBreakLoc(SourceLocation L) { BreakStmtBits.BreakLoc = L; }

  SourceLocation getBeginLoc() const { return getBreakLoc(); }
  SourceLocation getEndLoc() const { return getBreakLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == BreakStmtClass;
  }

  // Iterators: no sub-statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// ReturnStmt - This represents a return, optionally of an expression:
///   return;
///   return 4;
///
/// Note that GCC allows return with no argument in a function declared to
/// return a value, and it allows returning a value in functions declared to
/// return void.  We explicitly model this in the AST, which means you can't
/// depend on the return type of the function and the presence of an argument.
class ReturnStmt final
    : public Stmt,
      private llvm::TrailingObjects<ReturnStmt, const VarDecl *> {
  friend TrailingObjects;

  /// The return expression.
  Stmt *RetExpr;

  // ReturnStmt is followed optionally by a trailing "const VarDecl *"
  // for the NRVO candidate. Present if and only if hasNRVOCandidate().

  /// True if this ReturnStmt has storage for an NRVO candidate.
  bool hasNRVOCandidate() const { return ReturnStmtBits.HasNRVOCandidate; }

  // TrailingObjects callback: 0 or 1 trailing VarDecl pointers.
  unsigned numTrailingObjects(OverloadToken<const VarDecl *>) const {
    return hasNRVOCandidate();
  }

  /// Build a return statement.
  ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate);

  /// Build an empty return statement.
  explicit ReturnStmt(EmptyShell Empty, bool HasNRVOCandidate);

public:
  /// Create a return statement.
  static ReturnStmt *Create(const ASTContext &Ctx, SourceLocation RL, Expr *E,
                            const VarDecl *NRVOCandidate);

  /// Create an empty return statement, optionally with
  /// storage for an NRVO candidate.
  static ReturnStmt *CreateEmpty(const ASTContext &Ctx, bool HasNRVOCandidate);

  /// The returned expression, or null for a bare 'return;'.
  Expr *getRetValue() { return reinterpret_cast<Expr *>(RetExpr); }
  const Expr *getRetValue() const { return reinterpret_cast<Expr *>(RetExpr); }
  void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt *>(E); }

  /// Retrieve the variable that might be used for the named return
  /// value optimization.
  ///
  /// The optimization itself can only be performed if the variable is
  /// also marked as an NRVO object.
  const VarDecl *getNRVOCandidate() const {
    return hasNRVOCandidate() ? *getTrailingObjects<const VarDecl *>()
                              : nullptr;
  }

  /// Set the variable that might be used for the named return value
  /// optimization. The return statement must have storage for it,
  /// which is the case if and only if hasNRVOCandidate() is true.
  void setNRVOCandidate(const VarDecl *Var) {
    assert(hasNRVOCandidate() &&
           "This return statement has no storage for an NRVO candidate!");
    *getTrailingObjects<const VarDecl *>() = Var;
  }

  // The 'return' location is packed into the Stmt bit-fields.
  SourceLocation getReturnLoc() const { return ReturnStmtBits.RetLoc; }
  void setReturnLoc(SourceLocation L) { ReturnStmtBits.RetLoc = L; }

  SourceLocation getBeginLoc() const { return getReturnLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return RetExpr ? RetExpr->getEndLoc() : getReturnLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ReturnStmtClass;
  }

  // Iterators: the return expression, if present, is the only child.
  child_range children() {
    if (RetExpr)
      return child_range(&RetExpr, &RetExpr + 1);
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    if (RetExpr)
      return const_child_range(&RetExpr, &RetExpr + 1);
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
class AsmStmt : public Stmt {
protected:
  friend class ASTStmtReader;

  SourceLocation AsmLoc;

  /// True if the assembly statement does not have any input or output
  /// operands.
  bool IsSimple;

  /// If true, treat this inline assembly as having side effects.
  /// This assembly statement should not be optimized, deleted or moved.
  bool IsVolatile;

  unsigned NumOutputs;
  unsigned NumInputs;
  unsigned NumClobbers;

  // Operand expressions, laid out as: outputs [0, NumOutputs), then
  // inputs [NumOutputs, NumOutputs + NumInputs). The iterator accessors
  // below rely on this layout.
  Stmt **Exprs = nullptr;

  AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile,
          unsigned numoutputs, unsigned numinputs, unsigned numclobbers)
      : Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile),
        NumOutputs(numoutputs), NumInputs(numinputs),
        NumClobbers(numclobbers) {}

public:
  /// Build an empty inline-assembly statement.
  explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty) {}

  SourceLocation getAsmLoc() const { return AsmLoc; }
  void setAsmLoc(SourceLocation L) { AsmLoc = L; }

  bool isSimple() const { return IsSimple; }
  void setSimple(bool V) { IsSimple = V; }

  bool isVolatile() const { return IsVolatile; }
  void setVolatile(bool V) { IsVolatile = V; }

  // Derived classes provide real locations; the base returns invalid ones.
  SourceLocation getBeginLoc() const LLVM_READONLY { return {}; }
  SourceLocation getEndLoc() const LLVM_READONLY { return {}; }

  //===--- Asm String Analysis ---===//

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  unsigned getNumOutputs() const { return NumOutputs; }

  /// getOutputConstraint - Return the constraint string for the specified
  /// output operand.  All output constraints are known to be non-empty (either
  /// '=' or '+').
  StringRef getOutputConstraint(unsigned i) const;

  /// isOutputPlusConstraint - Return true if the specified output constraint
  /// is a "+" constraint (which is both an input and an output) or false if it
  /// is an "=" constraint (just an output).
  bool isOutputPlusConstraint(unsigned i) const {
    return getOutputConstraint(i)[0] == '+';
  }

  const Expr *getOutputExpr(unsigned i) const;

  /// getNumPlusOperands - Return the number of output operands that have a "+"
  /// constraint.
  unsigned getNumPlusOperands() const;

  //===--- Input operands ---===//

  unsigned getNumInputs() const { return NumInputs; }

  /// getInputConstraint - Return the specified input constraint.  Unlike output
  /// constraints, these can be empty.
  StringRef getInputConstraint(unsigned i) const;

  const Expr *getInputExpr(unsigned i) const;

  //===--- Other ---===//

  unsigned getNumClobbers() const { return NumClobbers; }
  StringRef getClobber(unsigned i) const;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GCCAsmStmtClass ||
           T->getStmtClass() == MSAsmStmtClass;
  }

  // Input expr iterators.

  using inputs_iterator = ExprIterator;
  using const_inputs_iterator = ConstExprIterator;
  using inputs_range = llvm::iterator_range<inputs_iterator>;
  using inputs_const_range = llvm::iterator_range<const_inputs_iterator>;

  // Inputs follow the outputs in Exprs.
  inputs_iterator begin_inputs() {
    return &Exprs[0] + NumOutputs;
  }

  inputs_iterator end_inputs() {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); }

  const_inputs_iterator begin_inputs() const {
    return &Exprs[0] + NumOutputs;
  }

  const_inputs_iterator end_inputs() const {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  inputs_const_range inputs() const {
    return inputs_const_range(begin_inputs(), end_inputs());
  }

  // Output expr iterators.

  using outputs_iterator = ExprIterator;
  using const_outputs_iterator = ConstExprIterator;
  using outputs_range = llvm::iterator_range<outputs_iterator>;
  using outputs_const_range = llvm::iterator_range<const_outputs_iterator>;

  // Outputs occupy the front of Exprs.
  outputs_iterator begin_outputs() {
    return &Exprs[0];
  }

  outputs_iterator end_outputs() {
    return &Exprs[0] + NumOutputs;
  }

  outputs_range outputs() {
    return outputs_range(begin_outputs(), end_outputs());
  }

  const_outputs_iterator begin_outputs() const {
    return &Exprs[0];
  }

  const_outputs_iterator end_outputs() const {
    return &Exprs[0] + NumOutputs;
  }

  outputs_const_range outputs() const {
    return outputs_const_range(begin_outputs(), end_outputs());
  }

  child_range children() {
    return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
  }
  const_child_range children() const {
    return const_child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
  }
};
/// This represents a GCC inline-assembly statement extension.
class GCCAsmStmt : public AsmStmt {
  friend class ASTStmtReader;

  SourceLocation RParenLoc;
  StringLiteral *AsmStr;

  // FIXME: If we wanted to, we could allocate all of these in one big array.
  // Constraints and Names each cover outputs first, then inputs (see the
  // [i + NumOutputs] indexing in the input accessors below).
  StringLiteral **Constraints = nullptr;
  StringLiteral **Clobbers = nullptr;
  IdentifierInfo **Names = nullptr;

public:
  GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple,
             bool isvolatile, unsigned numoutputs, unsigned numinputs,
             IdentifierInfo **names, StringLiteral **constraints, Expr **exprs,
             StringLiteral *asmstr, unsigned numclobbers,
             StringLiteral **clobbers, SourceLocation rparenloc);

  /// Build an empty inline-assembly statement.
  explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty) {}

  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  //===--- Asm String Analysis ---===//

  const StringLiteral *getAsmString() const { return AsmStr; }
  StringLiteral *getAsmString() { return AsmStr; }
  void setAsmString(StringLiteral *E) { AsmStr = E; }

  /// AsmStringPiece - this is part of a decomposed asm string specification
  /// (for use with the AnalyzeAsmString function below).  An asm string is
  /// considered to be a concatenation of these parts.
  class AsmStringPiece {
  public:
    enum Kind {
      String,  // String in .ll asm string form, "$" -> "$$" and "%%" -> "%".
      Operand  // Operand reference, with optional modifier %c4.
    };

  private:
    Kind MyKind;
    std::string Str;
    unsigned OperandNo;

    // Source range for operand references.
    CharSourceRange Range;

  public:
    AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {}
    AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin,
                   SourceLocation End)
        : MyKind(Operand), Str(S), OperandNo(OpNo),
          Range(CharSourceRange::getCharRange(Begin, End)) {}

    bool isString() const { return MyKind == String; }
    bool isOperand() const { return MyKind == Operand; }

    const std::string &getString() const { return Str; }

    unsigned getOperandNo() const {
      assert(isOperand());
      return OperandNo;
    }

    CharSourceRange getRange() const {
      assert(isOperand() && "Range is currently used only for Operands.");
      return Range;
    }

    /// getModifier - Get the modifier for this operand, if present.  This
    /// returns '\0' if there was no modifier.
    char getModifier() const;
  };

  /// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
  /// it into pieces.  If the asm string is erroneous, emit errors and return
  /// true, otherwise return false.  This handles canonicalization and
  /// translation of strings from GCC syntax to LLVM IR syntax, and handles
  /// flattening of named references like %[foo] to Operand AsmStringPiece's.
  unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces,
                            const ASTContext &C, unsigned &DiagOffs) const;

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; }

  StringRef getOutputName(unsigned i) const {
    if (IdentifierInfo *II = getOutputIdentifier(i))
      return II->getName();
    return {};
  }

  StringRef getOutputConstraint(unsigned i) const;

  const StringLiteral *getOutputConstraintLiteral(unsigned i) const {
    return Constraints[i];
  }
  StringLiteral *getOutputConstraintLiteral(unsigned i) {
    return Constraints[i];
  }

  Expr *getOutputExpr(unsigned i);

  const Expr *getOutputExpr(unsigned i) const {
    return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i);
  }

  //===--- Input operands ---===//

  // Input names/constraints are stored after the outputs in the shared
  // Names/Constraints arrays.
  IdentifierInfo *getInputIdentifier(unsigned i) const {
    return Names[i + NumOutputs];
  }

  StringRef getInputName(unsigned i) const {
    if (IdentifierInfo *II = getInputIdentifier(i))
      return II->getName();
    return {};
  }

  StringRef getInputConstraint(unsigned i) const;

  const StringLiteral *getInputConstraintLiteral(unsigned i) const {
    return Constraints[i + NumOutputs];
  }
  StringLiteral *getInputConstraintLiteral(unsigned i) {
    return Constraints[i + NumOutputs];
  }

  Expr *getInputExpr(unsigned i);
  void setInputExpr(unsigned i, Expr *E);

  const Expr *getInputExpr(unsigned i) const {
    return const_cast<GCCAsmStmt*>(this)->getInputExpr(i);
  }

private:
  void setOutputsAndInputsAndClobbers(const ASTContext &C,
                                      IdentifierInfo **Names,
                                      StringLiteral **Constraints,
                                      Stmt **Exprs,
                                      unsigned NumOutputs,
                                      unsigned NumInputs,
                                      StringLiteral **Clobbers,
                                      unsigned NumClobbers);

public:
  //===--- Other ---===//

  /// getNamedOperand - Given a symbolic operand reference like %[foo],
  /// translate this into a numeric value needed to reference the same operand.
  /// This returns -1 if the operand name is invalid.
  int getNamedOperand(StringRef SymbolicName) const;

  StringRef getClobber(unsigned i) const;

  StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; }
  const StringLiteral *getClobberStringLiteral(unsigned i) const {
    return Clobbers[i];
  }

  SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }
  SourceLocation getEndLoc() const LLVM_READONLY { return RParenLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GCCAsmStmtClass;
  }
};
/// This represents a Microsoft inline-assembly statement extension.
class MSAsmStmt : public AsmStmt {
  friend class ASTStmtReader;

  SourceLocation LBraceLoc, EndLoc;
  StringRef AsmStr;

  unsigned NumAsmToks = 0;

  Token *AsmToks = nullptr;
  // Constraints covers outputs first, then inputs (see the
  // [i + NumOutputs] indexing in getInputConstraint).
  StringRef *Constraints = nullptr;
  StringRef *Clobbers = nullptr;

public:
  MSAsmStmt(const ASTContext &C, SourceLocation asmloc,
            SourceLocation lbraceloc, bool issimple, bool isvolatile,
            ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs,
            ArrayRef<StringRef> constraints,
            ArrayRef<Expr*> exprs, StringRef asmstr,
            ArrayRef<StringRef> clobbers, SourceLocation endloc);

  /// Build an empty MS-style inline-assembly statement.
  explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty) {}

  SourceLocation getLBraceLoc() const { return LBraceLoc; }
  void setLBraceLoc(SourceLocation L) { LBraceLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  // True for the braced form '__asm { ... }'; LBraceLoc is invalid otherwise.
  bool hasBraces() const { return LBraceLoc.isValid(); }

  unsigned getNumAsmToks() { return NumAsmToks; }
  Token *getAsmToks() { return AsmToks; }

  //===--- Asm String Analysis ---===//

  StringRef getAsmString() const { return AsmStr; }

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  StringRef getOutputConstraint(unsigned i) const {
    assert(i < NumOutputs);
    return Constraints[i];
  }

  Expr *getOutputExpr(unsigned i);

  const Expr *getOutputExpr(unsigned i) const {
    return const_cast<MSAsmStmt*>(this)->getOutputExpr(i);
  }

  //===--- Input operands ---===//

  StringRef getInputConstraint(unsigned i) const {
    assert(i < NumInputs);
    return Constraints[i + NumOutputs];
  }

  Expr *getInputExpr(unsigned i);
  void setInputExpr(unsigned i, Expr *E);

  const Expr *getInputExpr(unsigned i) const {
    return const_cast<MSAsmStmt*>(this)->getInputExpr(i);
  }

  //===--- Other ---===//

  ArrayRef<StringRef> getAllConstraints() const {
    return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs);
  }

  ArrayRef<StringRef> getClobbers() const {
    return llvm::makeArrayRef(Clobbers, NumClobbers);
  }

  ArrayRef<Expr*> getAllExprs() const {
    return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs),
                              NumInputs + NumOutputs);
  }

  StringRef getClobber(unsigned i) const { return getClobbers()[i]; }

private:
  void initialize(const ASTContext &C, StringRef AsmString,
                  ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints,
                  ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers);

public:
  SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == MSAsmStmtClass;
  }

  child_range children() {
    return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
  }
  const_child_range children() const {
    return const_child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
  }
};
/// Represents a Microsoft SEH '__except' handler: the filter expression
/// and the handler block of a '__try { ... } __except (filter) { ... }'.
class SEHExceptStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  SourceLocation Loc;
  // Children[FILTER_EXPR] is the filter expression, Children[BLOCK] the
  // handler's CompoundStmt.
  Stmt *Children[2];

  enum { FILTER_EXPR, BLOCK };

  SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block);
  explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) {}

public:
  static SEHExceptStmt* Create(const ASTContext &C,
                               SourceLocation ExceptLoc,
                               Expr *FilterExpr,
                               Stmt *Block);

  SourceLocation getBeginLoc() const LLVM_READONLY { return getExceptLoc(); }

  SourceLocation getExceptLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return getBlock()->getEndLoc(); }

  Expr *getFilterExpr() const {
    return reinterpret_cast<Expr*>(Children[FILTER_EXPR]);
  }

  CompoundStmt *getBlock() const {
    return cast<CompoundStmt>(Children[BLOCK]);
  }

  child_range children() {
    return child_range(Children, Children+2);
  }
  const_child_range children() const {
    return const_child_range(Children, Children + 2);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHExceptStmtClass;
  }
};
/// Represents a Microsoft SEH '__finally' block attached to a '__try'.
class SEHFinallyStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  SourceLocation Loc;
  Stmt *Block;

  SEHFinallyStmt(SourceLocation Loc, Stmt *Block);
  explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) {}

public:
  static SEHFinallyStmt* Create(const ASTContext &C,
                                SourceLocation FinallyLoc,
                                Stmt *Block);

  SourceLocation getBeginLoc() const LLVM_READONLY { return getFinallyLoc(); }

  SourceLocation getFinallyLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return Block->getEndLoc(); }

  CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); }

  child_range children() {
    return child_range(&Block,&Block+1);
  }
  const_child_range children() const {
    return const_child_range(&Block, &Block + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHFinallyStmtClass;
  }
};
/// Represents a Microsoft SEH '__try' statement together with its handler,
/// which is either a SEHExceptStmt or a SEHFinallyStmt.
class SEHTryStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  bool IsCXXTry;
  SourceLocation TryLoc;
  // Children[TRY] is the try block, Children[HANDLER] the __except/__finally.
  Stmt *Children[2];

  enum { TRY = 0, HANDLER = 1 };

  SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try'
             SourceLocation TryLoc,
             Stmt *TryBlock,
             Stmt *Handler);

  explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) {}

public:
  static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry,
                            SourceLocation TryLoc, Stmt *TryBlock,
                            Stmt *Handler);

  SourceLocation getBeginLoc() const LLVM_READONLY { return getTryLoc(); }

  SourceLocation getTryLoc() const { return TryLoc; }
  SourceLocation getEndLoc() const { return Children[HANDLER]->getEndLoc(); }

  bool getIsCXXTry() const { return IsCXXTry; }

  CompoundStmt* getTryBlock() const {
    return cast<CompoundStmt>(Children[TRY]);
  }

  Stmt *getHandler() const { return Children[HANDLER]; }

  /// Returns 0 if not defined
  SEHExceptStmt  *getExceptHandler() const;
  SEHFinallyStmt *getFinallyHandler() const;

  child_range children() {
    return child_range(Children, Children+2);
  }
  const_child_range children() const {
    return const_child_range(Children, Children + 2);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHTryStmtClass;
  }
};
/// Represents a __leave statement.
class SEHLeaveStmt : public Stmt {
  SourceLocation LeaveLoc;

public:
  explicit SEHLeaveStmt(SourceLocation LL)
      : Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {}

  /// Build an empty __leave statement.
  explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) {}

  SourceLocation getLeaveLoc() const { return LeaveLoc; }
  void setLeaveLoc(SourceLocation L) { LeaveLoc = L; }

  SourceLocation getBeginLoc() const LLVM_READONLY { return LeaveLoc; }
  SourceLocation getEndLoc() const LLVM_READONLY { return LeaveLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHLeaveStmtClass;
  }

  // Iterators: no sub-statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// This captures a statement into a function. For example, the following
/// pragma annotated compound statement can be represented as a CapturedStmt,
/// and this compound statement is the body of an anonymous outlined function.
/// @code
/// #pragma omp parallel
/// {
/// compute();
/// }
/// @endcode
class CapturedStmt : public Stmt {
public:
/// The different capture forms: by 'this', by reference, capture for
/// variable-length array type etc.
enum VariableCaptureKind {
VCK_This,
VCK_ByRef,
VCK_ByCopy,
VCK_VLAType,
};
/// Describes the capture of either a variable, or 'this', or
/// variable-length array type.
class Capture {
llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind;
SourceLocation Loc;
public:
friend class ASTStmtReader;
/// Create a new capture.
///
/// \param Loc The source location associated with this capture.
///
/// \param Kind The kind of capture (this, ByRef, ...).
///
/// \param Var The variable being captured, or null if capturing this.
Capture(SourceLocation Loc, VariableCaptureKind Kind,
VarDecl *Var = nullptr);
/// Determine the kind of capture.
VariableCaptureKind getCaptureKind() const;
/// Retrieve the source location at which the variable or 'this' was
/// first used.
SourceLocation getLocation() const { return Loc; }
/// Determine whether this capture handles the C++ 'this' pointer.
bool capturesThis() const { return getCaptureKind() == VCK_This; }
/// Determine whether this capture handles a variable (by reference).
bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; }
/// Determine whether this capture handles a variable by copy.
bool capturesVariableByCopy() const {
return getCaptureKind() == VCK_ByCopy;
}
/// Determine whether this capture handles a variable-length array
/// type.
bool capturesVariableArrayType() const {
return getCaptureKind() == VCK_VLAType;
}
/// Retrieve the declaration of the variable being captured.
///
/// This operation is only valid if this capture captures a variable.
VarDecl *getCapturedVar() const;
};
private:
/// The number of variable captured, including 'this'.
unsigned NumCaptures;
/// The pointer part is the implicit the outlined function and the
/// int part is the captured region kind, 'CR_Default' etc.
llvm::PointerIntPair<CapturedDecl *, 2, CapturedRegionKind> CapDeclAndKind;
/// The record for captured variables, a RecordDecl or CXXRecordDecl.
RecordDecl *TheRecordDecl = nullptr;
/// Construct a captured statement.
CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures,
ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD);
/// Construct an empty captured statement.
CapturedStmt(EmptyShell Empty, unsigned NumCaptures);
Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); }
Stmt *const *getStoredStmts() const {
return reinterpret_cast<Stmt *const *>(this + 1);
}
Capture *getStoredCaptures() const;
void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; }
public:
friend class ASTStmtReader;
static CapturedStmt *Create(const ASTContext &Context, Stmt *S,
CapturedRegionKind Kind,
ArrayRef<Capture> Captures,
ArrayRef<Expr *> CaptureInits,
CapturedDecl *CD, RecordDecl *RD);
static CapturedStmt *CreateDeserialized(const ASTContext &Context,
unsigned NumCaptures);
/// Retrieve the statement being captured.
Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; }
const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; }
/// Retrieve the outlined function declaration.
CapturedDecl *getCapturedDecl();
const CapturedDecl *getCapturedDecl() const;
/// Set the outlined function declaration.
void setCapturedDecl(CapturedDecl *D);
/// Retrieve the captured region kind.
CapturedRegionKind getCapturedRegionKind() const;
/// Set the captured region kind.
void setCapturedRegionKind(CapturedRegionKind Kind);
/// Retrieve the record declaration for captured variables.
const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; }
/// Set the record declaration for captured variables.
void setCapturedRecordDecl(RecordDecl *D) {
assert(D && "null RecordDecl");
TheRecordDecl = D;
}
/// True if this variable has been captured.
bool capturesVariable(const VarDecl *Var) const;
/// An iterator that walks over the captures.
using capture_iterator = Capture *;
using const_capture_iterator = const Capture *;
using capture_range = llvm::iterator_range<capture_iterator>;
using capture_const_range = llvm::iterator_range<const_capture_iterator>;
capture_range captures() {
return capture_range(capture_begin(), capture_end());
}
capture_const_range captures() const {
return capture_const_range(capture_begin(), capture_end());
}
/// Retrieve an iterator pointing to the first capture.
capture_iterator capture_begin() { return getStoredCaptures(); }
const_capture_iterator capture_begin() const { return getStoredCaptures(); }
/// Retrieve an iterator pointing past the end of the sequence of
/// captures.
capture_iterator capture_end() const {
return getStoredCaptures() + NumCaptures;
}
/// Retrieve the number of captures, including 'this'.
unsigned capture_size() const { return NumCaptures; }
/// Iterator that walks over the capture initialization arguments.
using capture_init_iterator = Expr **;
using capture_init_range = llvm::iterator_range<capture_init_iterator>;
/// Const iterator that walks over the capture initialization
/// arguments.
using const_capture_init_iterator = Expr *const *;
using const_capture_init_range =
    llvm::iterator_range<const_capture_init_iterator>;
/// Retrieve the range of capture initialization arguments.
capture_init_range capture_inits() {
  return capture_init_range(capture_init_begin(), capture_init_end());
}
const_capture_init_range capture_inits() const {
  return const_capture_init_range(capture_init_begin(), capture_init_end());
}
/// Retrieve the first initialization argument. The init expressions are
/// stored at the front of the trailing statement array, one per capture.
capture_init_iterator capture_init_begin() {
  return reinterpret_cast<Expr **>(getStoredStmts());
}
const_capture_init_iterator capture_init_begin() const {
  return reinterpret_cast<Expr *const *>(getStoredStmts());
}
/// Retrieve the iterator pointing one past the last initialization
/// argument.
capture_init_iterator capture_init_end() {
  return capture_init_begin() + NumCaptures;
}
const_capture_init_iterator capture_init_end() const {
  return capture_init_begin() + NumCaptures;
}
/// Source locations delegate to the captured statement.
SourceLocation getBeginLoc() const LLVM_READONLY {
  return getCapturedStmt()->getBeginLoc();
}
SourceLocation getEndLoc() const LLVM_READONLY {
  return getCapturedStmt()->getEndLoc();
}
SourceRange getSourceRange() const LLVM_READONLY {
  return getCapturedStmt()->getSourceRange();
}
/// Support for LLVM-style RTTI (isa/dyn_cast).
static bool classof(const Stmt *T) {
  return T->getStmtClass() == CapturedStmtClass;
}
child_range children();
const_child_range children() const;
};
} // namespace clang
#endif // LLVM_CLANG_AST_STMT_H
|
kernel_cos.c | /*! @copyright (c) 2017 King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
*
* STARS-H is a software package, provided by King Abdullah
* University of Science and Technology (KAUST)
*
* @generate NDIM -> n 1 2 3 4
* Generate different functions for different dimensions. This hack improves
* performance in certain cases. Value 'n' stands for general case, whereas all
* other values correspond to static values of dimensionality.
* During code generation step, each appearance of @NDIM (including this one)
* will be replace by proposed values. If you want to use this file outside
* STARS-H, simply do substitutions yourself.
*
* @file src/applications/electrodynamics/kernel_cos.c
* @version 0.3.0
* @author Aleksandr Mikhalev
* @date 2017-11-07
*/
#include "common.h"
#include "starsh.h"
#include "starsh-electrodynamics.h"
// If dimensionality is static
#if (@NDIM != n)
//! Replace variable ndim with static integer value
#define ndim @NDIM
#endif
void starsh_eddata_block_cos_kernel_@NDIMd(int nrows, int ncols,
        STARSH_int *irow, STARSH_int *icol, void *row_data, void *col_data,
        void *result, int ld)
//! Helmholtz cos for @NDIM-dimensional electrodynamics problem.
/*! Fills matrix \f$ A \f$ with values
 * \f[
 *      A_{ij} = \frac{cos(k r_{ij})}{r_{ij}},
 * \f]
 * \f$ r_{ij} \f$ is a distance between \f$i\f$-th and \f$j\f$-th spatial
 * points and \f$ k \f$ is a wave number. No memory is allocated in this
 * function!
 *
 * @param[in] nrows: Number of rows of \f$ A \f$.
 * @param[in] ncols: Number of columns of \f$ A \f$.
 * @param[in] irow: Array of row indexes.
 * @param[in] icol: Array of column indexes.
 * @param[in] row_data: Pointer to physical data (@ref STARSH_eddata object).
 * @param[in] col_data: Pointer to physical data (@ref STARSH_eddata object).
 * @param[out] result: Pointer to memory of \f$ A \f$.
 * @param[in] ld: Leading dimension of `result`.
 * @sa starsh_eddata_block_cos_kernel_1d(),
 *      starsh_eddata_block_cos_kernel_2d(),
 *      starsh_eddata_block_cos_kernel_3d(),
 *      starsh_eddata_block_cos_kernel_4d(),
 *      starsh_eddata_block_cos_kernel_nd().
 * @ingroup app-electrodynamics-kernels
 * */
{
    int i, j, k;
    STARSH_eddata *data1 = row_data;
    STARSH_eddata *data2 = col_data;
    double tmp, dist;
    // Read parameters
    // If dimensionality is not static
#if (@NDIM == n)
    int ndim = data1->particles.ndim;
#endif
    // Get coordinates: points are stored coordinate-major, so the i-th
    // coordinate array starts at offset i*count from the first one
    STARSH_int count1 = data1->particles.count;
    STARSH_int count2 = data2->particles.count;
    double *x1[ndim], *x2[ndim];
    double wave_k = data1->k;
    double diag = data1->diag;
    x1[0] = data1->particles.point;
    x2[0] = data2->particles.point;
    //#pragma omp simd
    for(i = 1; i < ndim; i++)
    {
        x1[i] = x1[0]+i*count1;
        x2[i] = x2[0]+i*count2;
    }
    double *buffer = result;
    // Fill column-major matrix
    //#pragma omp simd
    for(j = 0; j < ncols; j++)
    {
        for(i = 0; i < nrows; i++)
        {
            dist = 0.0;
            for(k = 0; k < ndim; k++)
            {
                tmp = x1[k][irow[i]]-x2[k][icol[j]];
                dist += tmp*tmp;
            }
            // coinciding points: use the user-provided diagonal value to
            // avoid division by zero
            if(dist == 0)
                buffer[j*(size_t)ld+i] = diag;
            else
            {
                dist = sqrt(dist);
                buffer[j*(size_t)ld+i] = cos(wave_k*dist)/dist;
            }
        }
    }
}
void starsh_eddata_block_cos_kernel_@NDIMd_simd(int nrows, int ncols,
        STARSH_int *irow, STARSH_int *icol, void *row_data, void *col_data,
        void *result, int ld)
//! Helmholtz cos for @NDIM-dimensional electrodynamics problem.
/*! Fills matrix \f$ A \f$ with values
 * \f[
 *      A_{ij} = \frac{cos(k r_{ij})}{r_{ij}},
 * \f]
 * \f$ r_{ij} \f$ is a distance between \f$i\f$-th and \f$j\f$-th spatial
 * points and \f$ k \f$ is a wave number. No memory is allocated in this
 * function!
 *
 * Uses SIMD instructions.
 *
 * @param[in] nrows: Number of rows of \f$ A \f$.
 * @param[in] ncols: Number of columns of \f$ A \f$.
 * @param[in] irow: Array of row indexes.
 * @param[in] icol: Array of column indexes.
 * @param[in] row_data: Pointer to physical data (@ref STARSH_eddata object).
 * @param[in] col_data: Pointer to physical data (@ref STARSH_eddata object).
 * @param[out] result: Pointer to memory of \f$ A \f$.
 * @param[in] ld: Leading dimension of `result`.
 * @sa starsh_eddata_block_cos_kernel_1d_simd(),
 *      starsh_eddata_block_cos_kernel_2d_simd(),
 *      starsh_eddata_block_cos_kernel_3d_simd(),
 *      starsh_eddata_block_cos_kernel_4d_simd(),
 *      starsh_eddata_block_cos_kernel_nd_simd().
 * @ingroup app-electrodynamics-kernels
 * */
{
    int i, j, k;
    STARSH_eddata *data1 = row_data;
    STARSH_eddata *data2 = col_data;
    double tmp, dist;
    // Read parameters
    // If dimensionality is not static
#if (@NDIM == n)
    int ndim = data1->particles.ndim;
#endif
    // Get coordinates: points are stored coordinate-major, so the i-th
    // coordinate array starts at offset i*count from the first one
    STARSH_int count1 = data1->particles.count;
    STARSH_int count2 = data2->particles.count;
    double *x1[ndim], *x2[ndim];
    double wave_k = data1->k;
    double diag = data1->diag;
    x1[0] = data1->particles.point;
    x2[0] = data2->particles.point;
    #pragma omp simd
    for(i = 1; i < ndim; i++)
    {
        x1[i] = x1[0]+i*count1;
        x2[i] = x2[0]+i*count2;
    }
    double *buffer = result;
    // Fill column-major matrix
    #pragma omp simd
    for(j = 0; j < ncols; j++)
    {
        for(i = 0; i < nrows; i++)
        {
            dist = 0.0;
            for(k = 0; k < ndim; k++)
            {
                tmp = x1[k][irow[i]]-x2[k][icol[j]];
                dist += tmp*tmp;
            }
            // coinciding points: use the user-provided diagonal value to
            // avoid division by zero
            if(dist == 0)
                buffer[j*(size_t)ld+i] = diag;
            else
            {
                dist = sqrt(dist);
                buffer[j*(size_t)ld+i] = cos(wave_k*dist)/dist;
            }
        }
    }
}
|
omp_single_private_nothreadprivate.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include "omp_testsuite.h"
/* Check that `omp single private(...)` operates on a private copy of the
 * listed variable: the single region resets and increments its private
 * nr_threads_in_single, so the shared variable must remain 0, and the
 * number of executed single regions must equal LOOPCOUNT. */
int test_omp_single_private()
{
    int nr_threads_in_single; /* shared; shadowed by private() in the single */
    int result;
    int nr_iterations;
    int i;
    nr_threads_in_single = 0;
    nr_iterations = 0;
    result = 0;
#pragma omp parallel private(i)
    {
        int myresult = 0;
        int myit = 0;
        for (i = 0; i < LOOPCOUNT; i++) {
#pragma omp single private(nr_threads_in_single) nowait
            {
                /* all of this touches only the private copy */
                nr_threads_in_single = 0;
#pragma omp flush
                nr_threads_in_single++;
#pragma omp flush
                myit++;
                myresult = myresult + nr_threads_in_single;
            }
        }
#pragma omp critical
        {
            /* reads the SHARED nr_threads_in_single, which must still be 0 */
            result += nr_threads_in_single;
            nr_iterations += myit;
        }
    }
    return ((result == 0) && (nr_iterations == LOOPCOUNT));
} /* end of check_single private */
/* Run the single-private check REPETITIONS times; the exit status is the
 * number of repetitions that failed (0 == success). */
int main()
{
    int failures = 0;
    int rep;
    for (rep = 0; rep < REPETITIONS; rep++) {
        /* count every repetition in which the check does not pass */
        failures += !test_omp_single_private();
    }
    return failures;
}
|
GB_unop__signum_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__signum_fp64_fp64)
// op(A') function: GB (_unop_tran__signum_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = GB_signum (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_signum (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = GB_signum (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SIGNUM || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = signum (Ax [p]) for all entries of A, either for the dense/sparse
// case (Ab == NULL: all anz entries present) or the bitmap case (Ab marks
// which positions hold entries). Cx and Ax may be aliased.
GrB_Info GB (_unop_apply__signum_fp64_fp64)
(
    double *Cx, // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab, // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // A is not bitmap: every one of the anz values is a live entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op with no typecast: the result is a plain copy
        GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = GB_signum (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = GB_signum (z) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = signum (A'): transpose A while applying the unary operator. The shared
// kernel body is textually included and driven by the GB_* macros defined
// earlier in this file.
GrB_Info GB (_unop_tran__signum_fp64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
meta.h | #include <iostream>
#include <vector>
#include <string>
#include <omp.h>
#include "formula.h"
using namespace std;
// Recursive generator: level N chooses the multiplicity of element N-1 and
// recurses down to the <0> specialization, which records the formula.
// The entry level (N == elcount) parallelizes its multiplicity loop with
// OpenMP; each thread recurses on a private copy of `current`.
template<int N>
class formula_generator{
public:
    // writer:   destination pool for formulas with mass in [loMass, hiMass]
    // elcount:  total number of elements
    // minimum/maximum: per-element multiplicity bounds
    // masses:   per-element masses
    // current:  multiplicities chosen by outer levels
    // pre_mass: mass accumulated so far
    // k:        multiplicity chosen at the parallel top level; the <0>
    //           specialization records this parameter for the last element
    //           instead of current[elcount-1]
    static inline void EXEC(ResultsWriter & writer, int elcount, int minimum[], int maximum[], double masses[], int *current, double pre_mass, double loMass, double hiMass, int k)
    {
        // largest multiplicity that keeps the total mass within hiMass
        int c = min(int((hiMass-pre_mass)/masses[N-1]),maximum[N-1]);
        if (N == elcount)
        {
            #pragma omp parallel for
            for (k = minimum[N-1]; k<=c; k++)
            {
                // NOTE(review): current[N-1] is written by all threads, but
                // that slot is never consumed downstream — the base case
                // records the per-thread parameter k for the last element
                current[N-1] = k;
                double current_mass_i= pre_mass+masses[N-1]*k;
                // give each thread its own copy of the partial assignment
                int *current_i = new int [elcount];
                for (int i=0;i<elcount;i++)
                {
                    current_i[i]=current[i];
                }
                formula_generator<N-1>::EXEC(writer, elcount, minimum, maximum, masses, current_i, current_mass_i, loMass, hiMass, k);
                delete []current_i;
            }
        }
        else
        {
            // sequential inner levels mutate `current` in place
            for (int i = minimum[N-1]; i<=c; i++)
            {
                current[N-1] = i;
                double current_mass_i= pre_mass+masses[N-1]*i;
                formula_generator<N-1>::EXEC(writer, elcount, minimum, maximum, masses, current, current_mass_i, loMass, hiMass, k);
            }
        }
    }
};
// Base case of the recursion: all multiplicities are chosen. If the
// accumulated mass lies in [loMass, hiMass], append the formula to the
// writer's result pool, flushing the pool when it fills up.
template<>
class formula_generator<0>{
public:
    static inline void EXEC(ResultsWriter & writer, int elcount, int minimum[], int maximum[], double masses[], int *current, double pre_mass, double loMass, double hiMass, int k)
    {
        if (pre_mass >= loMass && pre_mass <= hiMass )
        {
            // the result pool is shared by the OpenMP threads of the top
            // level, so all writes to it are serialized
            #pragma omp critical (p_result)
            {
                for ( int i = 0; i < elcount; ++i)
                {
                    if (i != elcount-1)
                    {
                        writer.p_result->data[writer.p_result->len*elcount+i] = current[i];
                    }
                    else
                    {
                        // the last multiplicity is carried in `k`, not in
                        // `current` (see the parallel top level)
                        writer.p_result->data[writer.p_result->len*elcount+i] = k;
                    }
                }
                writer.p_result->mass[writer.p_result->len] = pre_mass;
                writer.p_result->len++;
                // flush and reset when the pool reaches its capacity
                if (writer.p_result->len==writer.m_poolSize)
                {
                    writer.writeResults();
                    writer.p_result->len = 0;
                }
            }
        }
    }
};
mph.h | #ifndef PERFECTHASH_H_
#define PERFECTHASH_H_
#include <iostream>
#include <cstdint>
#include <cstring>
#include <stdio.h>
#include <unistd.h>
#include <stdint.h>
#include <ctype.h>
#include <stdlib.h>
#include <bf.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
extern "C" {
#include <cmph.h>
}
#include "../seq/types.h"
#include "counts_table.h"
// exact counts table using a minimal perfect hash function
// Exact counts table using a minimal perfect hash function (cmph).
// A bloom filter over the distinct keys is kept so that lookup() can reject
// keys that were never inserted (the mphf alone would map unknown keys to
// arbitrary slots).
class mphf_table_t : public counts_table_t {
public:
    static const int MIN_KMER_COUNT = 1; // kmers seen <= this many times are discarded
    typedef struct {
        char val[8];
    } key_t; // converted struct key to match the cmph API
    cmph_t* hash = NULL; // minimal perfect hash function
    uint64 n_keys = 0; // number of distinct keys
    counter_t* counts = NULL; // array of counts, indexed by mphf slot id
    bf::basic_bloom_filter* bf = NULL; // bloom filter of distinct keys used during mphf construction
    mphf_table_t() {} // todo: clean up empty constructor, refactor
    // Build the table from a list of kmers: sort, run-length count, drop
    // rare kmers, then construct the mphf/bloom filter/counts array.
    mphf_table_t(std::vector<kmer_2bit_t>& keys) {
        std::cout << "Sorting and counting... " << keys.size() << " kmers\n";
        std::sort(keys.begin(), keys.end());
        std::vector<kmer_2bit_t> keys_distinct;
        std::vector<counter_t> key_counts;
        keys_distinct.reserve(keys.size());
        key_counts.reserve(keys.size());
        counter_t c = 1;
        for(uint64 i = 1; i < keys.size(); i++) {
            if(keys[i] == keys[i-1]) {
                // saturate instead of overflowing the counter type
                if(c < std::numeric_limits<counter_t>::max()) {
                    c++;
                }
            } else {
                if(c > MIN_KMER_COUNT) {
                    keys_distinct.push_back(keys[i-1]);
                    key_counts.push_back(c);
                }
                c = 1;
            }
        }
        // flush the final run
        if(c > MIN_KMER_COUNT) {
            keys_distinct.push_back(keys[keys.size()-1]);
            key_counts.push_back(c);
        }
        n_keys = keys_distinct.size();
        //std::cout << "Number of distinct kmers: " << n_keys << "\n";
        keys.clear();
        init(keys_distinct, key_counts);
    }
    // Construct the mphf, the bloom filter, and the counts array from the
    // distinct keys and their counts.
    void init(std::vector<kmer_2bit_t>& keys, const std::vector<counter_t>& key_counts) {
        // construct the mphf
        std::cout << "Building the mphf...\n";
        key_t* key_structs = new key_t[n_keys];
        #pragma omp parallel for
        for(uint64 i = 0; i < n_keys; i++) {
            get_key(keys[i], key_structs[i]);
        }
        build_mpfh(key_structs);
        delete [] key_structs; // fixed: was free() on new[] memory (UB)
        // store the keys in a bloom filter
        std::cout << "Building the bloom filter...\n";
        bf = new bf::basic_bloom_filter(0.05, n_keys, 0, false, false);
        for(uint64 i = 0; i < n_keys; i++) {
            bf->add(keys[i]);
        }
        // scatter the counts into mphf slot order (distinct ids => no race)
        std::cout << "Scattering counts...\n";
        counts = new counter_t[n_keys];
        #pragma omp parallel for
        for(uint64 i = 0; i < n_keys; i++) {
            const unsigned int id = get_id(keys[i]);
            counts[id] = key_counts[i];
        }
        keys.clear();
        std::cout << "Index construction done!\n";
    }
    // Copy the kmer's 8 bytes into the fixed-size key struct used by cmph.
    static inline void get_key(const kmer_2bit_t& key_in, key_t& key_out) {
        std::memcpy(key_out.val, &key_in, sizeof(key_out.val));
    }
    // Map a kmer to its mphf slot id.
    inline unsigned int get_id(const kmer_2bit_t& key) const {
        key_t key_struct;
        get_key(key, key_struct);
        return cmph_search(hash, key_struct.val, sizeof(key_struct.val));
    }
    // Build the minimal perfect hash function over the given key array.
    void build_mpfh(key_t* keys) {
        cmph_io_adapter_t* source = cmph_io_struct_vector_adapter(keys, (cmph_uint32) sizeof(key_t), 0, sizeof(key_t), n_keys);
        cmph_config_t* config = cmph_config_new(source);
        cmph_config_set_algo(config, CMPH_BDZ);
        hash = cmph_new(config);
        if(hash == NULL) {
            std::cout << "ERROR: null hash \n";
            exit(-1);
        }
        cmph_config_destroy(config);
        cmph_io_struct_vector_adapter_destroy(source); // fixed: adapter was leaked
    }
    virtual ~mphf_table_t() {
        // guard against the default-constructed (empty) table
        if(hash != NULL) {
            cmph_destroy(hash);
        }
        if(bf != NULL) {
            bf->clear();
            delete bf; // fixed: was free() on new'ed object (UB)
        }
        delete [] counts; // fixed: was free() on new[] memory; delete[] NULL is a no-op
    }
    virtual void clear() {}
    // insert a kmer into the sketch (the kmer must be one of the distinct keys);
    // the count is incremented with a CAS loop and saturates at the counter max
    virtual void insert(const kmer_2bit_t& key, const int stream_id) {
        const unsigned int id = get_id(key);
        counter_t newval, curr;
        do {
            curr = counts[id];
            if(curr == std::numeric_limits<counter_t>::max()) {
                return;
            }
            newval = curr + 1;
        } while (!__sync_bool_compare_and_swap(&counts[id], curr, newval));
    }
    // lookup the kmer count in the sketch
    virtual counter_t lookup(const kmer_2bit_t& key, const int stream_id) const {
        // check the bloom filter to ensure the key is part of the sketch
        if(!bf->lookup(key)) return 0;
        return counts[get_id(key)];
    }
    // write the table to file: mphf, n_keys, bloom filter bits, then counts
    virtual void save_to_file(const std::string& fname, const int n_count_bits) {
        save_mphf_aux(fname);
        std::ofstream file;
        file.open(fname.c_str(), std::ios::out | std::ios::binary | std::ios::app);
        file.write(reinterpret_cast<char*>(&n_keys), sizeof(n_keys));
        // bloom // TEMP getting around the lack of io in the bf lib
        const bf::bitvector& bits = bf->storage();
        long int bits_size = bits.bits_.size();
        file.write(reinterpret_cast<const char*>(&bits.num_bits_), sizeof(bits.num_bits_));
        file.write(reinterpret_cast<char*>(&bits_size), sizeof(bits_size));
        file.write(reinterpret_cast<const char*>(&bits.bits_[0]), bits_size*sizeof(bits.bits_[0]));
        if(n_count_bits == std::numeric_limits<counter_t>::digits) {
            // full-width counters: dump the array as-is
            file.write(reinterpret_cast<char*>(&counts[0]), n_keys*sizeof(counter_t));
        } else {
            // narrow counters: clip each count to one byte
            int n_clipped = 0;
            uint8 maxv = 255; // reduce to char temp
            for(uint64 i = 0; i < n_keys; i++) {
                if(counts[i] >= maxv) {
                    file.write(reinterpret_cast<char*>(&maxv), sizeof(uint8));
                    n_clipped++;
                }
                else {
                    file.write(reinterpret_cast<char*>(&counts[i]), sizeof(uint8));
                }
            }
            std::cout << "Number of clipped counts: " << n_clipped << "\n";
        }
        file.close();
    }
    // append the serialized mphf to the file
    void save_mphf_aux(const std::string& fname) {
        FILE* mphf_fd = fopen(fname.c_str(), "a");
        cmph_dump(hash, mphf_fd);
        fclose(mphf_fd);
    }
    // load the table from file; returns the file offset after the table
    virtual long int load_from_file(const std::string& fname, long int file_offset) {
        long int file_pos = load_mpfh_aux(fname, file_offset);
        std::ifstream file;
        file.open(fname.c_str(), std::ios::in | std::ios::binary);
        file.seekg(file_pos, file.beg);
        file.read(reinterpret_cast<char*>(&n_keys), sizeof(n_keys));
        // bloom
        bf = new bf::basic_bloom_filter(0.05, n_keys, 0, false, false);
        size_t nbits;
        file.read(reinterpret_cast<char*>(&nbits), sizeof(nbits));
        long int bits_size;
        file.read(reinterpret_cast<char*>(&bits_size), sizeof(bits_size));
        file.read(reinterpret_cast<char*>(&bf->bits_.bits_[0]), bits_size*sizeof(bf->bits_.bits_[0]));
        counts = new counter_t[n_keys];
        file.read(reinterpret_cast<char*>(&counts[0]), n_keys*sizeof(counter_t));
        long int s = file.tellg();
        file.close();
        return s;
    }
    // load the mphf portion; returns the file offset after the mphf
    long int load_mpfh_aux(const std::string& fname, long int file_offset) {
        FILE* mphf_fd = fopen(fname.c_str(), "r");
        fseek(mphf_fd, file_offset, SEEK_SET);
        hash = cmph_load(mphf_fd);
        long int s = ftell(mphf_fd);
        fclose(mphf_fd);
        return s;
    }
    // create the mphf for a given set of keys and save to disk
    // + bloom filter
    // note: all input keys are expected to be distinct
    void build_and_save_mphf_aux(const std::string& distinct_keys_fname) {
        std::ifstream keys_file;
        keys_file.open(distinct_keys_fname.c_str(), std::ios::in | std::ios::binary);
        if (!keys_file.is_open()) {
            std::cerr << "ERROR: Could not open keys file: " << distinct_keys_fname << "\n";
            exit(1);
        }
        std::cout << "Loading..." << distinct_keys_fname << "\n";
        keys_file.seekg(0, std::ios::end);
        long long int size = keys_file.tellg();
        keys_file.seekg(0, std::ios::beg);
        // NOTE(review): assumes sizeof(kmer_2bit_t) == sizeof(key_t) (8 bytes),
        // since the count uses one and the read uses the other — confirm
        n_keys = size/sizeof(kmer_2bit_t);
        key_t* keys = new key_t[n_keys];
        std::cout << "Expected number of keys: " << n_keys << "\n";
        keys_file.read(reinterpret_cast<char*>(&keys[0]), n_keys*sizeof(key_t));
        std::cout << "Finished loading the keys \n";
        // function
        std::string mphf_fname = std::string(distinct_keys_fname);
        mphf_fname += std::string(".mph");
        build_mpfh(keys);
        save_mphf_aux(mphf_fname);
        delete [] keys; // fixed: was free() on new[] memory (UB)
    }
    virtual void print_stats() {
        std::cout << "Number of distinct keys: " << n_keys << "\n";
    }
    virtual int get_n_streams() {
        return 1;
    }
};
#endif
|
middle2r.c | /*
* Date: 11 December 2015
* Contact: Thomas Peyrin - thomas.peyrin@gmail.com
*/
/*
* Simmulation of boomerang analysis for Skinny
* Date: March 21, 2020
* Author: Hosein Hadipour
* Contact: hsn.hadipour@gmail.com
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#include <stdbool.h>
// #define DEBUG 1
#define Nthreads 6
// Table that encodes the parameters of the various Skinny versions:
// (block size, key size, number of rounds)
//Skinny-64-64: 32 rounds
//Skinny-64-128: 36 rounds
//Skinny-64-192: 40 rounds
//Skinny-128-128: 40 rounds
//Skinny-128-256: 48 rounds
//Skinny-128-384: 56 rounds
int versions[6][3] = {{64, 64, 32}, {64, 128, 36}, {64, 192, 40}, {128, 128, 40}, {128, 256, 48}, {128, 384, 56}};
// Packing of data is done as follows (state[i][j] stands for row i and column j):
// 0 1 2 3
// 4 5 6 7
// 8 9 10 11
//12 13 14 15
// 4-bit Sbox
const unsigned char sbox_4[16] = {12, 6, 9, 0, 1, 10, 2, 11, 3, 8, 5, 13, 4, 14, 7, 15};
const unsigned char sbox_4_inv[16] = {3, 4, 6, 8, 12, 10, 1, 14, 9, 2, 5, 7, 0, 11, 13, 15};
// 8-bit Sbox
const unsigned char sbox_8[256] = {0x65, 0x4c, 0x6a, 0x42, 0x4b, 0x63, 0x43, 0x6b, 0x55, 0x75, 0x5a, 0x7a, 0x53, 0x73, 0x5b, 0x7b, 0x35, 0x8c, 0x3a, 0x81, 0x89, 0x33, 0x80, 0x3b, 0x95, 0x25, 0x98, 0x2a, 0x90, 0x23, 0x99, 0x2b, 0xe5, 0xcc, 0xe8, 0xc1, 0xc9, 0xe0, 0xc0, 0xe9, 0xd5, 0xf5, 0xd8, 0xf8, 0xd0, 0xf0, 0xd9, 0xf9, 0xa5, 0x1c, 0xa8, 0x12, 0x1b, 0xa0, 0x13, 0xa9, 0x05, 0xb5, 0x0a, 0xb8, 0x03, 0xb0, 0x0b, 0xb9, 0x32, 0x88, 0x3c, 0x85, 0x8d, 0x34, 0x84, 0x3d, 0x91, 0x22, 0x9c, 0x2c, 0x94, 0x24, 0x9d, 0x2d, 0x62, 0x4a, 0x6c, 0x45, 0x4d, 0x64, 0x44, 0x6d, 0x52, 0x72, 0x5c, 0x7c, 0x54, 0x74, 0x5d, 0x7d, 0xa1, 0x1a, 0xac, 0x15, 0x1d, 0xa4, 0x14, 0xad, 0x02, 0xb1, 0x0c, 0xbc, 0x04, 0xb4, 0x0d, 0xbd, 0xe1, 0xc8, 0xec, 0xc5, 0xcd, 0xe4, 0xc4, 0xed, 0xd1, 0xf1, 0xdc, 0xfc, 0xd4, 0xf4, 0xdd, 0xfd, 0x36, 0x8e, 0x38, 0x82, 0x8b, 0x30, 0x83, 0x39, 0x96, 0x26, 0x9a, 0x28, 0x93, 0x20, 0x9b, 0x29, 0x66, 0x4e, 0x68, 0x41, 0x49, 0x60, 0x40, 0x69, 0x56, 0x76, 0x58, 0x78, 0x50, 0x70, 0x59, 0x79, 0xa6, 0x1e, 0xaa, 0x11, 0x19, 0xa3, 0x10, 0xab, 0x06, 0xb6, 0x08, 0xba, 0x00, 0xb3, 0x09, 0xbb, 0xe6, 0xce, 0xea, 0xc2, 0xcb, 0xe3, 0xc3, 0xeb, 0xd6, 0xf6, 0xda, 0xfa, 0xd3, 0xf3, 0xdb, 0xfb, 0x31, 0x8a, 0x3e, 0x86, 0x8f, 0x37, 0x87, 0x3f, 0x92, 0x21, 0x9e, 0x2e, 0x97, 0x27, 0x9f, 0x2f, 0x61, 0x48, 0x6e, 0x46, 0x4f, 0x67, 0x47, 0x6f, 0x51, 0x71, 0x5e, 0x7e, 0x57, 0x77, 0x5f, 0x7f, 0xa2, 0x18, 0xae, 0x16, 0x1f, 0xa7, 0x17, 0xaf, 0x01, 0xb2, 0x0e, 0xbe, 0x07, 0xb7, 0x0f, 0xbf, 0xe2, 0xca, 0xee, 0xc6, 0xcf, 0xe7, 0xc7, 0xef, 0xd2, 0xf2, 0xde, 0xfe, 0xd7, 0xf7, 0xdf, 0xff};
const unsigned char sbox_8_inv[256] = {0xac, 0xe8, 0x68, 0x3c, 0x6c, 0x38, 0xa8, 0xec, 0xaa, 0xae, 0x3a, 0x3e, 0x6a, 0x6e, 0xea, 0xee, 0xa6, 0xa3, 0x33, 0x36, 0x66, 0x63, 0xe3, 0xe6, 0xe1, 0xa4, 0x61, 0x34, 0x31, 0x64, 0xa1, 0xe4, 0x8d, 0xc9, 0x49, 0x1d, 0x4d, 0x19, 0x89, 0xcd, 0x8b, 0x8f, 0x1b, 0x1f, 0x4b, 0x4f, 0xcb, 0xcf, 0x85, 0xc0, 0x40, 0x15, 0x45, 0x10, 0x80, 0xc5, 0x82, 0x87, 0x12, 0x17, 0x42, 0x47, 0xc2, 0xc7, 0x96, 0x93, 0x03, 0x06, 0x56, 0x53, 0xd3, 0xd6, 0xd1, 0x94, 0x51, 0x04, 0x01, 0x54, 0x91, 0xd4, 0x9c, 0xd8, 0x58, 0x0c, 0x5c, 0x08, 0x98, 0xdc, 0x9a, 0x9e, 0x0a, 0x0e, 0x5a, 0x5e, 0xda, 0xde, 0x95, 0xd0, 0x50, 0x05, 0x55, 0x00, 0x90, 0xd5, 0x92, 0x97, 0x02, 0x07, 0x52, 0x57, 0xd2, 0xd7, 0x9d, 0xd9, 0x59, 0x0d, 0x5d, 0x09, 0x99, 0xdd, 0x9b, 0x9f, 0x0b, 0x0f, 0x5b, 0x5f, 0xdb, 0xdf, 0x16, 0x13, 0x83, 0x86, 0x46, 0x43, 0xc3, 0xc6, 0x41, 0x14, 0xc1, 0x84, 0x11, 0x44, 0x81, 0xc4, 0x1c, 0x48, 0xc8, 0x8c, 0x4c, 0x18, 0x88, 0xcc, 0x1a, 0x1e, 0x8a, 0x8e, 0x4a, 0x4e, 0xca, 0xce, 0x35, 0x60, 0xe0, 0xa5, 0x65, 0x30, 0xa0, 0xe5, 0x32, 0x37, 0xa2, 0xa7, 0x62, 0x67, 0xe2, 0xe7, 0x3d, 0x69, 0xe9, 0xad, 0x6d, 0x39, 0xa9, 0xed, 0x3b, 0x3f, 0xab, 0xaf, 0x6b, 0x6f, 0xeb, 0xef, 0x26, 0x23, 0xb3, 0xb6, 0x76, 0x73, 0xf3, 0xf6, 0x71, 0x24, 0xf1, 0xb4, 0x21, 0x74, 0xb1, 0xf4, 0x2c, 0x78, 0xf8, 0xbc, 0x7c, 0x28, 0xb8, 0xfc, 0x2a, 0x2e, 0xba, 0xbe, 0x7a, 0x7e, 0xfa, 0xfe, 0x25, 0x70, 0xf0, 0xb5, 0x75, 0x20, 0xb0, 0xf5, 0x22, 0x27, 0xb2, 0xb7, 0x72, 0x77, 0xf2, 0xf7, 0x2d, 0x79, 0xf9, 0xbd, 0x7d, 0x29, 0xb9, 0xfd, 0x2b, 0x2f, 0xbb, 0xbf, 0x7b, 0x7f, 0xfb, 0xff};
// ShiftAndSwitchRows permutation
const unsigned char P[16] = {0, 1, 2, 3, 7, 4, 5, 6, 10, 11, 8, 9, 13, 14, 15, 12};
const unsigned char P_inv[16] = {0, 1, 2, 3, 5, 6, 7, 4, 10, 11, 8, 9, 15, 12, 13, 14};
// Tweakey permutation
const unsigned char TWEAKEY_P[16] = {9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7};
const unsigned char TWEAKEY_P_inv[16] = {8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1};
// round constants
const unsigned char RC[62] = {
0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, 0x3D, 0x3B, 0x37, 0x2F,
0x1E, 0x3C, 0x39, 0x33, 0x27, 0x0E, 0x1D, 0x3A, 0x35, 0x2B,
0x16, 0x2C, 0x18, 0x30, 0x21, 0x02, 0x05, 0x0B, 0x17, 0x2E,
0x1C, 0x38, 0x31, 0x23, 0x06, 0x0D, 0x1B, 0x36, 0x2D, 0x1A,
0x34, 0x29, 0x12, 0x24, 0x08, 0x11, 0x22, 0x04, 0x09, 0x13,
0x26, 0x0c, 0x19, 0x32, 0x25, 0x0a, 0x15, 0x2a, 0x14, 0x28,
0x10, 0x20};
FILE *fic;
// Seed the C PRNG from wall-clock time plus a per-caller offset, so that
// concurrent or repeated runs start from different seeds.
void init_prng(int offset) {
    unsigned int seed = (unsigned int)(10*time(NULL) + 100*offset);
    srand(seed); // Initialization, should only be called once. int r = rand();
    printf("[+] PRNG initialized to 0x%08X\n", seed);
}
// Print the 4x4 state as a hex string to the output file `fic`: two 4-bit
// cells packed per byte for 64-bit block versions, one cell per byte for
// 128-bit block versions.
void display_matrix(unsigned char state[4][4], int ver)
{
    int i;
    unsigned char input[16];
    if (versions[ver][0] == 64)
    {
        // cells are nibbles: pack cell 2i (high) and cell 2i+1 (low)
        for (i = 0; i < 8; i++)
            input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF);
        for (i = 0; i < 8; i++)
            fprintf(fic, "%02x", input[i]);
    }
    else if (versions[ver][0] == 128)
    {
        // one byte per cell, row-major order
        for (i = 0; i < 16; i++)
            input[i] = state[i >> 2][i & 0x3] & 0xFF;
        for (i = 0; i < 16; i++)
            fprintf(fic, "%02x", input[i]);
    }
}
// Print the full cipher state to `fic`: the internal state followed by each
// active tweakey array (TK1..TK3, depending on the version).
void display_cipher_state(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
    int k;
    fprintf(fic, "S = ");
    display_matrix(state, ver);
    // versions[ver][1] / versions[ver][0] = number of tweakey arrays in use
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        fprintf(fic, " - TK%i = ", k + 1);
        display_matrix(keyCells[k], ver);
    }
}
// Extract and apply the subtweakey to the internal state (must be the two top rows XORed together), then update the tweakey state:
// permute every tweakey cell with TWEAKEY_P and apply the TK2/TK3 LFSRs to
// the two top rows of the permuted arrays.
void AddKey(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
    int i, j, k;
    unsigned char pos;
    unsigned char keyCells_tmp[3][4][4];
    // apply the subtweakey to the internal state (top two rows only)
    for (i = 0; i <= 1; i++)
    {
        for (j = 0; j < 4; j++)
        {
            state[i][j] ^= keyCells[0][i][j];
            // versions[ver][1]/versions[ver][0] selects TK1/TK2/TK3 mode
            if (2 * versions[ver][0] == versions[ver][1])
                state[i][j] ^= keyCells[1][i][j];
            else if (3 * versions[ver][0] == versions[ver][1])
                state[i][j] ^= keyCells[1][i][j] ^ keyCells[2][i][j];
        }
    }
    // update the subtweakey states with the permutation
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i < 4; i++)
        {
            for (j = 0; j < 4; j++)
            {
                //application of the TWEAKEY permutation
                pos = TWEAKEY_P[j + 4 * i];
                keyCells_tmp[k][i][j] = keyCells[k][pos >> 2][pos & 0x3];
            }
        }
    }
    // update the subtweakey states with the LFSRs (TK2 and TK3 only,
    // applied to the two top rows after the permutation)
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i <= 1; i++)
        {
            for (j = 0; j < 4; j++)
            {
                //application of LFSRs for TK updates
                if (k == 1)
                {
                    // TK2 LFSR; 4-bit variant for 64-bit blocks, 8-bit otherwise
                    if (versions[ver][0] == 64)
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xE) ^ ((keyCells_tmp[k][i][j] >> 3) & 0x1) ^ ((keyCells_tmp[k][i][j] >> 2) & 0x1);
                    else
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xFE) ^ ((keyCells_tmp[k][i][j] >> 7) & 0x01) ^ ((keyCells_tmp[k][i][j] >> 5) & 0x01);
                }
                else if (k == 2)
                {
                    // TK3 LFSR (shifts in the opposite direction)
                    if (versions[ver][0] == 64)
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7) ^ ((keyCells_tmp[k][i][j]) & 0x8) ^ ((keyCells_tmp[k][i][j] << 3) & 0x8);
                    else
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7F) ^ ((keyCells_tmp[k][i][j] << 7) & 0x80) ^ ((keyCells_tmp[k][i][j] << 1) & 0x80);
                }
            }
        }
    }
    // commit the updated tweakey state
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i < 4; i++)
        {
            for (j = 0; j < 4; j++)
            {
                keyCells[k][i][j] = keyCells_tmp[k][i][j];
            }
        }
    }
}
// Extract and apply the subtweakey to the internal state (must be the two top rows XORed together), then update the tweakey state (inverse function}:
// step the tweakey schedule backwards — inverse permutation first, then the
// inverse TK2/TK3 LFSRs on the two bottom rows (the cells that sat in the
// top rows before the forward permutation) — and finally XOR the resulting
// subtweakey into the two top rows of the state.
void AddKey_inv(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
    int i, j, k;
    unsigned char pos;
    unsigned char keyCells_tmp[3][4][4];
    // update the subtweakey states with the permutation
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i < 4; i++)
        {
            for (j = 0; j < 4; j++)
            {
                //application of the inverse TWEAKEY permutation
                pos = TWEAKEY_P_inv[j + 4 * i];
                keyCells_tmp[k][i][j] = keyCells[k][pos >> 2][pos & 0x3];
            }
        }
    }
    // update the subtweakey states with the LFSRs (rows 2..3 only)
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 2; i <= 3; i++)
        {
            for (j = 0; j < 4; j++)
            {
                //application of inverse LFSRs for TK updates
                if (k == 1)
                {
                    // inverse TK2 LFSR; 4-bit variant for 64-bit blocks
                    if (versions[ver][0] == 64)
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7) ^ ((keyCells_tmp[k][i][j] << 3) & 0x8) ^ ((keyCells_tmp[k][i][j]) & 0x8);
                    else
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7F) ^ ((keyCells_tmp[k][i][j] << 7) & 0x80) ^ ((keyCells_tmp[k][i][j] << 1) & 0x80);
                }
                else if (k == 2)
                {
                    // inverse TK3 LFSR
                    if (versions[ver][0] == 64)
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xE) ^ ((keyCells_tmp[k][i][j] >> 3) & 0x1) ^ ((keyCells_tmp[k][i][j] >> 2) & 0x1);
                    else
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xFE) ^ ((keyCells_tmp[k][i][j] >> 7) & 0x01) ^ ((keyCells_tmp[k][i][j] >> 5) & 0x01);
                }
            }
        }
    }
    // commit the updated tweakey state
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i < 4; i++)
        {
            for (j = 0; j < 4; j++)
            {
                keyCells[k][i][j] = keyCells_tmp[k][i][j];
            }
        }
    }
    // apply the subtweakey to the internal state (top two rows only)
    for (i = 0; i <= 1; i++)
    {
        for (j = 0; j < 4; j++)
        {
            state[i][j] ^= keyCells[0][i][j];
            if (2 * versions[ver][0] == versions[ver][1])
                state[i][j] ^= keyCells[1][i][j];
            else if (3 * versions[ver][0] == versions[ver][1])
                state[i][j] ^= keyCells[1][i][j] ^ keyCells[2][i][j];
        }
    }
}
// Apply the constants: using a LFSR counter on 6 bits, we XOR the 6 bits to the first 6 bits of the internal state.
// RC[r]'s low nibble goes to cell (0,0), its top two bits to cell (1,0),
// and cell (2,0) is XORed with the fixed constant 0x2.
void AddConstants(unsigned char state[4][4], int r)
{
    state[0][0] ^= (RC[r] & 0xf);
    state[1][0] ^= ((RC[r] >> 4) & 0x3);
    state[2][0] ^= 0x2;
}
// apply the 4-bit Sbox to every cell of the state
void SubCell4(unsigned char state[4][4])
{
    // the 4x4 state is contiguous in memory: walk it as a flat 16-byte array
    unsigned char *cell = &state[0][0];
    int n;
    for (n = 0; n < 16; n++)
        cell[n] = sbox_4[cell[n]];
}
// apply the 4-bit inverse Sbox to every cell of the state
void SubCell4_inv(unsigned char state[4][4])
{
    // the 4x4 state is contiguous in memory: walk it as a flat 16-byte array
    unsigned char *cell = &state[0][0];
    int n;
    for (n = 0; n < 16; n++)
        cell[n] = sbox_4_inv[cell[n]];
}
// apply the 8-bit Sbox to every cell of the state
void SubCell8(unsigned char state[4][4])
{
    // the 4x4 state is contiguous in memory: walk it as a flat 16-byte array
    unsigned char *cell = &state[0][0];
    int n;
    for (n = 0; n < 16; n++)
        cell[n] = sbox_8[cell[n]];
}
// apply the 8-bit inverse Sbox to every cell of the state
void SubCell8_inv(unsigned char state[4][4])
{
    // the 4x4 state is contiguous in memory: walk it as a flat 16-byte array
    unsigned char *cell = &state[0][0];
    int n;
    for (n = 0; n < 16; n++)
        cell[n] = sbox_8_inv[cell[n]];
}
// Apply the ShiftRows function: cell n of the output is cell P[n] of the
// input (flat row-major indexing).
void ShiftRows(unsigned char state[4][4])
{
    unsigned char rotated[16];
    const unsigned char *flat = &state[0][0];
    int n;
    for (n = 0; n < 16; n++)
        rotated[n] = flat[P[n]];
    memcpy(state, rotated, 16);
}
// Apply the inverse ShiftRows function: cell n of the output is cell
// P_inv[n] of the input (flat row-major indexing).
void ShiftRows_inv(unsigned char state[4][4])
{
    unsigned char rotated[16];
    const unsigned char *flat = &state[0][0];
    int n;
    for (n = 0; n < 16; n++)
        rotated[n] = flat[P_inv[n]];
    memcpy(state, rotated, 16);
}
// Apply the linear diffusion matrix
//M =
//1 0 1 1
//1 0 0 0
//0 1 1 0
//1 0 1 0
void MixColumn(unsigned char state[4][4])
{
    int col;
    // per column: (a,b,c,d) -> (a^c^d, a, b^c, a^c), i.e. M * column
    for (col = 0; col < 4; col++)
    {
        const unsigned char a = state[0][col];
        const unsigned char b = state[1][col];
        const unsigned char c = state[2][col];
        const unsigned char d = state[3][col];
        state[0][col] = (unsigned char)(a ^ c ^ d);
        state[1][col] = a;
        state[2][col] = (unsigned char)(b ^ c);
        state[3][col] = (unsigned char)(a ^ c);
    }
}
// Apply the inverse of the linear diffusion matrix to every column,
// undoing MixColumn exactly.
void MixColumn_inv(unsigned char state[4][4])
{
    for (int j = 0; j < 4; j++)
    {
        unsigned char s0 = state[0][j];
        unsigned char s1 = state[1][j];
        unsigned char s2 = state[2][j];
        unsigned char s3 = state[3][j];
        // rows of M^-1 written out as XOR combinations
        state[0][j] = s1;
        state[1][j] = s1 ^ s2 ^ s3;
        state[2][j] = s1 ^ s3;
        state[3][j] = s0 ^ s3;
    }
}
// decryption function of Skinny
// Decrypts 'input' in place over 'r' rounds with tweakey 'userkey'.
// 'ver' selects the variant via the global versions[] table:
// versions[ver][0] is the block size in bits (64 or 128) and
// versions[ver][1] the tweakey size in bits (selects TK1/TK2/TK3).
void dec(unsigned char *input, const unsigned char *userkey, int ver, int r)
{
unsigned char state[4][4];
// scratch state used only to run the tweakey schedule forward (below)
unsigned char dummy[4][4] = {{0}};
unsigned char keyCells[3][4][4];
int i;
memset(keyCells, 0, 48);
// Unpack the input block and up to three tweakey words into 4x4 cell
// arrays; 64-bit versions pack two 4-bit cells per input byte (high
// nibble first), 128-bit versions use one byte per cell.
for (i = 0; i < 16; i++)
{
if (versions[ver][0] == 64)
{
if (i & 1)
{
state[i >> 2][i & 0x3] = input[i >> 1] & 0xF;
keyCells[0][i >> 2][i & 0x3] = userkey[i >> 1] & 0xF;
if (versions[ver][1] >= 128)
keyCells[1][i >> 2][i & 0x3] = userkey[(i + 16) >> 1] & 0xF;
if (versions[ver][1] >= 192)
keyCells[2][i >> 2][i & 0x3] = userkey[(i + 32) >> 1] & 0xF;
}
else
{
state[i >> 2][i & 0x3] = (input[i >> 1] >> 4) & 0xF;
keyCells[0][i >> 2][i & 0x3] = (userkey[i >> 1] >> 4) & 0xF;
if (versions[ver][1] >= 128)
keyCells[1][i >> 2][i & 0x3] = (userkey[(i + 16) >> 1] >> 4) & 0xF;
if (versions[ver][1] >= 192)
keyCells[2][i >> 2][i & 0x3] = (userkey[(i + 32) >> 1] >> 4) & 0xF;
}
}
else if (versions[ver][0] == 128)
{
state[i >> 2][i & 0x3] = input[i] & 0xFF;
keyCells[0][i >> 2][i & 0x3] = userkey[i] & 0xFF;
if (versions[ver][1] >= 256)
keyCells[1][i >> 2][i & 0x3] = userkey[i + 16] & 0xFF;
if (versions[ver][1] >= 384)
keyCells[2][i >> 2][i & 0x3] = userkey[i + 32] & 0xFF;
}
}
// Advance the tweakey schedule to the last round: AddKey mutates
// keyCells as a side effect; the dummy state absorbs the XORs.
// NOTE(review): assumes AddKey updates keyCells in place -- confirm
// against its definition (not visible here).
for (i = r - 1; i >= 0; i--)
{
AddKey(dummy, keyCells, ver);
}
#ifdef DEBUG
fprintf(fic, "DEC - initial state: ");
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
// Undo the rounds in reverse order, inverting each encryption step:
// MixColumn, ShiftRows, AddKey, AddConstants (self-inverse XOR), SubCell.
for (i = r - 1; i >= 0; i--)
{
MixColumn_inv(state);
#ifdef DEBUG
fprintf(fic, "DEC - round %.2i - after MixColumn_inv: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
ShiftRows_inv(state);
#ifdef DEBUG
fprintf(fic, "DEC - round %.2i - after ShiftRows_inv: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
AddKey_inv(state, keyCells, ver);
#ifdef DEBUG
fprintf(fic, "DEC - round %.2i - after AddKey_inv: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
// AddConstants is its own inverse (pure XOR of round constants)
AddConstants(state, i);
#ifdef DEBUG
fprintf(fic, "DEC - round %.2i - after AddConstants_inv: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
if (versions[ver][0] == 64)
SubCell4_inv(state);
else
SubCell8_inv(state);
#ifdef DEBUG
fprintf(fic, "DEC - round %.2i - after SubCell_inv: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
}
#ifdef DEBUG
fprintf(fic, "DEC - final state: ");
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
// Repack the 4x4 cell state into the caller's byte buffer (in place).
if (versions[ver][0] == 64)
{
for (i = 0; i < 8; i++)
input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF);
}
else if (versions[ver][0] == 128)
{
for (i = 0; i < 16; i++)
input[i] = state[i >> 2][i & 0x3] & 0xFF;
}
}
// encryption function of Skinny
// Encrypts 'input' in place over 'r' rounds with tweakey 'userkey'.
// 'ver' selects the variant via the global versions[] table:
// versions[ver][0] is the block size in bits (64 or 128) and
// versions[ver][1] the tweakey size in bits (selects TK1/TK2/TK3).
void enc(unsigned char *input, const unsigned char *userkey, int ver, int r)
{
unsigned char state[4][4];
unsigned char keyCells[3][4][4];
int i;
memset(keyCells, 0, 48);
// Unpack the input block and up to three tweakey words into 4x4 cell
// arrays; 64-bit versions pack two 4-bit cells per input byte (high
// nibble first), 128-bit versions use one byte per cell.
for (i = 0; i < 16; i++)
{
if (versions[ver][0] == 64)
{
if (i & 1)
{
state[i >> 2][i & 0x3] = input[i >> 1] & 0xF;
keyCells[0][i >> 2][i & 0x3] = userkey[i >> 1] & 0xF;
if (versions[ver][1] >= 128)
keyCells[1][i >> 2][i & 0x3] = userkey[(i + 16) >> 1] & 0xF;
if (versions[ver][1] >= 192)
keyCells[2][i >> 2][i & 0x3] = userkey[(i + 32) >> 1] & 0xF;
}
else
{
state[i >> 2][i & 0x3] = (input[i >> 1] >> 4) & 0xF;
keyCells[0][i >> 2][i & 0x3] = (userkey[i >> 1] >> 4) & 0xF;
if (versions[ver][1] >= 128)
keyCells[1][i >> 2][i & 0x3] = (userkey[(i + 16) >> 1] >> 4) & 0xF;
if (versions[ver][1] >= 192)
keyCells[2][i >> 2][i & 0x3] = (userkey[(i + 32) >> 1] >> 4) & 0xF;
}
}
else if (versions[ver][0] == 128)
{
state[i >> 2][i & 0x3] = input[i] & 0xFF;
keyCells[0][i >> 2][i & 0x3] = userkey[i] & 0xFF;
if (versions[ver][1] >= 256)
keyCells[1][i >> 2][i & 0x3] = userkey[i + 16] & 0xFF;
if (versions[ver][1] >= 384)
keyCells[2][i >> 2][i & 0x3] = userkey[i + 32] & 0xFF;
}
}
#ifdef DEBUG
fprintf(fic, "ENC - initial state: ");
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
// One Skinny round = SubCell, AddConstants, AddKey (which also steps
// the tweakey schedule), ShiftRows, MixColumn.
for (i = 0; i < r; i++)
{
if (versions[ver][0] == 64)
SubCell4(state);
else
SubCell8(state);
#ifdef DEBUG
fprintf(fic, "ENC - round %.2i - after SubCell: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
AddConstants(state, i);
#ifdef DEBUG
fprintf(fic, "ENC - round %.2i - after AddConstants: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
AddKey(state, keyCells, ver);
#ifdef DEBUG
fprintf(fic, "ENC - round %.2i - after AddKey: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
ShiftRows(state);
#ifdef DEBUG
fprintf(fic, "ENC - round %.2i - after ShiftRows: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
MixColumn(state);
#ifdef DEBUG
fprintf(fic, "ENC - round %.2i - after MixColumn: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
} //The last subtweakey should not be added
#ifdef DEBUG
fprintf(fic, "ENC - final state: ");
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
// Repack the 4x4 cell state into the caller's byte buffer (in place).
if (versions[ver][0] == 64)
{
for (i = 0; i < 8; i++)
input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF);
}
else if (versions[ver][0] == 128)
{
for (i = 0; i < 16; i++)
input[i] = state[i >> 2][i & 0x3] & 0xFF;
}
}
// generate test vectors for all the versions of Skinny
// Writes 9 random (TK, P, C, P') tuples to the global FILE *fic, where
// C = enc(P) and P' = dec(C); P' should equal P. Plaintexts are also
// echoed to stdout.
// NOTE(review): the round count is hard-coded to 10, far below the
// official Skinny round counts -- presumably intentional for
// reduced-round experiments; confirm before using these as reference
// vectors.
void TestVectors(int ver)
{
unsigned char p[16];
unsigned char c[16];
unsigned char k[48];
int n;
for (n = 1; n < 10; n++)
{
int i;
// draw a random plaintext; c starts as a copy and is encrypted in place
for (i = 0; i < (versions[ver][0] >> 3); i++)
c[i] = p[i] = rand() & 0xff;
for (i = 0; i < (versions[ver][0] >> 3); i++)
printf("%02x", p[i]);
printf("\n");
// draw a random tweakey of the version's full tweakey length
for (i = 0; i < (versions[ver][1] >> 3); i++)
k[i] = rand() & 0xff;
fprintf(fic, "TK = ");
for (i = 0; i < (versions[ver][1] >> 3); i++)
fprintf(fic, "%02x", k[i]);
fprintf(fic, "\n");
fprintf(fic, "P = ");
for (i = 0; i < (versions[ver][0] >> 3); i++)
fprintf(fic, "%02x", p[i]);
fprintf(fic, "\n");
enc(c, k, ver, 10);
fprintf(fic, "C = ");
for (i = 0; i < (versions[ver][0] >> 3); i++)
fprintf(fic, "%02x", c[i]);
fprintf(fic, "\n");
// decrypt in place; P' must match P if enc/dec are consistent
dec(c, k, ver, 10);
fprintf(fic, "P' = ");
for (i = 0; i < (versions[ver][0] >> 3); i++)
fprintf(fic, "%02x", c[i]);
fprintf(fic, "\n\n");
}
}
// Run N3 boomerang trials on r-round Skinny (variant 'ver') with input
// difference dp, output difference dc, and key differences dk1/dk2.
// Returns the number of returning boomerangs, i.e. quartets for which
// dec(enc(p1)^dc) ^ dec(enc(p1^dp)^dc) == dp under keys k1..k4.
int boomerang(int r, int ver, int N3, unsigned char *dp, unsigned char *dc, unsigned char *dk1, unsigned char *dk2)
{
    int i, hits = 0;
    unsigned char p1[16], p2[16];
    unsigned char c3[16], c4[16];
    unsigned char k1[48], k2[48], k3[48], k4[48];
    const int sbytes = versions[ver][0] >> 3; // state size in bytes
    const int kbytes = versions[ver][1] >> 3; // tweakey size in bytes
    // k1 random; k2 = k1 ^ dk1; k3 = k1 ^ dk2; k4 = k2 ^ dk2
    for (i = 0; i < kbytes; i++)
    {
        k1[i] = rand() & 0xff;
        k2[i] = k1[i] ^ dk1[i];
        k3[i] = k1[i] ^ dk2[i];
        k4[i] = k2[i] ^ dk2[i];
    }
    for (int trial = 0; trial < N3; trial++)
    {
        // random plaintext pair with difference dp
        for (i = 0; i < sbytes; i++)
        {
            p1[i] = rand() & 0xff;
            p2[i] = p1[i] ^ dp[i];
        }
        enc(p1, k1, ver, r);
        enc(p2, k2, ver, r);
        // shift both ciphertexts by dc and decrypt under the related keys
        for (i = 0; i < sbytes; i++)
        {
            c3[i] = p1[i] ^ dc[i];
            c4[i] = p2[i] ^ dc[i];
        }
        dec(c3, k3, ver, r);
        dec(c4, k4, ver, r);
        int match = 1;
        for (i = 0; i < sbytes; i++)
            if ((c3[i] ^ c4[i]) != dp[i])
                match = 0;
        if (match)
            hits++;
    }
    return hits;
}
// Launch N1 parallel workers, each running N2 bunches of N3 boomerang
// queries on R-round Skinny variant 'ver'; prints timing and the
// empirical boomerang probability, and returns the total number of
// returning boomerangs.
double send_boomerangs(int R, int ver, int N1, int N2, int N3, unsigned char *dp, unsigned char *dc, unsigned char *dk1, unsigned char *dk2)
{
    // Parallel execution: one result slot per loop iteration. Indexing
    // by the loop counter (not the OpenMP thread id) keeps every slot
    // written exactly once even if the runtime grants fewer than N1
    // threads and reuses thread ids across iterations.
    int NUM[N1];
    int counter;
    // Compute the query count in double: the int product N1*N2*N3 can
    // overflow for realistic parameter choices.
    double total = (double)N1 * (double)N2 * (double)N3;
    printf("#Rounds: %d rounds\n", R);
    printf("#Total Queries = (#Parallel threads) * (#Bunches per thread) * (#Queries per bunch) = %d * %d * %d = 2^(%f)\n", N1, N2, N3, log(total) / log(2));
    clock_t clock_timer = clock();
    double wall_timer = omp_get_wtime();
    omp_set_num_threads(N1);
#pragma omp parallel for
    for (counter = 0; counter < N1; counter++)
    {
        // seed the per-thread PRNG with the thread id
        int ID = omp_get_thread_num();
        init_prng(ID);
        int num = 0;
        for (int j = 0; j < N2; j++)
        {
            num += boomerang(R, ver, N3, dp, dc, dk1, dk2);
        }
        NUM[counter] = num;
    }
    printf("%s: %0.4f\n", "time on clock", (double)(clock() - clock_timer) / CLOCKS_PER_SEC);
    printf("%s: %0.4f\n", "time on wall", omp_get_wtime() - wall_timer);
    double sum = 0;
    for (int i = 0; i < N1; i++)
        sum += NUM[i];
    printf("sum = %f\n", sum);
    // empirical probability = sum / total, reported as 2^(-x)
    printf("2^(-%f)\n\n", log(total / sum) / log(2));
    printf("##########################\n");
    return sum;
}
// Parse a hex string into a state-sized byte array (one byte per two hex
// digits); the number of bytes is the block size of version 'ver'.
void convert_hexstr_to_statearray(int ver, char hex_str[], unsigned char dx[16])
{
    for (int i = 0; i < (versions[ver][0] >> 3); i++)
    {
        // NUL-terminate the 2-character slice: strtol requires a
        // terminated string (the previous 2-byte buffer was read past
        // its end -- undefined behavior).
        char hex[3] = {hex_str[2 * i], hex_str[2 * i + 1], '\0'};
        dx[i] = (unsigned char)(strtol(hex, NULL, 16) & 0xff);
    }
}
// Parse a hex string into a tweakey-sized byte array (one byte per two
// hex digits); the number of bytes is the tweakey size of version 'ver'.
void convert_hexstr_to_tweakarray(int ver, char hex_str[], unsigned char dt[48])
{
    for (int i = 0; i < (versions[ver][1] >> 3); i++)
    {
        // NUL-terminate the 2-character slice: strtol requires a
        // terminated string (the previous 2-byte buffer was read past
        // its end -- undefined behavior).
        char hex[3] = {hex_str[2 * i], hex_str[2 * i + 1], '\0'};
        dt[i] = (unsigned char)(strtol(hex, NULL, 16) & 0xff);
    }
}
// Driver: runs n independent boomerang experiments on reduced-round
// Skinny and prints the averaged empirical probability as 2^(-x).
int main()
{
// srand((unsigned)time(NULL)); // Initialization, should only be called once. int r = rand();
// init_prng(1);
// //test all versions of Skinny
// for (i = 0; i < (sizeof(versions) / sizeof(*versions)); i++)
// {
// sprintf(name, "test_vectors_%i_%i.txt", versions[i][0], versions[i][1]);
// fic = fopen(name, "w");
// fprintf(fic, "\n\nSkinny-%i/%i: \n", versions[i][0], versions[i][1]);
// TestVectors(i);
// fclose(fic);
// printf("Generating test vectors for Skinny-%i/%i - saved in file test_vectors_%i_%i.txt \n", versions[i][0], versions[i][1], versions[i][0], versions[i][1]);
// }
unsigned char dp[16];
unsigned char dc[16];
unsigned char dk1[48];
unsigned char dk2[48];
// #######################################################################################################
// #######################################################################################################
// ############################## User must change only the following lines ##############################
int n = 5; // Number of independent experiments
int R = 2; // Number of rounds
int ver = 1; // Determine the version:
// [0 = Skinny-64-64]
// [1 = Skinny-64-128]
// [2 = Skinny-64-192]
// [3 = Skinny-128-128]
// [4 = Skinny-128-256]
// [5 = Skinny-128-384]
// Input/output/key differences as hex strings, sized for the chosen version.
char dp_str[] = "0002000000020002";
char dc_str[] = "2000009009002000"; //2000009009002000
char dk1_str[] = "000000C0000000000000001000000000";
char dk2_str[] = "04000000000000000800000000000000";
// #######################################################################################################
// #######################################################################################################
convert_hexstr_to_statearray(ver, dp_str, dp);
convert_hexstr_to_statearray(ver, dc_str, dc);
convert_hexstr_to_tweakarray(ver, dk1_str, dk1);
convert_hexstr_to_tweakarray(ver, dk2_str, dk2);
//########################## Number of queries #########################
int N1 = Nthreads; // Number of parallel threads : N1
int deg = 10;
int N2 = 1 << deg; // Number of bunches per thread : N2 = 2^(deg)
int N3 = 1024; // Number of queries per bunch : N3
//################### Number of total queries : N1*N2*N3 ###############
double sum = 0;
for (int i = 0; i < n; i++)
{
sum += send_boomerangs(R, ver, N1, N2, N3, dp, dc, dk1, dk2);
}
// Average probability over all n*N1*N2*N3 queries, reported as 2^(-x).
printf("\nAverage = 2^(-%0.4f)\n", (log(n) + log(N1) + log(N2) + log(N3) - log(sum))/log(2));
// sum = (double)(n * N1 * N2 * N3) / sum;
// printf("\nAverage = 2^(-%0.2f)\n", log(sum) / log(2));
return 0;
}
|
close_manual.c | // RUN: %libomptarget-compile-run-and-check-aarch64-unknown-linux-gnu
// RUN: %libomptarget-compile-run-and-check-powerpc64-ibm-linux-gnu
// RUN: %libomptarget-compile-run-and-check-powerpc64le-ibm-linux-gnu
// RUN: %libomptarget-compile-run-and-check-x86_64-pc-linux-gnu
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
// ---------------------------------------------------------------------------
// Various definitions copied from OpenMP RTL
extern void __tgt_register_requires(int64_t);
extern void __tgt_target_data_begin(int64_t device_id, int32_t arg_num,
void **args_base, void **args,
int64_t *arg_sizes, int64_t *arg_types);
extern void __tgt_target_data_end(int64_t device_id, int32_t arg_num,
void **args_base, void **args,
int64_t *arg_sizes, int64_t *arg_types);
// End of definitions copied from OpenMP RTL.
// ---------------------------------------------------------------------------
#pragma omp requires unified_shared_memory
#define N 1024
// Verify that the CLOSE map-type modifier forces a real device copy even
// under unified shared memory, by driving __tgt_target_data_begin/end
// manually (requires <stdlib.h> for malloc/free, previously missing).
int main(int argc, char *argv[]) {
  void *host_alloc = 0, *device_alloc = 0;
  int *a = (int *)malloc(N * sizeof(int));
  // Manual registration of requires flags for Clang versions
  // that do not support requires.
  __tgt_register_requires(8);
  // Init
  for (int i = 0; i < N; ++i) {
    a[i] = 10;
  }
  host_alloc = &a[0];
  // Dummy target region that ensures the runtime library is loaded when
  // the target data begin/end functions are manually called below.
#pragma omp target
  {}
  // Manual calls
  int device_id = omp_get_default_device();
  int arg_num = 1;
  void **args_base = (void **)&a;
  void **args = (void **)&a;
  int64_t arg_sizes[arg_num];
  arg_sizes[0] = sizeof(int) * N;
  int64_t arg_types[arg_num];
  // Ox400 enables the CLOSE map type in the runtime:
  // OMP_TGT_MAPTYPE_CLOSE = 0x400
  // OMP_TGT_MAPTYPE_TO = 0x001
  arg_types[0] = 0x400 | 0x001;
  device_alloc = host_alloc;
  __tgt_target_data_begin(device_id, arg_num, args_base, args, arg_sizes,
                          arg_types);
  // Grab the device pointer the runtime associated with 'a'.
#pragma omp target data use_device_ptr(a)
  { device_alloc = a; }
  __tgt_target_data_end(device_id, arg_num, args_base, args, arg_sizes,
                        arg_types);
  // CHECK: a was copied to the device
  if (device_alloc != host_alloc)
    printf("a was copied to the device\n");
  free(a);
  // CHECK: Done!
  printf("Done!\n");
  return 0;
}
|
main.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <time.h>
#ifdef _OPENMP
#include <omp.h>
#else
int omp_get_max_threads () { return 1; }
#endif /* _OPENMP */
#include "ScaleME.h"
#include "precision.h"
#include "itsolver.h"
#include "measure.h"
#include "direct.h"
#include "config.h"
#include "mlfma.h"
#include "util.h"
#include "io.h"
void usage (char *);
// Print the command-line synopsis and option summary to stderr.
void usage (char *name) {
fprintf (stderr, "Usage: %s [-d] [-l #] [-a #] [-n #] [-b] [-f x,y,z,a]\n"
" [-o <prefix>] -s <src> -r <obs> -i <prefix>\n", name);
fprintf (stderr, " -i: Specify input file prefix\n");
fprintf (stderr, " -o: Specify output file prefix (defaults to input prefix)\n");
fprintf (stderr, " -d: Debug mode (prints induced field); specify twice to write after every restart\n");
fprintf (stderr, " -b: Use BiCG-STAB instead of GMRES\n");
fprintf (stderr, " -a: Use ACA far-field transformations, or SVD when tolerance is negative\n");
fprintf (stderr, " -l: Use loose GMRES with the specified number of augmented vectors\n");
fprintf (stderr, " -n: Specify number of points for near-field integration\n");
fprintf (stderr, " -s: Specify the source location or range\n");
fprintf (stderr, " -r: Specify the observation range\n");
fprintf (stderr, " -f: Specify a focal axis x,y,z and width a for the incident field\n");
}
/* Driver for the square-cell acoustic MLFMA solver: parses options,
 * reads the configuration and contrast, solves for each source with
 * GMRES or BiCG-STAB, and writes scattered/total/far fields. */
int main (int argc, char **argv) {
	/* getopt() returns int; storing it in a plain char breaks the
	 * comparison against -1 on platforms where char is unsigned
	 * (e.g. ARM, PowerPC), so 'ch' must be an int. */
	int ch;
	char *inproj = NULL, *outproj = NULL, **arglist, fname[1024],
		fldfmt[1024], guessfmt[1024], *srcspec = NULL, *obspec = NULL;
	int mpirank, mpisize, i, j, k, nit, gsize[3];
	int debug = 0, useaca = 0, usebicg = 0, useloose = 0, usedir = 0;
	int numsrcpts = 5;
	cplx *rhs, *sol, *inc, *field;
	double cputime, wtime;
	long nelt;
	real acatol = -1;
	real dir[4];
	augspace aug;
	measdesc obsmeas, srcmeas;
	solveparm solver;
	MPI_Init (&argc, &argv);
	MPI_Comm_rank (MPI_COMM_WORLD, &mpirank);
	MPI_Comm_size (MPI_COMM_WORLD, &mpisize);
	if (!mpirank) fprintf (stderr, "Square-cell acoustic MLFMA.\n");
	fprintf (stderr, "MPI Rank %d, pid %d, %d threads\n",
			mpirank, getpid(), omp_get_max_threads());
	arglist = argv;
	while ((ch = getopt (argc, argv, "i:o:dba:hl:n:s:r:f:")) != -1) {
		switch (ch) {
		case 'i':
			inproj = optarg;
			break;
		case 'o':
			outproj = optarg;
			break;
		case 'd':
			debug = 1;
			break;
		case 'b':
			usebicg = 1;
			break;
		case 'a':
			acatol = strtod(optarg, NULL);
			useaca = 1;
			break;
		case 'l':
			useloose = strtol(optarg, NULL, 0);
			break;
		case 'n':
			numsrcpts = strtol(optarg, NULL, 0);
			break;
		case 'r':
			obspec = optarg;
			break;
		case 's':
			srcspec = optarg;
			break;
		case 'f':
			/* Focal axis (x,y,z) and width for the incident field. */
			dir[0] = strtod(strtok(optarg, ","), NULL);
			dir[1] = strtod(strtok(NULL, ","), NULL);
			dir[2] = strtod(strtok(NULL, ","), NULL);
			dir[3] = strtod(strtok(NULL, ","), NULL);
			usedir = 1;
			break;
		default:
			if (!mpirank) usage (arglist[0]);
			MPI_Abort (MPI_COMM_WORLD, EXIT_FAILURE);
		}
	}
	/* The project name must be specified. */
	if (!inproj || !srcspec || !obspec) {
		if (!mpirank) usage (arglist[0]);
		MPI_Abort (MPI_COMM_WORLD, EXIT_FAILURE);
	}
	if (!outproj) outproj = inproj;
	if (!mpirank) fprintf (stderr, "Reading configuration file.\n");
	/* Read the basic configuration. */
	sprintf (fname, "%s.input", inproj);
	getconfig (fname, &solver, NULL);
	/* Build the source and observer location specifiers. */
	buildsrc (&srcmeas, srcspec);
	buildobs (&obsmeas, obspec);
	/* Initialize ScaleME and find the local basis set. */
	ScaleME_preconf (useaca);
	ScaleME_getListOfLocalBasis (&(fmaconf.numbases), &(fmaconf.bslist));
	nelt = (long)fmaconf.numbases * (long)fmaconf.bspboxvol;
	/* Allocate the RHS vector, solution and incident field in one
	 * block; sol and inc alias into it, so only rhs is freed. */
	rhs = malloc (3 * nelt * sizeof(cplx));
	sol = rhs + nelt;
	inc = sol + nelt;
	/* Allocate the local portion of the contrast storage. */
	fmaconf.contrast = malloc (nelt * sizeof(cplx));
	/* Allocate the observation array. */
	field = malloc (obsmeas.count * sizeof(cplx));
	/* Store the grid size for printing of field values. */
	gsize[0] = fmaconf.nx; gsize[1] = fmaconf.ny; gsize[2] = fmaconf.nz;
	if (!mpirank) fprintf (stderr, "Reading local portion of contrast file.\n");
	/* Read the contrast for the local basis set. */
	sprintf (fname, "%s.contrast", inproj);
	/* Try both formats. */
	getctgrp (fmaconf.contrast, fname, gsize,
			fmaconf.bslist, fmaconf.numbases, fmaconf.bspbox);
	/* First build the integration rules that will be used. */
	bldintrules (numsrcpts, 0);
	/* Precalculate some values for the FMM and direct interactions. */
	fmmprecalc (acatol, useaca);
	i = dirprecalc (numsrcpts);
	if (!mpirank) fprintf (stderr, "Finished precomputing %d near interactions.\n", i);
	/* Finish the ScaleME initialization. */
	ScaleME_postconf ();
	/* Build the root interpolation matrix for measurements. */
	ScaleME_buildRootInterpMat (obsmeas.imat, fmaconf.interpord,
			obsmeas.ntheta, obsmeas.nphi, obsmeas.trange, obsmeas.prange);
	/* Find the width of the integer label in the field name.
	 * NOTE(review): ceil(log10(count)) is 0 when count == 1 --
	 * presumably acceptable (width-0 format); confirm if single-source
	 * runs need padded names. */
	i = (int)ceil(log10(srcmeas.count));
	sprintf (fldfmt, "%%s.tx%%0%dd.farfld", i);
	sprintf (guessfmt, "%%s.tx%%0%dd.%%s", i);
	if (!mpirank) fprintf (stderr, "Initialization complete.\n");
	/* Ensure each process is waiting at the start of the loop. */
	MPI_Barrier (MPI_COMM_WORLD);
	/* Initialize the loose GMRES buffer. */
	if (useloose > 0) {
		aug.start = -1;
		aug.nmax = useloose;
		aug.ntot = 0;
		aug.z = malloc(2 * aug.nmax * nelt * sizeof(cplx));
		aug.az = aug.z + aug.nmax * nelt;
	}
	for (i = 0; i < srcmeas.count; ++i) {
		if (!mpirank)
			fprintf (stderr, "Running simulation for source %d.\n", i + 1);
		/* Attempt to read the pre-computed RHS from a file.
		 * If this fails, build the RHS directly. */
		sprintf (fname, guessfmt, inproj, i, "rhs");
		if (!getctgrp (rhs, fname, gsize, fmaconf.bslist,
					fmaconf.numbases, fmaconf.bspbox)) {
			if (!mpirank) fprintf (stderr, "Building RHS.\n");
			/* Integrate the incident field over each cell. */
			buildrhs (inc, srcmeas.locations + 3 * i, srcmeas.plane, usedir ? dir : NULL);
			/* Convert the integrated field to the average. */
#pragma omp parallel for default(shared) private(j)
			for (j = 0; j < nelt; ++j) inc[j] /= fmaconf.cellvol;
			/* Apply the scattering integral to the incident field. */
			matvec(rhs, inc, sol, 0);
		} else {
			/* If the RHS was read, it is the incident field.
			 * Set the incident field in inc to zero. */
#pragma omp parallel for default(shared) private(j)
			for (j = 0; j < nelt; ++j) inc[j] = 0.0;
		}
		/* Attempt to read an initial first guess from a file. */
		sprintf (fname, guessfmt, inproj, i, "guess");
		k = getctgrp (sol, fname, gsize, fmaconf.bslist,
				fmaconf.numbases, fmaconf.bspbox);
		/* Restart if the true residual is not sufficiently low. */
		for (j = 0, nit = 1; j < solver.restart && nit > 0; ++j) {
			/* Reset the debug tripwire to ensure the final output
			 * is written if intermediate writes were performed. */
			debug = debug ? 1 : 0;
			cputime = (double)clock() / CLOCKS_PER_SEC;
			wtime = MPI_Wtime();
			if (usebicg) nit = bicgstab (rhs, sol, k || j,
					solver.maxit, solver.epscg, 0);
			else nit = gmres (rhs, sol, k || j, solver.maxit,
					solver.epscg, 0, useloose > 0 ? &aug : NULL);
			cputime = (double)clock() / CLOCKS_PER_SEC - cputime;
			wtime = MPI_Wtime() - wtime;
			if (!mpirank) {
				fprintf (stderr, "CPU time for solution: %0.6g\n", cputime);
				fprintf (stderr, "Wall time for solution: %0.6g\n", wtime);
			}
			/* Update the total field before every restart, but
			 * only if the tripwire file exists. */
			sprintf (fname, guessfmt, inproj, i, "volwrite");
			if (!access (fname, F_OK)) {
				/* The master process should unlink the tripwire. */
				MPI_Barrier (MPI_COMM_WORLD);
				if (!mpirank) unlink (fname);
				sprintf (fname, guessfmt, outproj, i, "scatfld");
				prtctgrp (fname, sol, gsize, fmaconf.bslist,
						fmaconf.numbases, fmaconf.bspbox);
				/* If restarts have finished, avoid an
				 * immediate rewrite of this field. */
				debug = debug ? 2 : 0;
			}
		}
		/* Write the scattered field if desired and not just written. */
		if (debug == 1) {
			sprintf (fname, guessfmt, outproj, i, "scatfld");
			prtctgrp (fname, sol, gsize, fmaconf.bslist,
					fmaconf.numbases, fmaconf.bspbox);
		}
		/* Convert the scattered field to the total field. */
#pragma omp parallel for default(shared) private(j)
		for (j = 0; j < nelt; ++j) sol[j] += inc[j];
		/* Write the total field if desired and not just written. */
		if (debug > 0) {
			sprintf (fname, guessfmt, outproj, i, "totfld");
			prtctgrp (fname, sol, gsize, fmaconf.bslist,
					fmaconf.numbases, fmaconf.bspbox);
		}
		/* Convert total field into contrast current. */
#pragma omp parallel for default(shared) private(j)
		for (j = 0; j < nelt; ++j) sol[j] *= fmaconf.contrast[j];
		/* Compute and write out all observation fields. */
		farfield (sol, &obsmeas, field);
		/* Append the field for the current transmitter. */
		if (!mpirank) {
			sprintf (fname, fldfmt, outproj, i);
			writefld (fname, obsmeas.nphi, obsmeas.ntheta, field);
		}
	}
	ScaleME_finalizeParHostFMA ();
	freedircache ();
	delmeas (&srcmeas);
	delmeas (&obsmeas);
	delintrules ();
	if (useloose > 0) free (aug.z);
	free (rhs);
	free (field);
	free (fmaconf.contrast);
	free (fmaconf.radpats);
	MPI_Barrier (MPI_COMM_WORLD);
	MPI_Finalize ();
	return EXIT_SUCCESS;
}
|
GB_add_phase0.c | //------------------------------------------------------------------------------
// GB_add_phase0: find vectors of C to compute for C=A+B or C<M>=A+B
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// The eWise add of two matrices, C=A+B, C<M>=A+B, or C<!M>=A+B starts with
// this phase, which determines which vectors of C need to be computed.
// This phase is also used for GB_masker, and for GB_SUBASSIGN_TWO_SLICE.
// On input, A and B are the two matrices being added, and M is the optional
// mask matrix (not complemented). The complemented mask is handed in GB_mask,
// not here.
// On output, an integer (Cnvec) a boolean (Ch_to_Mh) and up to 3 arrays are
// returned, either NULL or of size Cnvec. Let n = A->vdim be the vector
// dimension of A, B, M and C.
// Ch: the list of vectors to compute. If not NULL, Ch [k] = j is the
// kth vector in C to compute, which will become the hyperlist C->h of C.
// Note that some of these vectors may turn out to be empty, because of
// the mask, or because the vector j appeared in A or B, but is empty.
// It is pruned at the end of GB_add_phase2. If Ch is NULL then it is an
// implicit list of size n, and Ch [k] == k for all k = 0:n-1. In this
// case, C will be a sparse matrix, not hypersparse. Thus, the kth
// vector is j = GBH (Ch, k).
// Ch is freed by GB_add if phase1 fails. phase2 either frees it or
// transplants it into C, if C is hypersparse.
// Ch_is_Mh: true if the mask M is present, hypersparse, and not
// complemented, false otherwise. In this case Ch is a deep copy of Mh.
// Only GB_add uses this option; it is not used by GB_masker or
// GB_SUBASSIGN_TWO_SLICE (Ch_is_Mh is always false in this case). This
// is determined by passing in p_Ch_is_Mh as a NULL or non-NULL pointer.
// C_to_A: if A is hypersparse, then C_to_A [k] = kA if the kth vector,
// j = GBH (Ch, k) appears in A, as j = Ah [kA]. If j does not appear in
// A, then C_to_A [k] = -1. If A is not hypersparse, then C_to_A is
// returned as NULL.
// C_to_B: if B is hypersparse, then C_to_B [k] = kB if the kth vector,
// j = GBH (Ch, k) appears in B, as j = Bh [kB]. If j does not appear in
// B, then C_to_B [k] = -1. If B is not hypersparse, then C_to_B is
// returned as NULL.
// C_to_M: if M is hypersparse, and Ch_is_Mh is false, then C_to_M [k] =
// kM if the kth vector, j = GBH (Ch, k) appears in M, as j = Mh [kM]. If
// j does not appear in M, then C_to_M [k] = -1. If M is not hypersparse,
// then C_to_M is returned as NULL.
// M, A, B: any sparsity structure (hypersparse, sparse, bitmap, or full)
// C: not present here, but its sparsity structure is finalized, via the
// input/output parameter C_sparsity.
#include "GB_add.h"
#define GB_FREE_WORK \
{ \
GB_FREE (kA_start) ; \
GB_FREE (kB_start) ; \
GB_FREE (kC_start) ; \
}
//------------------------------------------------------------------------------
// GB_allocate_result
//------------------------------------------------------------------------------
// Allocate the output arrays for GB_add_phase0. Each non-NULL handle
// receives a freshly allocated array of Cnvec int64_t entries; a NULL
// handle means that output is not wanted. If any allocation fails, all
// successful allocations are freed and false is returned.
static inline bool GB_allocate_result
(
    int64_t Cnvec,
    int64_t *GB_RESTRICT *Ch_handle,
    int64_t *GB_RESTRICT *C_to_M_handle,
    int64_t *GB_RESTRICT *C_to_A_handle,
    int64_t *GB_RESTRICT *C_to_B_handle
)
{
    // treat the four outputs uniformly via a table of handles
    int64_t *GB_RESTRICT *handles [4] =
        { Ch_handle, C_to_M_handle, C_to_A_handle, C_to_B_handle } ;
    bool ok = true ;
    for (int t = 0 ; t < 4 ; t++)
    {
        if (handles [t] != NULL)
        {
            (*(handles [t])) = GB_MALLOC (Cnvec, int64_t) ;
            ok = ok && ((*(handles [t])) != NULL) ;
        }
    }
    if (!ok)
    {
        // out of memory: release whatever was allocated
        for (int t = 0 ; t < 4 ; t++)
        {
            if (handles [t] != NULL)
            {
                GB_FREE (*(handles [t])) ;
            }
        }
    }
    return (ok) ;
}
//------------------------------------------------------------------------------
// GB_add_phase0: find the vectors of C for C<M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB_add_phase0 // find vectors in C for C=A+B or C<M>=A+B
(
int64_t *p_Cnvec, // # of vectors to compute in C
int64_t *GB_RESTRICT *Ch_handle, // Ch: size Cnvec, or NULL
int64_t *GB_RESTRICT *C_to_M_handle, // C_to_M: size Cnvec, or NULL
int64_t *GB_RESTRICT *C_to_A_handle, // C_to_A: size Cnvec, or NULL
int64_t *GB_RESTRICT *C_to_B_handle, // C_to_B: of size Cnvec, or NULL
bool *p_Ch_is_Mh, // if true, then Ch == Mh
int *C_sparsity, // sparsity structure of C
const GrB_Matrix M, // optional mask, may be NULL; not complemented
const GrB_Matrix A, // first input matrix
const GrB_Matrix B, // second input matrix
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
// M, A, and B can be jumbled for this phase, but not phase1 or phase2
ASSERT (p_Cnvec != NULL) ;
ASSERT (Ch_handle != NULL) ;
ASSERT (C_to_A_handle != NULL) ;
ASSERT (C_to_B_handle != NULL) ;
ASSERT_MATRIX_OK_OR_NULL (M, "M for add phase0", GB0) ;
ASSERT (!GB_ZOMBIES (M)) ;
ASSERT (GB_JUMBLED_OK (M)) ; // pattern not accessed
ASSERT (!GB_PENDING (M)) ;
ASSERT_MATRIX_OK (A, "A for add phase0", GB0) ;
ASSERT (!GB_ZOMBIES (A)) ;
ASSERT (GB_JUMBLED_OK (B)) ; // pattern not accessed
ASSERT (!GB_PENDING (A)) ;
ASSERT_MATRIX_OK (B, "B for add phase0", GB0) ;
ASSERT (!GB_ZOMBIES (B)) ;
ASSERT (GB_JUMBLED_OK (A)) ; // pattern not accessed
ASSERT (!GB_PENDING (B)) ;
ASSERT (A->vdim == B->vdim) ;
ASSERT (A->vlen == B->vlen) ;
ASSERT (GB_IMPLIES (M != NULL, A->vdim == M->vdim)) ;
ASSERT (GB_IMPLIES (M != NULL, A->vlen == M->vlen)) ;
//--------------------------------------------------------------------------
// initializations
//--------------------------------------------------------------------------
(*p_Cnvec) = 0 ;
(*Ch_handle) = NULL ;
if (C_to_M_handle != NULL)
{
(*C_to_M_handle) = NULL ;
}
(*C_to_A_handle) = NULL ;
(*C_to_B_handle) = NULL ;
if (p_Ch_is_Mh != NULL)
{
(*p_Ch_is_Mh) = false ;
}
if ((*C_sparsity) == GxB_BITMAP || (*C_sparsity) == GxB_FULL)
{
// nothing to do in phase0 for C bitmap or full
(*p_Cnvec) = A->vdim ; // not needed; to be consistent with GB_emult
return (GrB_SUCCESS) ;
}
int64_t *GB_RESTRICT Ch = NULL ;
int64_t *GB_RESTRICT C_to_M = NULL ;
int64_t *GB_RESTRICT C_to_A = NULL ;
int64_t *GB_RESTRICT C_to_B = NULL ;
int64_t *GB_RESTRICT kA_start = NULL ;
int64_t *GB_RESTRICT kB_start = NULL ;
int64_t *GB_RESTRICT kC_start = NULL ;
int ntasks = 0 ;
//--------------------------------------------------------------------------
// determine the number of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = 1 ; // nthreads depends on Cnvec, computed below
//--------------------------------------------------------------------------
// get content of M, A, and B
//--------------------------------------------------------------------------
int64_t Cnvec ;
int64_t n = A->vdim ;
int64_t Anvec = A->nvec ;
int64_t vlen = A->vlen ;
const int64_t *GB_RESTRICT Ap = A->p ;
const int64_t *GB_RESTRICT Ah = A->h ;
bool A_is_hyper = (Ah != NULL) ;
#define GB_Ah(k) (A_is_hyper ? Ah [k] : (k))
int64_t Bnvec = B->nvec ;
const int64_t *GB_RESTRICT Bp = B->p ;
const int64_t *GB_RESTRICT Bh = B->h ;
bool B_is_hyper = (Bh != NULL) ;
int64_t Mnvec = 0 ;
const int64_t *GB_RESTRICT Mp = NULL ;
const int64_t *GB_RESTRICT Mh = NULL ;
bool M_is_hyper = GB_IS_HYPERSPARSE (M) ;
if (M != NULL)
{
Mnvec = M->nvec ;
Mp = M->p ;
Mh = M->h ;
}
// For GB_add, if M is present, hypersparse, and not complemented, then C
// will be hypersparse, and it will have set of vectors as M (Ch == Mh).
// For GB_masker, Ch is never equal to Mh.
bool Ch_is_Mh = (p_Ch_is_Mh != NULL) && (M != NULL && M_is_hyper) ;
//--------------------------------------------------------------------------
// find the set union of the non-empty vectors of A and B
//--------------------------------------------------------------------------
if (Ch_is_Mh)
{
//----------------------------------------------------------------------
// C and M are hypersparse, with the same vectors as the hypersparse M
//----------------------------------------------------------------------
(*C_sparsity) = GxB_HYPERSPARSE ;
ASSERT (M_is_hyper) ;
Cnvec = Mnvec ;
nthreads = GB_nthreads (Cnvec, chunk, nthreads_max) ;
if (!GB_allocate_result (Cnvec, &Ch, NULL,
(A_is_hyper) ? (&C_to_A) : NULL, (B_is_hyper) ? (&C_to_B) : NULL))
{
// out of memory
GB_FREE_WORK ;
return (GrB_OUT_OF_MEMORY) ;
}
// copy Mh into Ch. Ch is Mh so C_to_M is not needed.
GB_memcpy (Ch, Mh, Mnvec * sizeof (int64_t), nthreads) ;
// construct the mapping from C to A and B, if they are hypersparse
if (A_is_hyper || B_is_hyper)
{
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < Cnvec ; k++)
{
int64_t j = Ch [k] ;
if (A_is_hyper)
{
// C_to_A [k] = kA if Ah [kA] == j and A(:,j) is non-empty
int64_t kA = 0, pA, pA_end ;
GB_lookup (true, Ah, Ap, vlen, &kA, Anvec-1, j,
&pA, &pA_end) ;
C_to_A [k] = (pA < pA_end) ? kA : -1 ;
}
if (B_is_hyper)
{
// C_to_B [k] = kB if Bh [kB] == j and B(:,j) is non-empty
int64_t kB = 0, pB, pB_end ;
GB_lookup (true, Bh, Bp, vlen, &kB, Bnvec-1, j,
&pB, &pB_end) ;
C_to_B [k] = (pB < pB_end) ? kB : -1 ;
}
}
}
}
else if (A_is_hyper && B_is_hyper)
{
//----------------------------------------------------------------------
// A and B are hypersparse: C will be hypersparse
//----------------------------------------------------------------------
// Ch is the set union of Ah and Bh. This is handled with a parallel
// merge, since Ah and Bh are both sorted lists.
(*C_sparsity) = GxB_HYPERSPARSE ;
//----------------------------------------------------------------------
// create the tasks to construct Ch
//----------------------------------------------------------------------
double work = GB_IMIN (Anvec + Bnvec, n) ;
nthreads = GB_nthreads (work, chunk, nthreads_max) ;
ntasks = (nthreads == 1) ? 1 : (64 * nthreads) ;
ntasks = GB_IMIN (ntasks, work) ;
// allocate workspace
kA_start = GB_MALLOC (ntasks+1, int64_t) ;
kB_start = GB_MALLOC (ntasks+1, int64_t) ;
kC_start = GB_MALLOC (ntasks+1, int64_t) ;
if (kA_start == NULL || kB_start == NULL || kC_start == NULL)
{
// out of memory
GB_FREE_WORK ;
return (GrB_OUT_OF_MEMORY) ;
}
kA_start [0] = (Anvec == 0) ? -1 : 0 ;
kB_start [0] = (Bnvec == 0) ? -1 : 0 ;
kA_start [ntasks] = (Anvec == 0) ? -1 : Anvec ;
kB_start [ntasks] = (Bnvec == 0) ? -1 : Bnvec ;
for (int taskid = 1 ; taskid < ntasks ; taskid++)
{
// create tasks: A and B are both hyper
double target_work = ((ntasks-taskid) * work) / ntasks ;
GB_slice_vector (NULL, NULL,
&(kA_start [taskid]), &(kB_start [taskid]),
0, 0, NULL, // Mi not present
0, Anvec, Ah, // Ah, explicit list
0, Bnvec, Bh, // Bh, explicit list
n, // Ah and Bh have dimension n
target_work) ;
}
//----------------------------------------------------------------------
// count the entries in Ch for each task
//----------------------------------------------------------------------
int taskid ;
#pragma omp parallel for num_threads(nthreads) schedule (dynamic,1)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
// merge Ah and Bh into Ch
int64_t kA = kA_start [taskid] ;
int64_t kB = kB_start [taskid] ;
int64_t kA_end = kA_start [taskid+1] ;
int64_t kB_end = kB_start [taskid+1] ;
int64_t kC = 0 ;
for ( ; kA < kA_end && kB < kB_end ; kC++)
{
int64_t jA = GB_Ah (kA) ;
int64_t jB = Bh [kB] ;
if (jA < jB)
{
// jA appears in A but not B
kA++ ;
}
else if (jB < jA)
{
// jB appears in B but not A
kB++ ;
}
else
{
// j = jA = jB appears in both A and B
kA++ ;
kB++ ;
}
}
kC_start [taskid] = kC + (kA_end - kA) + (kB_end - kB) ;
}
//----------------------------------------------------------------------
// cumulative sum of entries in Ch for each task
//----------------------------------------------------------------------
GB_cumsum (kC_start, ntasks, NULL, 1) ;
Cnvec = kC_start [ntasks] ;
//----------------------------------------------------------------------
// allocate the result: Ch and the mappings C_to_[MAB]
//----------------------------------------------------------------------
// C will be hypersparse, so Ch is allocated. The mask M is ignored
// for computing Ch. Ch is the set union of Ah and Bh.
if (!GB_allocate_result (Cnvec, &Ch,
(M_is_hyper) ? (&C_to_M) : NULL, &C_to_A, &C_to_B))
{
// out of memory
GB_FREE_WORK ;
return (GrB_OUT_OF_MEMORY) ;
}
//----------------------------------------------------------------------
// compute the result: Ch and the mappings C_to_[AB]
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule (dynamic,1)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
// merge Ah and Bh into Ch
int64_t kA = kA_start [taskid] ;
int64_t kB = kB_start [taskid] ;
int64_t kC = kC_start [taskid] ;
int64_t kA_end = kA_start [taskid+1] ;
int64_t kB_end = kB_start [taskid+1] ;
// merge Ah and Bh into Ch
for ( ; kA < kA_end && kB < kB_end ; kC++)
{
int64_t jA = GB_Ah (kA) ;
int64_t jB = Bh [kB] ;
if (jA < jB)
{
// append jA to Ch
Ch [kC] = jA ;
C_to_A [kC] = kA++ ;
C_to_B [kC] = -1 ; // jA does not appear in B
}
else if (jB < jA)
{
// append jB to Ch
Ch [kC] = jB ;
C_to_A [kC] = -1 ; // jB does not appear in A
C_to_B [kC] = kB++ ;
}
else
{
// j appears in both A and B; append it to Ch
Ch [kC] = jA ;
C_to_A [kC] = kA++ ;
C_to_B [kC] = kB++ ;
}
}
if (kA < kA_end)
{
// B is exhausted but A is not
for ( ; kA < kA_end ; kA++, kC++)
{
// append jA to Ch
int64_t jA = GB_Ah (kA) ;
Ch [kC] = jA ;
C_to_A [kC] = kA ;
C_to_B [kC] = -1 ;
}
}
else if (kB < kB_end)
{
// A is exhausted but B is not
for ( ; kB < kB_end ; kB++, kC++)
{
// append jB to Ch
int64_t jB = Bh [kB] ;
Ch [kC] = jB ;
C_to_A [kC] = -1 ;
C_to_B [kC] = kB ;
}
}
ASSERT (kC == kC_start [taskid+1]) ;
}
//----------------------------------------------------------------------
// check result via a sequential merge
//----------------------------------------------------------------------
#ifdef GB_DEBUG
// merge Ah and Bh into Ch
int64_t kA = 0 ;
int64_t kB = 0 ;
int64_t kC = 0 ;
for ( ; kA < Anvec && kB < Bnvec ; kC++)
{
int64_t jA = GB_Ah (kA) ;
int64_t jB = Bh [kB] ;
if (jA < jB)
{
// append jA to Ch
ASSERT (Ch [kC] == jA) ;
ASSERT (C_to_A [kC] == kA) ; kA++ ;
ASSERT (C_to_B [kC] == -1) ; // jA does not appear in B
}
else if (jB < jA)
{
// append jB to Ch
ASSERT (Ch [kC] == jB) ;
ASSERT (C_to_A [kC] == -1) ; // jB does not appear in A
ASSERT (C_to_B [kC] == kB) ; kB++ ;
}
else
{
// j appears in both A and B; append it to Ch
ASSERT (Ch [kC] == jA) ;
ASSERT (C_to_A [kC] == kA) ; kA++ ;
ASSERT (C_to_B [kC] == kB) ; kB++ ;
}
}
if (kA < Anvec)
{
// B is exhausted but A is not
for ( ; kA < Anvec ; kA++, kC++)
{
// append jA to Ch
int64_t jA = GB_Ah (kA) ;
ASSERT (Ch [kC] == jA) ;
ASSERT (C_to_A [kC] == kA) ;
ASSERT (C_to_B [kC] == -1) ;
}
}
else if (kB < Bnvec)
{
// A is exhausted but B is not
for ( ; kB < Bnvec ; kB++, kC++)
{
// append jB to Ch
int64_t jB = Bh [kB] ;
ASSERT (Ch [kC] == jB) ;
ASSERT (C_to_A [kC] == -1) ;
ASSERT (C_to_B [kC] == kB) ;
}
}
ASSERT (kC == Cnvec) ;
#endif
}
else if (A_is_hyper && !B_is_hyper)
{
//----------------------------------------------------------------------
// A is hypersparse, B is not hypersparse
//----------------------------------------------------------------------
// C will be sparse. Construct the C_to_A mapping.
Cnvec = n ;
nthreads = GB_nthreads (Cnvec, chunk, nthreads_max) ;
if (!GB_allocate_result (Cnvec, NULL,
(M_is_hyper) ? (&C_to_M) : NULL, &C_to_A, NULL))
{
// out of memory
GB_FREE_WORK ;
return (GrB_OUT_OF_MEMORY) ;
}
int64_t j ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (j = 0 ; j < n ; j++)
{
C_to_A [j] = -1 ;
}
// scatter Ah into C_to_A
int64_t kA ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (kA = 0 ; kA < Anvec ; kA++)
{
int64_t jA = GB_Ah (kA) ;
C_to_A [jA] = kA ;
}
}
else if (!A_is_hyper && B_is_hyper)
{
//----------------------------------------------------------------------
// A is not hypersparse, B is hypersparse
//----------------------------------------------------------------------
// C will be sparse. Construct the C_to_B mapping.
Cnvec = n ;
nthreads = GB_nthreads (Cnvec, chunk, nthreads_max) ;
if (!GB_allocate_result (Cnvec, NULL,
(M_is_hyper) ? (&C_to_M) : NULL, NULL, &C_to_B))
{
// out of memory
GB_FREE_WORK ;
return (GrB_OUT_OF_MEMORY) ;
}
int64_t j ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (j = 0 ; j < n ; j++)
{
C_to_B [j] = -1 ;
}
// scatter Bh into C_to_B
int64_t kB ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (kB = 0 ; kB < Bnvec ; kB++)
{
int64_t jB = Bh [kB] ;
C_to_B [jB] = kB ;
}
}
else
{
//----------------------------------------------------------------------
// A and B are both non-hypersparse
//----------------------------------------------------------------------
// C will be sparse
Cnvec = n ;
nthreads = GB_nthreads (Cnvec, chunk, nthreads_max) ;
if (!GB_allocate_result (Cnvec, NULL,
(M_is_hyper) ? (&C_to_M) : NULL, NULL, NULL))
{
// out of memory
GB_FREE_WORK ;
return (GrB_OUT_OF_MEMORY) ;
}
}
//--------------------------------------------------------------------------
// construct C_to_M if needed, if M is hypersparse
//--------------------------------------------------------------------------
if (C_to_M != NULL)
{
ASSERT (M_is_hyper) ;
if (Ch != NULL)
{
// C is hypersparse
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < Cnvec ; k++)
{
int64_t j = Ch [k] ;
// C_to_M [k] = kM if Mh [kM] == j and M(:,j) is non-empty
int64_t kM = 0, pM, pM_end ;
GB_lookup (true, Mh, Mp, vlen, &kM, Mnvec-1, j, &pM, &pM_end) ;
C_to_M [k] = (pM < pM_end) ? kM : -1 ;
}
}
else
{
// C is sparse
int64_t j ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (j = 0 ; j < n ; j++)
{
C_to_M [j] = -1 ;
}
// scatter Mh into C_to_M
int64_t kM ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (kM = 0 ; kM < Mnvec ; kM++)
{
int64_t jM = Mh [kM] ;
C_to_M [jM] = kM ;
}
}
}
//--------------------------------------------------------------------------
// return result
//--------------------------------------------------------------------------
(*p_Cnvec) = Cnvec ;
(*Ch_handle) = Ch ;
if (C_to_M_handle != NULL)
{
(*C_to_M_handle) = C_to_M ;
}
(*C_to_A_handle) = C_to_A ;
(*C_to_B_handle) = C_to_B ;
if (p_Ch_is_Mh != NULL)
{
// return Ch_is_Mh to GB_add. For GB_masker, Ch is never Mh.
(*p_Ch_is_Mh) = Ch_is_Mh ;
}
//--------------------------------------------------------------------------
// The code below describes what the output contains:
//--------------------------------------------------------------------------
#ifdef GB_DEBUG
// the mappings are only constructed when C is sparse or hypersparse
ASSERT ((*C_sparsity) == GxB_SPARSE || (*C_sparsity) == GxB_HYPERSPARSE) ;
ASSERT (A != NULL) ; // A and B are always present
ASSERT (B != NULL) ;
int64_t jlast = -1 ;
for (int64_t k = 0 ; k < Cnvec ; k++)
{
// C(:,j) is in the list, as the kth vector
int64_t j ;
if (Ch == NULL)
{
// C will be constructed as sparse
ASSERT ((*C_sparsity) == GxB_SPARSE) ;
j = k ;
}
else
{
// C will be constructed as hypersparse
ASSERT ((*C_sparsity) == GxB_HYPERSPARSE) ;
j = Ch [k] ;
}
// vectors j in Ch are sorted, and in the range 0:n-1
ASSERT (j >= 0 && j < n) ;
ASSERT (j > jlast) ;
jlast = j ;
// see if A (:,j) exists
if (C_to_A != NULL)
{
// A is hypersparse
ASSERT (A_is_hyper) ;
int64_t kA = C_to_A [k] ;
ASSERT (kA >= -1 && kA < A->nvec) ;
if (kA >= 0)
{
int64_t jA = GB_Ah (kA) ;
ASSERT (j == jA) ;
}
}
else
{
// A is not hypersparse
// C_to_A exists only if A is hypersparse
ASSERT (!A_is_hyper) ;
}
// see if B (:,j) exists
if (C_to_B != NULL)
{
// B is hypersparse
ASSERT (B_is_hyper) ;
int64_t kB = C_to_B [k] ;
ASSERT (kB >= -1 && kB < B->nvec) ;
if (kB >= 0)
{
int64_t jB = B->h [kB] ;
ASSERT (j == jB) ;
}
}
else
{
// B is not hypersparse
// C_to_B exists only if B is hypersparse
ASSERT (!B_is_hyper) ;
}
// see if M (:,j) exists
if (Ch_is_Mh)
{
// Ch is the same as Mh
ASSERT (M != NULL) ;
ASSERT (M_is_hyper) ;
ASSERT (Ch != NULL && M->h != NULL && Ch [k] == M->h [k]) ;
ASSERT (C_to_M == NULL) ;
}
else if (C_to_M != NULL)
{
// M is present and hypersparse
ASSERT (M != NULL) ;
ASSERT (M_is_hyper) ;
int64_t kM = C_to_M [k] ;
ASSERT (kM >= -1 && kM < M->nvec) ;
if (kM >= 0)
{
int64_t jM = M->h [kM] ;
ASSERT (j == jM) ;
}
}
else
{
// M is not present, or present and sparse, bitmap or full
ASSERT (M == NULL || !M_is_hyper) ;
}
}
#endif
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
}
|
libperf.c | /**
* Copyright (C) Mellanox Technologies Ltd. 2001-2019. ALL RIGHTS RESERVED.
* Copyright (C) UT-Battelle, LLC. 2015. ALL RIGHTS RESERVED.
* Copyright (C) The University of Tennessee and The University
* of Tennessee Research Foundation. 2015-2016. ALL RIGHTS RESERVED.
* Copyright (C) ARM Ltd. 2017-2021. ALL RIGHTS RESERVED.
* Copyright (C) Huawei Technologies Co., Ltd. 2021. ALL RIGHTS RESERVED.
* See file LICENSE for terms.
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include <ucs/debug/log.h>
#include <ucs/arch/bitops.h>
#include <ucs/sys/module.h>
#include <ucs/sys/string.h>
#include <string.h>
#include <tools/perf/lib/libperf_int.h>
#include <unistd.h>
#if _OPENMP
#include <omp.h>
#endif /* _OPENMP */
/* Select the 32- or 64-bit atomic capability bit for operation (_op) based on
 * the message size (_size), storing it into *(_op32) or *(_op64).
 * NOTE: on an unsupported size this logs an error (using the op-name table
 * (_msg)) and RETURNS from the calling function with (_status). */
#define ATOMIC_OP_CONFIG(_size, _op32, _op64, _op, _msg, _params, _status) \
    _status = __get_atomic_flag((_size), (_op32), (_op64), (_op)); \
    if (_status != UCS_OK) { \
        ucs_error(UCT_PERF_TEST_PARAMS_FMT" does not support atomic %s for " \
                  "message size %zu bytes", UCT_PERF_TEST_PARAMS_ARG(_params), \
                  (_msg)[_op], (_size)); \
        return _status; \
    }

/* Verify that all (_required) atomic capability bits are set in (_attr).
 * The first missing operation is reported (verbose mode only), and the
 * calling function RETURNS UCS_ERR_UNSUPPORTED. */
#define ATOMIC_OP_CHECK(_size, _attr, _required, _params, _msg) \
    if (!ucs_test_all_flags(_attr, _required)) { \
        if ((_params)->flags & UCX_PERF_TEST_FLAG_VERBOSE) { \
            ucs_error(UCT_PERF_TEST_PARAMS_FMT" does not support required " \
                      #_size"-bit atomic: %s", UCT_PERF_TEST_PARAMS_ARG(_params), \
                      (_msg)[ucs_ffs64(~(_attr) & (_required))]); \
        } \
        return UCS_ERR_UNSUPPORTED; \
    }
/* Per-endpoint bootstrap information exchanged between peers before the test
 * runs (address lengths, packed rkey size, and the receive buffer address). */
typedef struct {
    union {
        struct {
            size_t dev_addr_len;      /* length of the UCT device address */
            size_t iface_addr_len;    /* length of the UCT iface address */
            size_t ep_addr_len;       /* length of the UCT endpoint address */
        } uct;
        struct {
            size_t worker_addr_len;   /* length of the UCP worker address */
            size_t total_wireup_len;  /* total length of the wireup blob */
        } ucp;
    };
    size_t        rkey_size;          /* packed remote-key size; 0 if none */
    unsigned long recv_buffer;        /* sender's receive-buffer address,
                                         shared with the peer */
} ucx_perf_ep_info_t;
/* Allocator table indexed by memory type; presumably populated by the
 * per-memory-type support modules — registration is not visible in this file. */
const ucx_perf_allocator_t* ucx_perf_mem_type_allocators[UCS_MEMORY_TYPE_LAST];

/* Human-readable names of UCT iface capability flags, indexed by bit number;
 * used to report which required capability is missing. */
static const char *perf_iface_ops[] = {
    [ucs_ilog2(UCT_IFACE_FLAG_AM_SHORT)]         = "am short",
    [ucs_ilog2(UCT_IFACE_FLAG_AM_BCOPY)]         = "am bcopy",
    [ucs_ilog2(UCT_IFACE_FLAG_AM_ZCOPY)]         = "am zcopy",
    [ucs_ilog2(UCT_IFACE_FLAG_PUT_SHORT)]        = "put short",
    [ucs_ilog2(UCT_IFACE_FLAG_PUT_BCOPY)]        = "put bcopy",
    [ucs_ilog2(UCT_IFACE_FLAG_PUT_ZCOPY)]        = "put zcopy",
    [ucs_ilog2(UCT_IFACE_FLAG_GET_SHORT)]        = "get short",
    [ucs_ilog2(UCT_IFACE_FLAG_GET_BCOPY)]        = "get bcopy",
    [ucs_ilog2(UCT_IFACE_FLAG_GET_ZCOPY)]        = "get zcopy",
    [ucs_ilog2(UCT_IFACE_FLAG_ERRHANDLE_PEER_FAILURE)] = "peer failure handler",
    [ucs_ilog2(UCT_IFACE_FLAG_CONNECT_TO_IFACE)] = "connect to iface",
    [ucs_ilog2(UCT_IFACE_FLAG_CONNECT_TO_EP)]    = "connect to ep",
    [ucs_ilog2(UCT_IFACE_FLAG_AM_DUP)]           = "full reliability",
    [ucs_ilog2(UCT_IFACE_FLAG_CB_SYNC)]          = "sync callback",
    [ucs_ilog2(UCT_IFACE_FLAG_CB_ASYNC)]         = "async callback",
    [ucs_ilog2(UCT_IFACE_FLAG_PENDING)]          = "pending",
    [ucs_ilog2(UCT_IFACE_FLAG_TAG_EAGER_SHORT)]  = "tag eager short",
    [ucs_ilog2(UCT_IFACE_FLAG_TAG_EAGER_BCOPY)]  = "tag eager bcopy",
    [ucs_ilog2(UCT_IFACE_FLAG_TAG_EAGER_ZCOPY)]  = "tag eager zcopy",
    [ucs_ilog2(UCT_IFACE_FLAG_TAG_RNDV_ZCOPY)]   = "tag rndv zcopy",
    [ucs_ilog2(UCT_IFACE_FLAG_EP_CHECK)]         = "ep check",
    [ucs_ilog2(UCT_IFACE_FLAG_EP_KEEPALIVE)]     = "ep keepalive"
};
/* Names of non-fetching atomic operations, indexed by uct_atomic_op_t. */
static const char *perf_atomic_op[] = {
    [UCT_ATOMIC_OP_ADD] = "add",
    [UCT_ATOMIC_OP_AND] = "and",
    [UCT_ATOMIC_OP_OR]  = "or" ,
    [UCT_ATOMIC_OP_XOR] = "xor"
};
/* Names of fetching atomic operations, indexed by uct_atomic_op_t. */
static const char *perf_atomic_fop[] = {
    [UCT_ATOMIC_OP_ADD]   = "fetch-add",
    [UCT_ATOMIC_OP_AND]   = "fetch-and",
    [UCT_ATOMIC_OP_OR]    = "fetch-or",
    [UCT_ATOMIC_OP_XOR]   = "fetch-xor",
    [UCT_ATOMIC_OP_SWAP]  = "swap",
    [UCT_ATOMIC_OP_CSWAP] = "cswap"
};
/*
 * This Quickselect routine is based on the algorithm described in
 * "Numerical recipes in C", Second Edition,
 * Cambridge University Press, 1992, Section 8.5, ISBN 0-521-43108-5
 * This code by Nicolas Devillard - 1998. Public domain.
 */
/* Return the 'rank' percentile of the n samples in arr[], i.e. the element
 * that would land at index (n-1) * rank / 100 (truncated) if arr[] were
 * fully sorted. Runs in expected O(n); NOTE: partially reorders arr[] in
 * place. */
static ucs_time_t __find_percentile_quick_select(ucs_time_t arr[], int n, double rank)
{
    int low, high;
    int percentile_idx;
    int middle, ll, hh;

#define ELEM_SWAP(a,b) { register ucs_time_t t=(a);(a)=(b);(b)=t; }

    low = 0; high = n - 1; percentile_idx = high * (rank / 100.0);
    for (;;) {
        if (high <= low) { /* One element only */
            return arr[percentile_idx];
        }
        if (high == low + 1) { /* Two elements only */
            if (arr[low] > arr[high]) {
                ELEM_SWAP(arr[low], arr[high]);
            }
            return arr[percentile_idx];
        }
        /* Find median of low, middle and high items; swap into position low */
        middle = (low + high) / 2;
        if (arr[middle] > arr[high]) { ELEM_SWAP(arr[middle], arr[high]); }
        if (arr[low]    > arr[high]) { ELEM_SWAP(arr[low],    arr[high]); }
        if (arr[middle] > arr[low])  { ELEM_SWAP(arr[middle], arr[low]);  }
        /* Swap low item (now in position middle) into position (low+1) */
        ELEM_SWAP(arr[middle], arr[low + 1]);
        /* Nibble from each end towards middle, swapping items when stuck */
        ll = low + 1;
        hh = high;
        for (;;) {
            do ll++; while (arr[low] > arr[ll]);
            do hh--; while (arr[hh] > arr[low]);
            if (hh < ll) {
                break;
            }
            ELEM_SWAP(arr[ll], arr[hh]);
        }
        /* Swap middle item (in position 'low') back into correct position */
        ELEM_SWAP(arr[low], arr[hh]);
        /* Re-set active partition: keep only the side containing the
         * percentile index */
        if (hh <= percentile_idx) {
            low = ll;
        }
        if (hh >= percentile_idx) {
            high = hh - 1;
        }
    }
}
/* Allocate 'length' bytes of host memory through the UCT interface.
 * On success the allocation must have come from the perf context's MD;
 * on failure the error is logged and the failing status returned. */
static ucs_status_t
uct_perf_test_alloc_host(const ucx_perf_context_t *perf, size_t length,
                         unsigned flags, uct_allocated_memory_t *alloc_mem)
{
    ucs_status_t status = uct_iface_mem_alloc(perf->uct.iface, length, flags,
                                              "perftest", alloc_mem);
    if (status == UCS_OK) {
        ucs_assert(alloc_mem->md == perf->uct.md);
        return UCS_OK;
    }

    ucs_error("failed to allocate memory: %s", ucs_status_string(status));
    return status;
}
/* Release memory obtained via uct_perf_test_alloc_host().
 * 'perf' is unused; it is kept to match the allocator callback signature
 * (see perf->allocator->uct_free call sites). */
static void uct_perf_test_free_host(const ucx_perf_context_t *perf,
                                    uct_allocated_memory_t *alloc_mem)
{
    uct_iface_mem_free(alloc_mem);
}
/* Copy 'count' bytes between two buffers, both of which must reside in host
 * memory; any other memory type is rejected with a logged error (no copy). */
static void ucx_perf_test_memcpy_host(void *dst, ucs_memory_type_t dst_mem_type,
                                      const void *src, ucs_memory_type_t src_mem_type,
                                      size_t count)
{
    if ((dst_mem_type == UCS_MEMORY_TYPE_HOST) &&
        (src_mem_type == UCS_MEMORY_TYPE_HOST)) {
        memcpy(dst, src, count);
        return;
    }

    ucs_error("wrong memory type passed src - %d, dst - %d",
              src_mem_type, dst_mem_type);
}
/* Allocate the send buffer, receive buffer and IOV descriptor array for a
 * UCT test. Buffers are sized for all threads (buffer_size * thread_count).
 * On failure, everything allocated so far is unwound via the goto chain and
 * the failing status is returned. */
static ucs_status_t uct_perf_test_alloc_mem(ucx_perf_context_t *perf)
{
    ucx_perf_params_t *params = &perf->params;
    ucs_status_t status;
    unsigned flags;
    size_t buffer_size;

    /* With a ZCOPY layout and an explicit IOV stride, the buffer must hold
     * msg_size_cnt strided entries; otherwise the total message size. */
    if ((UCT_PERF_DATA_LAYOUT_ZCOPY == params->uct.data_layout) && params->iov_stride) {
        buffer_size = params->msg_size_cnt * params->iov_stride;
    } else {
        buffer_size = ucx_perf_get_message_size(params);
    }

    /* TODO use params->alignment */
    flags = (params->flags & UCX_PERF_TEST_FLAG_MAP_NONBLOCK) ?
            UCT_MD_MEM_FLAG_NONBLOCK : 0;
    flags |= UCT_MD_MEM_ACCESS_ALL;

    /* Allocate send buffer memory */
    status = perf->allocator->uct_alloc(perf, buffer_size * params->thread_count,
                                        flags, &perf->uct.send_mem);
    if (status != UCS_OK) {
        goto err;
    }
    perf->send_buffer = perf->uct.send_mem.address;

    /* Allocate receive buffer memory */
    status = perf->allocator->uct_alloc(perf, buffer_size * params->thread_count,
                                        flags, &perf->uct.recv_mem);
    if (status != UCS_OK) {
        goto err_free_send;
    }
    perf->recv_buffer = perf->uct.recv_mem.address;

    /* Allocate IOV datatype memory: one entry per message size per thread */
    perf->params.msg_size_cnt = params->msg_size_cnt;
    perf->uct.iov             = malloc(sizeof(*perf->uct.iov) *
                                       perf->params.msg_size_cnt *
                                       params->thread_count);
    if (NULL == perf->uct.iov) {
        status = UCS_ERR_NO_MEMORY;
        ucs_error("Failed allocate send IOV(%lu) buffer: %s",
                  perf->params.msg_size_cnt, ucs_status_string(status));
        goto err_free_recv;
    }

    ucs_debug("allocated memory. Send buffer %p, Recv buffer %p",
              perf->send_buffer, perf->recv_buffer);
    return UCS_OK;

err_free_recv:
    perf->allocator->uct_free(perf, &perf->uct.recv_mem);
err_free_send:
    perf->allocator->uct_free(perf, &perf->uct.send_mem);
err:
    return status;
}
/* Free the send/receive buffers and the IOV array allocated by
 * uct_perf_test_alloc_mem(). */
static void uct_perf_test_free_mem(ucx_perf_context_t *perf)
{
    perf->allocator->uct_free(perf, &perf->uct.send_mem);
    perf->allocator->uct_free(perf, &perf->uct.recv_mem);
    free(perf->uct.iov);
}
/* Reset the test clocks: record the start timestamps (fast and accurate
 * clocks), compute the absolute deadline from params.max_time, and seed the
 * previous/current snapshots used for interval reporting. */
void ucx_perf_test_start_clock(ucx_perf_context_t *perf)
{
    ucs_time_t now = ucs_get_time();

    perf->start_time_acc = ucs_get_accurate_time();

    if (perf->params.max_time == 0.0) {
        perf->end_time = UINT64_MAX; /* no time limit */
    } else {
        perf->end_time = ucs_time_from_sec(perf->params.max_time) + now;
    }

    perf->prev_time        = now;
    perf->prev.time        = now;
    perf->prev.time_acc    = perf->start_time_acc;
    perf->current.time_acc = perf->start_time_acc;
}
/* Initialize/reset all parameters that could be modified by the warm-up run:
 * iteration limit, report interval, all running counters, the latency timing
 * queue, and the clocks. */
static void ucx_perf_test_prepare_new_run(ucx_perf_context_t *perf,
                                          const ucx_perf_params_t *params)
{
    unsigned slot;

    if (perf->params.max_iter == 0) {
        perf->max_iter = UINT64_MAX; /* run until the time limit instead */
    } else {
        perf->max_iter = perf->params.max_iter;
    }
    perf->report_interval = ucs_time_from_sec(perf->params.report_interval);

    /* zero all progress counters */
    perf->current.time      = 0;
    perf->current.msgs      = 0;
    perf->current.bytes     = 0;
    perf->current.iters     = 0;
    perf->prev.msgs         = 0;
    perf->prev.bytes        = 0;
    perf->prev.iters        = 0;
    perf->timing_queue_head = 0;

    /* clear the per-iteration latency samples */
    for (slot = 0; slot < TIMING_QUEUE_SIZE; ++slot) {
        perf->timing_queue[slot] = 0;
    }

    ucx_perf_test_start_clock(perf);
}
/* Initialize a perf context from the test parameters: copy the params,
 * select the allocator by this process's RTE group (group 0 uses the send
 * memory type, all others the receive memory type), and reset run state. */
static void ucx_perf_test_init(ucx_perf_context_t *perf,
                               const ucx_perf_params_t *params)
{
    unsigned group_index;

    perf->params    = *params;
    group_index     = rte_call(perf, group_index);
    perf->allocator = ucx_perf_mem_type_allocators[
                          (0 == group_index) ? params->send_mem_type
                                             : params->recv_mem_type];

    ucx_perf_test_prepare_new_run(perf, params);
}
/* Fill 'result' with latency, bandwidth and message-rate figures derived
 * from the perf context's counters. For ping-pong test types each iteration
 * is a round trip (two transfers), so latency is halved and bandwidth /
 * message rate doubled via 'factor'. */
void ucx_perf_calc_result(ucx_perf_context_t *perf, ucx_perf_result_t *result)
{
    ucs_time_t percentile;
    double factor;

    /* factor == 2.0 for round-trip (ping-pong) tests, 1.0 otherwise */
    if ((perf->params.test_type == UCX_PERF_TEST_TYPE_PINGPONG) ||
        (perf->params.test_type == UCX_PERF_TEST_TYPE_PINGPONG_WAIT_MEM)) {
        factor = 2.0;
    } else {
        factor = 1.0;
    }

    result->iters        = perf->current.iters;
    result->bytes        = perf->current.bytes;
    result->elapsed_time = perf->current.time_acc - perf->start_time_acc;

    /* Latency */
    /* percentile over at most TIMING_QUEUE_SIZE recent samples; NOTE that
     * __find_percentile_quick_select partially reorders timing_queue */
    percentile = __find_percentile_quick_select(perf->timing_queue,
                     ucs_min(TIMING_QUEUE_SIZE, perf->current.iters),
                     perf->params.percentile_rank);
    result->latency.percentile = ucs_time_to_sec(percentile) / factor;

    /* moment average: over the interval since the previous report */
    result->latency.moment_average =
        (perf->current.time_acc - perf->prev.time_acc)
        / (perf->current.iters - perf->prev.iters)
        / factor;

    /* total average: over the whole run */
    result->latency.total_average =
        (perf->current.time_acc - perf->start_time_acc)
        / perf->current.iters
        / factor;

    /* Bandwidth */
    result->bandwidth.percentile = 0.0; // Undefined

    result->bandwidth.moment_average =
        (perf->current.bytes - perf->prev.bytes) /
        (perf->current.time_acc - perf->prev.time_acc) * factor;

    result->bandwidth.total_average =
        perf->current.bytes /
        (perf->current.time_acc - perf->start_time_acc) * factor;

    /* Packet rate */
    result->msgrate.percentile = 0.0; // Undefined

    result->msgrate.moment_average =
        (perf->current.msgs - perf->prev.msgs) /
        (perf->current.time_acc - perf->prev.time_acc) * factor;

    result->msgrate.total_average =
        perf->current.msgs /
        (perf->current.time_acc - perf->start_time_acc) * factor;
}
/* Validate API-independent test parameters: message size, memory types for
 * UCP RMA/AMO, the outstanding-operations window, and IOV stride limits.
 * Returns UCS_ERR_INVALID_PARAM on the first violated check; messages are
 * printed only in verbose mode. */
static ucs_status_t ucx_perf_test_check_params(ucx_perf_params_t *params)
{
    size_t it;

    /* check if zero-size messages are requested and supported */
    if ((/* they are not supported by: */
         /* - UCT tests, except UCT AM Short/Bcopy */
         (params->api == UCX_PERF_API_UCT) ||
         (/* - UCP RMA and AMO tests */
          (params->api == UCX_PERF_API_UCP) &&
          (params->command != UCX_PERF_CMD_AM) &&
          (params->command != UCX_PERF_CMD_TAG) &&
          (params->command != UCX_PERF_CMD_TAG_SYNC) &&
          (params->command != UCX_PERF_CMD_STREAM))) &&
        ucx_perf_get_message_size(params) < 1) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Message size too small, need to be at least 1");
        }
        return UCS_ERR_INVALID_PARAM;
    }

    /* UCP RMA and AMO commands require host memory on both sides */
    if ((params->api == UCX_PERF_API_UCP) &&
        ((params->send_mem_type != UCS_MEMORY_TYPE_HOST) ||
         (params->recv_mem_type != UCS_MEMORY_TYPE_HOST)) &&
        ((params->command == UCX_PERF_CMD_PUT) ||
         (params->command == UCX_PERF_CMD_GET) ||
         (params->command == UCX_PERF_CMD_ADD) ||
         (params->command == UCX_PERF_CMD_FADD) ||
         (params->command == UCX_PERF_CMD_SWAP) ||
         (params->command == UCX_PERF_CMD_CSWAP))) {
        /* TODO: remove when support for non-HOST memory types will be added */
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("UCP doesn't support RMA/AMO for \"%s\"<->\"%s\" memory types",
                      ucs_memory_type_names[params->send_mem_type],
                      ucs_memory_type_names[params->recv_mem_type]);
        }
        return UCS_ERR_INVALID_PARAM;
    }

    /* at least one operation must be kept outstanding */
    if (params->max_outstanding < 1) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("max_outstanding, need to be at least 1");
        }
        return UCS_ERR_INVALID_PARAM;
    }

    /* check if particular message size fit into stride size */
    if (params->iov_stride) {
        for (it = 0; it < params->msg_size_cnt; ++it) {
            if (params->msg_size_list[it] > params->iov_stride) {
                if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                    ucs_error("Buffer size %lu bigger than stride %lu",
                              params->msg_size_list[it], params->iov_stride);
                }
                return UCS_ERR_INVALID_PARAM;
            }
        }
    }
    return UCS_OK;
}
/* Blocking flush of the endpoint connected to 'peer_index': progress the
 * worker until all outstanding operations on the endpoint complete.
 * Unexpected errors are logged and abort the wait. */
void uct_perf_ep_flush_b(ucx_perf_context_t *perf, int peer_index)
{
    uct_ep_h ep = perf->uct.peers[peer_index].ep;
    uct_completion_t comp;
    ucs_status_t status;
    int started;

    started    = 0;
    comp.func  = NULL;
    /* count starts at 2 and the loop waits for it to drop to 1, so the
     * counter is decremented exactly once: either inline below when the
     * flush completes immediately (UCS_OK), or by the completion machinery
     * once an in-progress flush finishes. */
    comp.count = 2;
    do {
        if (!started) {
            status = uct_ep_flush(ep, 0, &comp);
            if (status == UCS_OK) {
                --comp.count;             /* completed immediately */
            } else if (status == UCS_INPROGRESS) {
                started = 1;              /* wait for the completion */
            } else if (status != UCS_ERR_NO_RESOURCE) {
                ucs_error("uct_ep_flush() failed: %s", ucs_status_string(status));
                return;
            }
            /* UCS_ERR_NO_RESOURCE: retry after progressing the worker */
        }
        uct_worker_progress(perf->uct.worker);
    } while (comp.count > 1);
}
/* Blocking flush of the whole UCT interface: progress the worker until
 * uct_iface_flush() stops returning UCS_INPROGRESS; log any final error. */
void uct_perf_iface_flush_b(ucx_perf_context_t *perf)
{
    ucs_status_t status;

    for (;;) {
        status = uct_iface_flush(perf->uct.iface, 0, NULL);
        uct_worker_progress(perf->uct.worker);
        if (status != UCS_INPROGRESS) {
            break;
        }
    }

    if (status != UCS_OK) {
        ucs_error("uct_iface_flush() failed: %s", ucs_status_string(status));
    }
}
/* Map a UCT data layout to the matching capability flag: SHORT and
 * SHORT_IOV share the short flag; unknown layouts yield 0. */
static inline uint64_t __get_flag(uct_perf_data_layout_t layout, uint64_t short_f,
                                  uint64_t bcopy_f, uint64_t zcopy_f)
{
    switch (layout) {
    case UCT_PERF_DATA_LAYOUT_SHORT:
    case UCT_PERF_DATA_LAYOUT_SHORT_IOV:
        return short_f;
    case UCT_PERF_DATA_LAYOUT_BCOPY:
        return bcopy_f;
    case UCT_PERF_DATA_LAYOUT_ZCOPY:
        return zcopy_f;
    default:
        return 0;
    }
}
/* Record atomic operation 'op' in the 32- or 64-bit capability mask,
 * depending on the operand size; other sizes are unsupported. */
static inline ucs_status_t __get_atomic_flag(size_t size, uint64_t *op32,
                                             uint64_t *op64, uint64_t op)
{
    switch (size) {
    case sizeof(uint32_t):
        *op32 = UCS_BIT(op);
        return UCS_OK;
    case sizeof(uint64_t):
        *op64 = UCS_BIT(op);
        return UCS_OK;
    default:
        return UCS_ERR_UNSUPPORTED;
    }
}
/* Map a UCT data layout to the matching size limit: SHORT and SHORT_IOV
 * share the short limit; unknown layouts yield 0.
 *
 * Fix: 'zcopy_m' was declared uint64_t while its siblings (and the return
 * type, and every caller's argument such as attr.cap.*.max_zcopy) are
 * size_t — an implicit widen-then-truncate round trip on 32-bit targets.
 * Unified to size_t for consistency with __get_flag's uniform typing. */
static inline size_t __get_max_size(uct_perf_data_layout_t layout, size_t short_m,
                                    size_t bcopy_m, size_t zcopy_m)
{
    return ((layout == UCT_PERF_DATA_LAYOUT_SHORT) ||
            (layout == UCT_PERF_DATA_LAYOUT_SHORT_IOV)) ? short_m :
           (layout == UCT_PERF_DATA_LAYOUT_BCOPY)       ? bcopy_m :
           (layout == UCT_PERF_DATA_LAYOUT_ZCOPY)       ? zcopy_m :
           0;
}
/* Check that the memory domain can access or register memory of 'mem_type'.
 * Returns UCS_ERR_INVALID_PARAM if the type is unsupported; the error
 * message is printed only in verbose mode.
 *
 * Fix: the error return was nested inside the VERBOSE-flag check, so a
 * non-verbose run would log nothing AND fall through to return UCS_OK for
 * an unsupported memory type. The return now happens unconditionally,
 * matching the verbose-gating pattern used by every other check in this
 * file (e.g. ucx_perf_test_check_params). */
static ucs_status_t uct_perf_test_check_md_support(ucx_perf_params_t *params,
                                                   ucs_memory_type_t mem_type,
                                                   uct_md_attr_t *md_attr)
{
    if (!(md_attr->cap.access_mem_types & UCS_BIT(mem_type)) &&
        !(md_attr->cap.reg_mem_types & UCS_BIT(mem_type))) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Unsupported memory type %s by "UCT_PERF_TEST_PARAMS_FMT,
                      ucs_memory_type_names[mem_type],
                      UCT_PERF_TEST_PARAMS_ARG(params));
        }
        return UCS_ERR_INVALID_PARAM;
    }
    return UCS_OK;
}
/* Validate the requested UCT test against the interface and memory-domain
 * capabilities: required iface flags for the command/layout combination,
 * atomic operation support, min/max message sizes, AM-specific constraints,
 * IOV limits, and memory-type support. Returns UCS_OK if the test can run,
 * otherwise UCS_ERR_UNSUPPORTED / UCS_ERR_INVALID_PARAM (details printed
 * only in verbose mode). */
static ucs_status_t uct_perf_test_check_capabilities(ucx_perf_params_t *params,
                                                     uct_iface_h iface, uct_md_h md)
{
    uint64_t required_flags = 0;
    uint64_t atomic_op32 = 0;
    uint64_t atomic_op64 = 0;
    uint64_t atomic_fop32 = 0;
    uint64_t atomic_fop64 = 0;
    uct_md_attr_t md_attr;
    uct_iface_attr_t attr;
    ucs_status_t status;
    size_t min_size, max_size, max_iov, message_size;

    status = uct_md_query(md, &md_attr);
    if (status != UCS_OK) {
        ucs_error("uct_md_query(%s) failed: %s",
                  params->uct.md_name, ucs_status_string(status));
        return status;
    }

    status = uct_iface_query(iface, &attr);
    if (status != UCS_OK) {
        ucs_error("uct_iface_query("UCT_PERF_TEST_PARAMS_FMT") failed: %s",
                  UCT_PERF_TEST_PARAMS_ARG(params),
                  ucs_status_string(status));
        return status;
    }

    min_size     = 0;
    max_iov      = 1;
    message_size = ucx_perf_get_message_size(params);

    /* Determine, per command, which iface flags are required and the valid
     * message-size range for the chosen data layout. */
    switch (params->command) {
    case UCX_PERF_CMD_AM:
        required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_AM_SHORT,
                                    UCT_IFACE_FLAG_AM_BCOPY, UCT_IFACE_FLAG_AM_ZCOPY);
        required_flags |= UCT_IFACE_FLAG_CB_SYNC;
        min_size = __get_max_size(params->uct.data_layout, 0, 0,
                                  attr.cap.am.min_zcopy);
        max_size = __get_max_size(params->uct.data_layout, attr.cap.am.max_short,
                                  attr.cap.am.max_bcopy, attr.cap.am.max_zcopy);
        max_iov  = attr.cap.am.max_iov;
        break;
    case UCX_PERF_CMD_PUT:
        required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_PUT_SHORT,
                                    UCT_IFACE_FLAG_PUT_BCOPY, UCT_IFACE_FLAG_PUT_ZCOPY);
        min_size = __get_max_size(params->uct.data_layout, 0, 0,
                                  attr.cap.put.min_zcopy);
        max_size = __get_max_size(params->uct.data_layout, attr.cap.put.max_short,
                                  attr.cap.put.max_bcopy, attr.cap.put.max_zcopy);
        max_iov  = attr.cap.put.max_iov;
        break;
    case UCX_PERF_CMD_GET:
        required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_GET_SHORT,
                                    UCT_IFACE_FLAG_GET_BCOPY, UCT_IFACE_FLAG_GET_ZCOPY);
        min_size = __get_max_size(params->uct.data_layout, 0, 0,
                                  attr.cap.get.min_zcopy);
        max_size = __get_max_size(params->uct.data_layout, attr.cap.get.max_short,
                                  attr.cap.get.max_bcopy, attr.cap.get.max_zcopy);
        max_iov  = attr.cap.get.max_iov;
        break;
    /* Atomic commands: ATOMIC_OP_CONFIG returns from this function on an
     * unsupported operand size. Operands are 4 or 8 bytes, hence max 8. */
    case UCX_PERF_CMD_ADD:
        ATOMIC_OP_CONFIG(message_size, &atomic_op32, &atomic_op64, UCT_ATOMIC_OP_ADD,
                         perf_atomic_op, params, status);
        max_size = 8;
        break;
    case UCX_PERF_CMD_FADD:
        ATOMIC_OP_CONFIG(message_size, &atomic_fop32, &atomic_fop64, UCT_ATOMIC_OP_ADD,
                         perf_atomic_fop, params, status);
        max_size = 8;
        break;
    case UCX_PERF_CMD_SWAP:
        ATOMIC_OP_CONFIG(message_size, &atomic_fop32, &atomic_fop64, UCT_ATOMIC_OP_SWAP,
                         perf_atomic_fop, params, status);
        max_size = 8;
        break;
    case UCX_PERF_CMD_CSWAP:
        ATOMIC_OP_CONFIG(message_size, &atomic_fop32, &atomic_fop64, UCT_ATOMIC_OP_CSWAP,
                         perf_atomic_fop, params, status);
        max_size = 8;
        break;
    default:
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Invalid test command");
        }
        return UCS_ERR_INVALID_PARAM;
    }

    status = ucx_perf_test_check_params(params);
    if (status != UCS_OK) {
        return status;
    }

    /* check atomics first (these macros return on a missing capability) */
    ATOMIC_OP_CHECK(32, attr.cap.atomic32.op_flags, atomic_op32, params, perf_atomic_op);
    ATOMIC_OP_CHECK(64, attr.cap.atomic64.op_flags, atomic_op64, params, perf_atomic_op);
    ATOMIC_OP_CHECK(32, attr.cap.atomic32.fop_flags, atomic_fop32, params, perf_atomic_fop);
    ATOMIC_OP_CHECK(64, attr.cap.atomic64.fop_flags, atomic_fop64, params, perf_atomic_fop);

    /* check iface flags (only for non-atomic commands, which leave all
     * atomic masks at 0; atomics leave required_flags at 0) */
    if (!(atomic_op32 | atomic_op64 | atomic_fop32 | atomic_fop64) &&
        (!ucs_test_all_flags(attr.cap.flags, required_flags) || !required_flags)) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error(UCT_PERF_TEST_PARAMS_FMT" does not support operation %s",
                      UCT_PERF_TEST_PARAMS_ARG(params),
                      perf_iface_ops[ucs_ffs64(~attr.cap.flags & required_flags)]);
        }
        return UCS_ERR_UNSUPPORTED;
    }

    if (message_size < min_size) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Message size (%zu) is smaller than min supported (%zu)",
                      message_size, min_size);
        }
        return UCS_ERR_UNSUPPORTED;
    }

    if (message_size > max_size) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Message size (%zu) is larger than max supported (%zu)",
                      message_size, max_size);
        }
        return UCS_ERR_UNSUPPORTED;
    }

    /* active-message specific constraints */
    if (params->command == UCX_PERF_CMD_AM) {
        if ((params->uct.data_layout == UCT_PERF_DATA_LAYOUT_SHORT) &&
            (params->uct.am_hdr_size != sizeof(uint64_t))) {
            if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("Short AM header size must be 8 bytes");
            }
            return UCS_ERR_INVALID_PARAM;
        }

        if ((params->uct.data_layout == UCT_PERF_DATA_LAYOUT_ZCOPY) &&
            (params->uct.am_hdr_size > attr.cap.am.max_hdr)) {
            if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("AM header size (%zu) is larger than max supported "
                          "(%zu)",
                          params->uct.am_hdr_size, attr.cap.am.max_hdr);
            }
            return UCS_ERR_UNSUPPORTED;
        }

        if (params->uct.am_hdr_size > message_size) {
            if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("AM header size (%zu) is larger than message size "
                          "(%zu)",
                          params->uct.am_hdr_size, message_size);
            }
            return UCS_ERR_INVALID_PARAM;
        }

        if (params->uct.fc_window > UCT_PERF_TEST_MAX_FC_WINDOW) {
            if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("AM flow-control window (%d) too large (should be <= %d)",
                          params->uct.fc_window, UCT_PERF_TEST_MAX_FC_WINDOW);
            }
            return UCS_ERR_INVALID_PARAM;
        }

        if ((params->flags & UCX_PERF_TEST_FLAG_ONE_SIDED) &&
            (params->flags & UCX_PERF_TEST_FLAG_VERBOSE))
        {
            /* NOTE(review): "on-sided" in this message looks like a typo for
             * "one-sided" — confirm before changing the user-visible text */
            ucs_warn("Running active-message test with on-sided progress");
        }
    }

    /* IOV-based layouts: validate the number of IOV entries */
    if ((UCT_PERF_DATA_LAYOUT_ZCOPY == params->uct.data_layout) ||
        (UCT_PERF_DATA_LAYOUT_SHORT_IOV == params->uct.data_layout)) {
        if (params->msg_size_cnt > max_iov) {
            if ((params->flags & UCX_PERF_TEST_FLAG_VERBOSE) ||
                !params->msg_size_cnt) {
                ucs_error("Wrong number of IOV entries. Requested is %lu, "
                          "should be in the range 1...%lu", params->msg_size_cnt,
                          max_iov);
            }
            return UCS_ERR_UNSUPPORTED;
        }
        /* if msg_size_cnt == 1 the message size checked above */
        if ((UCT_PERF_DATA_LAYOUT_ZCOPY == params->uct.data_layout) &&
            (UCX_PERF_CMD_AM == params->command) && (params->msg_size_cnt > 1)) {
            if (params->uct.am_hdr_size > params->msg_size_list[0]) {
                if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                    ucs_error("AM header size (%lu) larger than the first IOV "
                              "message size (%lu)",
                              params->uct.am_hdr_size,
                              params->msg_size_list[0]);
                }
                return UCS_ERR_INVALID_PARAM;
            }
        }
    }

    /* both the send and receive memory types must be usable by the MD */
    status = uct_perf_test_check_md_support(params, params->send_mem_type, &md_attr);
    if (status != UCS_OK) {
        return status;
    }

    status = uct_perf_test_check_md_support(params, params->recv_mem_type, &md_attr);
    if (status != UCS_OK) {
        return status;
    }

    return UCS_OK;
}
/*
 * Establish UCT connections to every other process in the test group.
 *
 * Each process publishes (via the RTE) one blob per peer containing:
 * an ucx_perf_ep_info_t header, the packed rkey of its receive buffer,
 * its device address, its iface address, and - for CONNECT_TO_EP
 * transports - an endpoint address. Received blobs are parsed with the
 * same layout and used to connect ep-to-ep or ep-to-iface, depending on
 * the iface capabilities.
 *
 * Returns UCS_OK on success; on failure releases everything created so
 * far (rkeys, endpoints, peer array) and returns the error status.
 */
static ucs_status_t uct_perf_test_setup_endpoints(ucx_perf_context_t *perf)
{
    const size_t buffer_size = ADDR_BUF_SIZE;
    ucx_perf_ep_info_t info, *remote_info;
    unsigned group_size, i, group_index;
    uct_device_addr_t *dev_addr;
    uct_iface_addr_t *iface_addr;
    uct_ep_addr_t *ep_addr;
    uct_iface_attr_t iface_attr;
    uct_md_attr_t md_attr;
    uct_ep_params_t ep_params;
    void *rkey_buffer;
    ucs_status_t status;
    struct iovec vec[5];
    void *buffer;
    void *req;

    buffer = malloc(buffer_size);
    if (buffer == NULL) {
        ucs_error("Failed to allocate RTE buffer");
        status = UCS_ERR_NO_MEMORY;
        goto err;
    }

    status = uct_iface_query(perf->uct.iface, &iface_attr);
    if (status != UCS_OK) {
        ucs_error("Failed to uct_iface_query: %s", ucs_status_string(status));
        goto err_free;
    }

    status = uct_md_query(perf->uct.md, &md_attr);
    if (status != UCS_OK) {
        ucs_error("Failed to uct_md_query: %s", ucs_status_string(status));
        goto err_free;
    }

    /* An rkey is exchanged only if the MD can register/allocate memory */
    if (md_attr.cap.flags & (UCT_MD_FLAG_ALLOC|UCT_MD_FLAG_REG)) {
        info.rkey_size = md_attr.rkey_packed_size;
    } else {
        info.rkey_size = 0;
    }
    info.uct.dev_addr_len = iface_attr.device_addr_len;
    info.uct.iface_addr_len = iface_attr.iface_addr_len;
    info.uct.ep_addr_len = iface_attr.ep_addr_len;
    info.recv_buffer = (uintptr_t)perf->recv_buffer;

    /* Lay out [rkey][dev_addr][iface_addr][ep_addr] inside 'buffer' */
    rkey_buffer = buffer;
    dev_addr = UCS_PTR_BYTE_OFFSET(rkey_buffer, info.rkey_size);
    iface_addr = UCS_PTR_BYTE_OFFSET(dev_addr, info.uct.dev_addr_len);
    ep_addr = UCS_PTR_BYTE_OFFSET(iface_addr, info.uct.iface_addr_len);
    ucs_assert_always(UCS_PTR_BYTE_OFFSET(ep_addr, info.uct.ep_addr_len) <=
                      UCS_PTR_BYTE_OFFSET(buffer, buffer_size));

    status = uct_iface_get_device_address(perf->uct.iface, dev_addr);
    if (status != UCS_OK) {
        ucs_error("Failed to uct_iface_get_device_address: %s",
                  ucs_status_string(status));
        goto err_free;
    }

    status = uct_iface_get_address(perf->uct.iface, iface_addr);
    if (status != UCS_OK) {
        ucs_error("Failed to uct_iface_get_address: %s", ucs_status_string(status));
        goto err_free;
    }

    if (info.rkey_size > 0) {
        memset(rkey_buffer, 0, info.rkey_size);
        status = uct_md_mkey_pack(perf->uct.md, perf->uct.recv_mem.memh, rkey_buffer);
        if (status != UCS_OK) {
            ucs_error("Failed to uct_rkey_pack: %s", ucs_status_string(status));
            goto err_free;
        }
    }

    group_size = rte_call(perf, group_size);
    group_index = rte_call(perf, group_index);

    perf->uct.peers = calloc(group_size, sizeof(*perf->uct.peers));
    if (perf->uct.peers == NULL) {
        /* BUG FIX: 'status' still held UCS_OK here, so a failed allocation
         * used to be reported as success to the caller */
        status = UCS_ERR_NO_MEMORY;
        goto err_free;
    }

    ep_params.field_mask = UCT_EP_PARAM_FIELD_IFACE;
    ep_params.iface = perf->uct.iface;

    if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) {
        /* Pre-create one endpoint per peer and publish its address */
        for (i = 0; i < group_size; ++i) {
            if (i == group_index) {
                continue;
            }
            status = uct_ep_create(&ep_params, &perf->uct.peers[i].ep);
            if (status != UCS_OK) {
                ucs_error("Failed to uct_ep_create: %s", ucs_status_string(status));
                goto err_destroy_eps;
            }
            status = uct_ep_get_address(perf->uct.peers[i].ep, ep_addr);
            if (status != UCS_OK) {
                ucs_error("Failed to uct_ep_get_address: %s", ucs_status_string(status));
                goto err_destroy_eps;
            }
        }
    } else if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_IFACE) {
        ep_params.field_mask |= UCT_EP_PARAM_FIELD_DEV_ADDR |
                                UCT_EP_PARAM_FIELD_IFACE_ADDR;
    }

    /* Publish [info header][address blob] to all peers */
    vec[0].iov_base = &info;
    vec[0].iov_len = sizeof(info);
    vec[1].iov_base = buffer;
    vec[1].iov_len = info.rkey_size + info.uct.dev_addr_len +
                     info.uct.iface_addr_len + info.uct.ep_addr_len;

    rte_call(perf, post_vec, vec, 2, &req);
    rte_call(perf, exchange_vec, req);

    for (i = 0; i < group_size; ++i) {
        if (i == group_index) {
            continue;
        }

        /* Receive the peer's blob and parse it with the same layout */
        rte_call(perf, recv, i, buffer, buffer_size, req);

        remote_info = buffer;
        rkey_buffer = remote_info + 1;
        dev_addr = UCS_PTR_BYTE_OFFSET(rkey_buffer, remote_info->rkey_size);
        iface_addr = UCS_PTR_BYTE_OFFSET(dev_addr, remote_info->uct.dev_addr_len);
        ep_addr = UCS_PTR_BYTE_OFFSET(iface_addr, remote_info->uct.iface_addr_len);
        perf->uct.peers[i].remote_addr = remote_info->recv_buffer;

        if (!uct_iface_is_reachable(perf->uct.iface, dev_addr,
                                    remote_info->uct.iface_addr_len ?
                                    iface_addr : NULL)) {
            ucs_error("Destination is unreachable");
            status = UCS_ERR_UNREACHABLE;
            goto err_destroy_eps;
        }

        if (remote_info->rkey_size > 0) {
            status = uct_rkey_unpack(perf->uct.cmpt, rkey_buffer,
                                     &perf->uct.peers[i].rkey);
            if (status != UCS_OK) {
                ucs_error("Failed to uct_rkey_unpack: %s", ucs_status_string(status));
                goto err_destroy_eps;
            }
        } else {
            perf->uct.peers[i].rkey.handle = NULL;
            perf->uct.peers[i].rkey.rkey = UCT_INVALID_RKEY;
        }

        if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) {
            status = uct_ep_connect_to_ep(perf->uct.peers[i].ep, dev_addr, ep_addr);
        } else if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_IFACE) {
            ep_params.dev_addr = dev_addr;
            ep_params.iface_addr = iface_addr;
            status = uct_ep_create(&ep_params, &perf->uct.peers[i].ep);
        } else {
            status = UCS_ERR_UNSUPPORTED;
        }
        if (status != UCS_OK) {
            ucs_error("Failed to connect endpoint: %s", ucs_status_string(status));
            goto err_destroy_eps;
        }
    }

    /* Ensure wireup traffic is completed on all peers before the test starts */
    uct_perf_iface_flush_b(perf);
    free(buffer);
    uct_perf_barrier(perf);
    return UCS_OK;

err_destroy_eps:
    /* NOTE(review): entries not reached yet are zero-initialized by calloc;
     * confirm that rkey==0 is never a valid released handle here */
    for (i = 0; i < group_size; ++i) {
        if (perf->uct.peers[i].rkey.rkey != UCT_INVALID_RKEY) {
            uct_rkey_release(perf->uct.cmpt, &perf->uct.peers[i].rkey);
        }
        if (perf->uct.peers[i].ep != NULL) {
            uct_ep_destroy(perf->uct.peers[i].ep);
        }
    }
    free(perf->uct.peers);
err_free:
    free(buffer);
err:
    return status;
}
/*
 * Tear down all UCT peer connections: release unpacked rkeys, destroy
 * endpoints, and free the peer array. A barrier is taken first so no peer
 * is still sending, and the AM handler is detached before destruction.
 */
static void uct_perf_test_cleanup_endpoints(ucx_perf_context_t *perf)
{
    unsigned peer_idx, num_peers, my_idx;

    /* Wait for all peers before destroying anything they may still use */
    uct_perf_barrier(perf);

    /* Stop dispatching incoming active messages */
    uct_iface_set_am_handler(perf->uct.iface, UCT_PERF_TEST_AM_ID, NULL, NULL, 0);

    num_peers = rte_call(perf, group_size);
    my_idx    = rte_call(perf, group_index);

    for (peer_idx = 0; peer_idx < num_peers; ++peer_idx) {
        if (peer_idx == my_idx) {
            continue; /* no connection to self */
        }

        if (perf->uct.peers[peer_idx].rkey.rkey != UCT_INVALID_RKEY) {
            uct_rkey_release(perf->uct.cmpt, &perf->uct.peers[peer_idx].rkey);
        }
        if (perf->uct.peers[peer_idx].ep) {
            uct_ep_destroy(perf->uct.peers[peer_idx].ep);
        }
    }

    free(perf->uct.peers);
}
/*
 * Translate the perftest command into the UCP feature bits required to run
 * it, filling ucp_params->features, and validate the test parameters.
 *
 * Returns UCS_ERR_INVALID_PARAM for an unknown command or for an atomic
 * test whose message size is not 32 or 64 bits; otherwise returns the
 * result of ucx_perf_test_check_params().
 */
static ucs_status_t ucp_perf_test_fill_params(ucx_perf_params_t *params,
                                              ucp_params_t *ucp_params)
{
    const size_t msg_size = ucx_perf_get_message_size(params);
    ucx_perf_cmd_t cmd    = params->command;

    if ((cmd == UCX_PERF_CMD_PUT) || (cmd == UCX_PERF_CMD_GET)) {
        ucp_params->features |= UCP_FEATURE_RMA;
    } else if ((cmd == UCX_PERF_CMD_ADD) || (cmd == UCX_PERF_CMD_FADD) ||
               (cmd == UCX_PERF_CMD_SWAP) || (cmd == UCX_PERF_CMD_CSWAP)) {
        /* Atomic width is derived from the message size */
        if (msg_size == sizeof(uint32_t)) {
            ucp_params->features |= UCP_FEATURE_AMO32;
        } else if (msg_size == sizeof(uint64_t)) {
            ucp_params->features |= UCP_FEATURE_AMO64;
        } else {
            if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("Atomic size should be either 32 or 64 bit");
            }
            return UCS_ERR_INVALID_PARAM;
        }
    } else if ((cmd == UCX_PERF_CMD_TAG) || (cmd == UCX_PERF_CMD_TAG_SYNC)) {
        ucp_params->features |= UCP_FEATURE_TAG;
    } else if (cmd == UCX_PERF_CMD_STREAM) {
        ucp_params->features |= UCP_FEATURE_STREAM;
    } else if (cmd == UCX_PERF_CMD_AM) {
        ucp_params->features |= UCP_FEATURE_AM;
    } else {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Invalid test command");
        }
        return UCS_ERR_INVALID_PARAM;
    }

    /* Wakeup support is needed for event-driven / sleeping wait modes */
    if ((params->flags & UCX_PERF_TEST_FLAG_WAKEUP) ||
        (params->wait_mode == UCX_PERF_WAIT_MODE_SLEEP)) {
        ucp_params->features |= UCP_FEATURE_WAKEUP;
    }

    return ucx_perf_test_check_params(params);
}
/*
 * Allocate the IOV descriptor array for one direction of the test:
 * iovcnt descriptors per thread. For non-IOV datatypes nothing is
 * allocated and *iov_p is left untouched.
 */
static ucs_status_t ucp_perf_test_alloc_iov_mem(ucp_perf_datatype_t datatype,
                                                size_t iovcnt, unsigned thread_count,
                                                ucp_dt_iov_t **iov_p)
{
    ucp_dt_iov_t *iov_array;

    /* Contiguous datatypes carry no IOV list */
    if (UCP_PERF_DATATYPE_IOV != datatype) {
        return UCS_OK;
    }

    iov_array = malloc(sizeof(*iov_array) * iovcnt * thread_count);
    if (iov_array == NULL) {
        ucs_error("Failed allocate IOV buffer with iovcnt=%lu", iovcnt);
        return UCS_ERR_NO_MEMORY;
    }

    *iov_p = iov_array;
    return UCS_OK;
}
/*
 * Allocate (map) 'length' bytes of host memory through UCP.
 *
 * On success *memh holds the mapping handle and *address_p the mapped
 * address (queried back from UCP since UCP_MEM_MAP_ALLOCATE chooses it).
 * non_blk_flag is OR-ed into the mapping flags when the test requests
 * non-blocking mapping.
 */
static ucs_status_t
ucp_perf_test_alloc_host(const ucx_perf_context_t *perf, size_t length,
                         void **address_p, ucp_mem_h *memh, int non_blk_flag)
{
    ucp_mem_map_params_t map_params;
    ucp_mem_attr_t attr;
    ucs_status_t status;

    map_params.field_mask = UCP_MEM_MAP_PARAM_FIELD_ADDRESS |
                            UCP_MEM_MAP_PARAM_FIELD_LENGTH |
                            UCP_MEM_MAP_PARAM_FIELD_FLAGS;
    map_params.address    = *address_p;
    map_params.length     = length;
    map_params.flags      = UCP_MEM_MAP_ALLOCATE;
    if (perf->params.flags & UCX_PERF_TEST_FLAG_MAP_NONBLOCK) {
        map_params.flags |= non_blk_flag;
    }

    status = ucp_mem_map(perf->ucp.context, &map_params, memh);
    if (status != UCS_OK) {
        return status;
    }

    /* Ask UCP where the allocation actually landed */
    attr.field_mask = UCP_MEM_ATTR_FIELD_ADDRESS;
    status = ucp_mem_query(*memh, &attr);
    if (status != UCS_OK) {
        return status;
    }

    *address_p = attr.address;
    return UCS_OK;
}
/*
 * Release host memory mapped by ucp_perf_test_alloc_host(). 'address' is
 * unused here - presumably kept to match the allocator's free callback
 * signature (see the ucp_free calls on perf->allocator); confirm.
 */
static void ucp_perf_test_free_host(const ucx_perf_context_t *perf,
                                    void *address, ucp_mem_h memh)
{
    ucs_status_t status = ucp_mem_unmap(perf->ucp.context, memh);

    if (status != UCS_OK) {
        ucs_warn("ucp_mem_unmap() failed: %s", ucs_status_string(status));
    }
}
/*
 * Allocate all memory needed for a UCP test run: per-thread send/receive
 * data buffers, the optional AM header, and the optional IOV descriptor
 * arrays for the send/receive datatypes.
 *
 * On failure, everything allocated so far is released and the failure
 * status is returned.
 */
static ucs_status_t ucp_perf_test_alloc_mem(ucx_perf_context_t *perf)
{
    ucx_perf_params_t *params = &perf->params;
    ucs_status_t status;
    size_t buffer_size;

    /* With an IOV stride the buffer must cover all strided entries */
    if (params->iov_stride) {
        buffer_size = params->msg_size_cnt * params->iov_stride;
    } else {
        buffer_size = ucx_perf_get_message_size(params);
    }

    /* Allocate send buffer memory */
    perf->send_buffer = NULL;
    status = perf->allocator->ucp_alloc(perf, buffer_size * params->thread_count,
                                        &perf->send_buffer, &perf->ucp.send_memh,
                                        UCP_MEM_MAP_NONBLOCK);
    if (status != UCS_OK) {
        goto err;
    }

    /* Allocate receive buffer memory */
    perf->recv_buffer = NULL;
    status = perf->allocator->ucp_alloc(perf, buffer_size * params->thread_count,
                                        &perf->recv_buffer, &perf->ucp.recv_memh,
                                        0);
    if (status != UCS_OK) {
        goto err_free_send_buffer;
    }

    /* Allocate AM header */
    if (params->ucp.am_hdr_size != 0) {
        perf->ucp.am_hdr = malloc(params->ucp.am_hdr_size);
        if (perf->ucp.am_hdr == NULL) {
            /* BUG FIX: this path used to leave 'status' at the previous
             * call's value (UCS_OK) */
            status = UCS_ERR_NO_MEMORY;
            goto err_free_buffers;
        }
    } else {
        perf->ucp.am_hdr = NULL;
    }

    /* Allocate IOV datatype memory */
    perf->ucp.send_iov = NULL;
    status = ucp_perf_test_alloc_iov_mem(params->ucp.send_datatype,
                                         perf->params.msg_size_cnt,
                                         params->thread_count,
                                         &perf->ucp.send_iov);
    if (UCS_OK != status) {
        goto err_free_am_hdr;
    }

    perf->ucp.recv_iov = NULL;
    status = ucp_perf_test_alloc_iov_mem(params->ucp.recv_datatype,
                                         perf->params.msg_size_cnt,
                                         params->thread_count,
                                         &perf->ucp.recv_iov);
    if (UCS_OK != status) {
        goto err_free_send_iov_buffers;
    }

    return UCS_OK;

err_free_send_iov_buffers:
    free(perf->ucp.send_iov);
err_free_am_hdr:
    free(perf->ucp.am_hdr);
err_free_buffers:
    perf->allocator->ucp_free(perf, perf->recv_buffer, perf->ucp.recv_memh);
err_free_send_buffer:
    perf->allocator->ucp_free(perf, perf->send_buffer, perf->ucp.send_memh);
err:
    /* BUG FIX: propagate the actual failure status instead of always
     * collapsing every error to UCS_ERR_NO_MEMORY */
    return status;
}
/*
 * Release all buffers allocated by ucp_perf_test_alloc_mem(), in reverse
 * order of allocation. free(NULL) is a no-op, so optional pieces that were
 * never allocated (IOV arrays, AM header) are safe to pass.
 */
static void ucp_perf_test_free_mem(ucx_perf_context_t *perf)
{
    free(perf->ucp.recv_iov);
    free(perf->ucp.send_iov);
    free(perf->ucp.am_hdr);
    perf->allocator->ucp_free(perf, perf->recv_buffer, perf->ucp.recv_memh);
    perf->allocator->ucp_free(perf, perf->send_buffer, perf->ucp.send_memh);
}
/*
 * Destroy the rkey and endpoint of every thread context. Endpoints are
 * closed with flush semantics; the closing worker is progressed until the
 * close request completes. Failures are only warned about.
 */
static void ucp_perf_test_destroy_eps(ucx_perf_context_t* perf)
{
    unsigned tid, num_threads = perf->params.thread_count;
    ucs_status_ptr_t *close_req;
    ucs_status_t status;

    for (tid = 0; tid < num_threads; ++tid) {
        if (perf->ucp.tctx[tid].perf.ucp.rkey != NULL) {
            ucp_rkey_destroy(perf->ucp.tctx[tid].perf.ucp.rkey);
        }

        if (perf->ucp.tctx[tid].perf.ucp.ep == NULL) {
            continue;
        }

        close_req = ucp_ep_close_nb(perf->ucp.tctx[tid].perf.ucp.ep,
                                    UCP_EP_CLOSE_MODE_FLUSH);
        if (UCS_PTR_IS_PTR(close_req)) {
            /* progress the owning worker until the close completes */
            for (;;) {
                ucp_worker_progress(perf->ucp.tctx[tid].perf.ucp.worker);
                status = ucp_request_check_status(close_req);
                if (status != UCS_INPROGRESS) {
                    break;
                }
            }
            ucp_request_release(close_req);
        } else if (UCS_PTR_STATUS(close_req) != UCS_OK) {
            ucs_warn("failed to close ep %p on thread %d: %s\n",
                     perf->ucp.tctx[tid].perf.ucp.ep, tid,
                     ucs_status_string(UCS_PTR_STATUS(close_req)));
        }
    }
}
/*
 * Collective status agreement: broadcast the local status to every peer
 * and collect theirs. Returns UCS_OK only if every process (including
 * this one) reported UCS_OK; otherwise returns one of the failures.
 */
static ucs_status_t ucp_perf_test_exchange_status(ucx_perf_context_t *perf,
                                                  ucs_status_t status)
{
    unsigned peer, num_peers = rte_call(perf, group_size);
    ucs_status_t result = status;
    void *req = NULL;
    struct iovec vec;

    /* publish the local status */
    vec.iov_base = &status;
    vec.iov_len  = sizeof(status);
    rte_call(perf, post_vec, &vec, 1, &req);
    rte_call(perf, exchange_vec, req);

    /* any failing peer makes the collective result a failure */
    for (peer = 0; peer < num_peers; ++peer) {
        rte_call(perf, recv, peer, &status, sizeof(status), req);
        if (status != UCS_OK) {
            result = status;
        }
    }

    return result;
}
/*
 * Endpoint error callback, installed on endpoints when
 * UCX_PERF_TEST_FLAG_ERR_HANDLING is set. It only logs the error; the
 * test is not aborted from here.
 */
static void ucp_perf_test_err_handler(void *arg, ucp_ep_h ep,
                                      ucs_status_t status)
{
    ucs_error("error handler called with status %d (%s)\n", status,
              ucs_status_string(status));
}
/*
 * Receive the remote peer's wireup data and connect to it.
 *
 * The received buffer holds one blob per remote thread, laid out as
 * [ucx_perf_ep_info_t][worker address][packed rkey]. For every local
 * thread an endpoint is created to the matching remote worker and the
 * remote rkey (if any) is unpacked. Requires a group size of exactly 2.
 */
static ucs_status_t ucp_perf_test_receive_remote_data(ucx_perf_context_t *perf)
{
    unsigned thread_count = perf->params.thread_count;
    void *rkey_buffer = NULL;
    void *req = NULL;
    unsigned group_size, group_index, i;
    ucx_perf_ep_info_t *remote_info;
    ucp_ep_params_t ep_params;
    ucp_address_t *address;
    ucs_status_t status;
    size_t buffer_size;
    void *buffer;

    group_size = rte_call(perf, group_size);
    group_index = rte_call(perf, group_index);

    if (group_size != 2) {
        ucs_error("perftest requires group size to be exactly 2 "
                  "(actual group size: %u)", group_size);
        return UCS_ERR_UNSUPPORTED;
    }

    buffer_size = ADDR_BUF_SIZE * thread_count;
    buffer = malloc(buffer_size);
    if (buffer == NULL) {
        ucs_error("failed to allocate RTE receive buffer");
        status = UCS_ERR_NO_MEMORY;
        goto err;
    }

    /* Initialize all endpoints and rkeys to NULL to handle error flow */
    for (i = 0; i < thread_count; i++) {
        perf->ucp.tctx[i].perf.ucp.ep = NULL;
        perf->ucp.tctx[i].perf.ucp.rkey = NULL;
    }

    /* receive the data from the remote peer, extract the address from it
     * (along with additional wireup info) and create an endpoint to the peer */
    rte_call(perf, recv, 1 - group_index, buffer, buffer_size, req);

    remote_info = buffer;
    for (i = 0; i < thread_count; i++) {
        /* per-thread blob layout: [info][worker address][packed rkey] */
        address = (ucp_address_t*)(remote_info + 1);
        rkey_buffer = UCS_PTR_BYTE_OFFSET(address,
                                          remote_info->ucp.worker_addr_len);
        perf->ucp.tctx[i].perf.ucp.remote_addr = remote_info->recv_buffer;

        ep_params.field_mask = UCP_EP_PARAM_FIELD_REMOTE_ADDRESS;
        ep_params.address = address;
        if (perf->params.flags & UCX_PERF_TEST_FLAG_ERR_HANDLING) {
            ep_params.field_mask |= UCP_EP_PARAM_FIELD_ERR_HANDLER |
                                    UCP_EP_PARAM_FIELD_ERR_HANDLING_MODE;
            ep_params.err_handler.cb = ucp_perf_test_err_handler;
            ep_params.err_handler.arg = NULL;
            ep_params.err_mode = UCP_ERR_HANDLING_MODE_PEER;
        }

        status = ucp_ep_create(perf->ucp.tctx[i].perf.ucp.worker, &ep_params,
                               &perf->ucp.tctx[i].perf.ucp.ep);
        if (status != UCS_OK) {
            if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("ucp_ep_create() failed: %s", ucs_status_string(status));
            }
            goto err_free_eps_buffer;
        }

        if (remote_info->rkey_size > 0) {
            status = ucp_ep_rkey_unpack(perf->ucp.tctx[i].perf.ucp.ep, rkey_buffer,
                                        &perf->ucp.tctx[i].perf.ucp.rkey);
            if (status != UCS_OK) {
                if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                    /* BUG FIX: this was ucs_fatal(), which aborts the whole
                     * process - and only when VERBOSE is set. Log and unwind
                     * like every other failure path in this function. */
                    ucs_error("ucp_rkey_unpack() failed: %s", ucs_status_string(status));
                }
                goto err_free_eps_buffer;
            }
        } else {
            perf->ucp.tctx[i].perf.ucp.rkey = NULL;
        }

        /* advance to the next remote thread's blob */
        remote_info = UCS_PTR_BYTE_OFFSET(remote_info,
                                          remote_info->ucp.total_wireup_len);
    }

    free(buffer);
    return UCS_OK;

err_free_eps_buffer:
    ucp_perf_test_destroy_eps(perf);
    free(buffer);
err:
    return status;
}
/*
 * Pack and post this process' wireup data for the remote peer.
 *
 * For every thread, three iovec entries are posted: the ep info header
 * (ucx_perf_ep_info_t), the thread's worker address, and the packed rkey
 * of the receive buffer (only packed when RMA/AMO features are enabled).
 */
static ucs_status_t ucp_perf_test_send_local_data(ucx_perf_context_t *perf,
                                                  uint64_t features)
{
    unsigned i, j, thread_count = perf->params.thread_count;
    size_t address_length = 0;
    void *rkey_buffer = NULL;
    void *req = NULL;
    ucx_perf_ep_info_t *info;
    ucp_address_t *address;
    ucs_status_t status;
    struct iovec *vec;
    size_t rkey_size;

    /* Pack an rkey only if the peer will actually need remote access */
    if (features & (UCP_FEATURE_RMA|UCP_FEATURE_AMO32|UCP_FEATURE_AMO64)) {
        status = ucp_rkey_pack(perf->ucp.context, perf->ucp.recv_memh,
                               &rkey_buffer, &rkey_size);
        if (status != UCS_OK) {
            if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("ucp_rkey_pack() failed: %s", ucs_status_string(status));
            }
            goto err;
        }
    } else {
        rkey_size = 0;
    }

    /* each thread has an iovec with 3 entries to send to the remote peer:
     * ep_info, worker_address and rkey buffer */
    vec = calloc(3 * thread_count, sizeof(struct iovec));
    if (vec == NULL) {
        ucs_error("failed to allocate iovec");
        status = UCS_ERR_NO_MEMORY;
        goto err_rkey_release;
    }

    /* get the worker address created for every thread and send it to the remote
     * peer */
    for (i = 0; i < thread_count; i++) {
        status = ucp_worker_get_address(perf->ucp.tctx[i].perf.ucp.worker,
                                        &address, &address_length);
        if (status != UCS_OK) {
            if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("ucp_worker_get_address() failed: %s",
                          ucs_status_string(status));
            }
            goto err_free_workers_vec;
        }

        vec[i * 3].iov_base = malloc(sizeof(*info));
        if (vec[i * 3].iov_base == NULL) {
            ucs_error("failed to allocate vec entry for info");
            status = UCS_ERR_NO_MEMORY;
            /* worker i is destroyed here; workers 0..i-1 are destroyed by
             * the cleanup loop below */
            ucp_worker_destroy(perf->ucp.tctx[i].perf.ucp.worker);
            goto err_free_workers_vec;
        }

        info = vec[i * 3].iov_base;
        info->ucp.worker_addr_len = address_length;
        info->ucp.total_wireup_len = sizeof(*info) + address_length + rkey_size;
        info->rkey_size = rkey_size;
        info->recv_buffer = (uintptr_t)perf->ucp.tctx[i].perf.recv_buffer;

        vec[(i * 3) + 0].iov_len = sizeof(*info);
        vec[(i * 3) + 1].iov_base = address;
        vec[(i * 3) + 1].iov_len = address_length;
        vec[(i * 3) + 2].iov_base = rkey_buffer;
        vec[(i * 3) + 2].iov_len = info->rkey_size;
        address_length = 0;
    }

    /* send to the remote peer */
    rte_call(perf, post_vec, vec, 3 * thread_count, &req);
    rte_call(perf, exchange_vec, req);

    if (features & (UCP_FEATURE_RMA|UCP_FEATURE_AMO32|UCP_FEATURE_AMO64)) {
        ucp_rkey_buffer_release(rkey_buffer);
    }

    /* release the per-thread info headers and worker addresses */
    for (i = 0; i < thread_count; i++) {
        free(vec[i * 3].iov_base);
        ucp_worker_release_address(perf->ucp.tctx[i].perf.ucp.worker,
                                   vec[(i * 3) + 1].iov_base);
    }
    free(vec);

    return UCS_OK;

err_free_workers_vec:
    /* BUG FIX: this loop indexed tctx with 'i' instead of 'j', destroying
     * the same worker repeatedly and leaking workers 0..i-1 */
    for (j = 0; j < i; j++) {
        ucp_worker_destroy(perf->ucp.tctx[j].perf.ucp.worker);
    }
    /* NOTE(review): the info headers and worker addresses already stored in
     * 'vec' for threads 0..i-1 are not released here - confirm whether this
     * error-path leak matters */
    free(vec);
err_rkey_release:
    if (features & (UCP_FEATURE_RMA|UCP_FEATURE_AMO32|UCP_FEATURE_AMO64)) {
        ucp_rkey_buffer_release(rkey_buffer);
    }
err:
    return status;
}
/*
 * Wire up the UCP test: publish local wireup data, consume the peer's,
 * agree on a collective status, and flush all workers so wireup is
 * complete before the measured run starts.
 *
 * On any local failure the failing status is still exchanged with the
 * peers so every process bails out together.
 */
static ucs_status_t ucp_perf_test_setup_endpoints(ucx_perf_context_t *perf,
                                                  uint64_t features)
{
    ucs_status_t status;
    unsigned tid;

    /* publish local wireup data (worker addresses, rkeys) to the peer */
    status = ucp_perf_test_send_local_data(perf, features);
    if (status != UCS_OK) {
        goto err;
    }

    /* consume the peer's wireup data and connect endpoints to it */
    status = ucp_perf_test_receive_remote_data(perf);
    if (status != UCS_OK) {
        goto err;
    }

    /* agree on a collective status across all processes */
    status = ucp_perf_test_exchange_status(perf, UCS_OK);
    if (status != UCS_OK) {
        goto err_destroy_eps;
    }

    /* flush every worker to force wireup completion; flush failures are
     * warned about and reflected in the returned status */
    for (tid = 0; tid < perf->params.thread_count; tid++) {
        status = ucp_worker_flush(perf->ucp.tctx[tid].perf.ucp.worker);
        if (status != UCS_OK) {
            ucs_warn("ucp_worker_flush() failed on thread %d: %s",
                     tid, ucs_status_string(status));
        }
    }

    return status;

err_destroy_eps:
    ucp_perf_test_destroy_eps(perf);
err:
    (void)ucp_perf_test_exchange_status(perf, status);
    return status;
}
/*
 * Tear down all UCP endpoints. The barrier runs first so no peer is still
 * communicating when the endpoints are destroyed.
 */
static void ucp_perf_test_cleanup_endpoints(ucx_perf_context_t *perf)
{
    ucp_perf_barrier(perf);
    ucp_perf_test_destroy_eps(perf);
}
/*
 * Destroy every per-thread UCP worker that was actually created.
 * (tctx is calloc'ed, so never-created workers are NULL and skipped.)
 */
static void ucp_perf_test_destroy_workers(ucx_perf_context_t *perf)
{
    unsigned tid;

    for (tid = 0; tid < perf->params.thread_count; tid++) {
        if (perf->ucp.tctx[tid].perf.ucp.worker == NULL) {
            continue;
        }
        ucp_worker_destroy(perf->ucp.tctx[tid].perf.ucp.worker);
    }
}
/*
 * Configure 'perf' for the warmup round: run at most warmup_iter
 * iterations, capped at one tenth (rounded up) of the measured iteration
 * count, and suppress intermediate reporting for its duration.
 */
static void ucx_perf_set_warmup(ucx_perf_context_t* perf,
                                const ucx_perf_params_t* params)
{
    perf->max_iter = ucs_min(params->warmup_iter,
                             ucs_div_round_up(params->max_iter, 10));
    perf->report_interval = ULONG_MAX;
}
/*
 * Locate and open the UCT memory domain (MD) that exposes the transport
 * and device requested in perf->params.uct (tl_name / dev_name).
 *
 * Iterates over all components, then over each component's MD resources,
 * opening every MD and listing its transport resources until a match is
 * found. On success perf->uct.cmpt and perf->uct.md are set (the matching
 * MD stays open; all others are closed). Returns UCS_ERR_NO_DEVICE if no
 * MD provides the requested transport/device pair.
 */
static ucs_status_t uct_perf_create_md(ucx_perf_context_t *perf)
{
    uct_component_h *uct_components;
    uct_component_attr_t component_attr;
    uct_tl_resource_desc_t *tl_resources;
    unsigned md_index, num_components;
    unsigned tl_index, num_tl_resources;
    unsigned cmpt_index;
    ucs_status_t status;
    uct_md_h md;
    uct_md_config_t *md_config;

    status = uct_query_components(&uct_components, &num_components);
    if (status != UCS_OK) {
        goto out;
    }

    for (cmpt_index = 0; cmpt_index < num_components; ++cmpt_index) {
        /* first query only the MD resource count, to size the alloca below */
        component_attr.field_mask = UCT_COMPONENT_ATTR_FIELD_MD_RESOURCE_COUNT;
        status = uct_component_query(uct_components[cmpt_index], &component_attr);
        if (status != UCS_OK) {
            goto out_release_components_list;
        }

        /* then fetch the MD resource list itself (stack-allocated) */
        component_attr.field_mask = UCT_COMPONENT_ATTR_FIELD_MD_RESOURCES;
        component_attr.md_resources = alloca(sizeof(*component_attr.md_resources) *
                                             component_attr.md_resource_count);
        status = uct_component_query(uct_components[cmpt_index], &component_attr);
        if (status != UCS_OK) {
            goto out_release_components_list;
        }

        for (md_index = 0; md_index < component_attr.md_resource_count; ++md_index) {
            status = uct_md_config_read(uct_components[cmpt_index], NULL, NULL,
                                        &md_config);
            if (status != UCS_OK) {
                goto out_release_components_list;
            }

            /* record the candidate MD name in the test parameters */
            ucs_strncpy_zero(perf->params.uct.md_name,
                             component_attr.md_resources[md_index].md_name,
                             UCT_MD_NAME_MAX);

            status = uct_md_open(uct_components[cmpt_index],
                                 component_attr.md_resources[md_index].md_name,
                                 md_config, &md);
            uct_config_release(md_config);
            if (status != UCS_OK) {
                goto out_release_components_list;
            }

            status = uct_md_query_tl_resources(md, &tl_resources, &num_tl_resources);
            if (status != UCS_OK) {
                uct_md_close(md);
                goto out_release_components_list;
            }

            for (tl_index = 0; tl_index < num_tl_resources; ++tl_index) {
                if (!strcmp(perf->params.uct.tl_name, tl_resources[tl_index].tl_name) &&
                    !strcmp(perf->params.uct.dev_name, tl_resources[tl_index].dev_name))
                {
                    /* found the requested transport/device - keep this MD open */
                    uct_release_tl_resource_list(tl_resources);
                    perf->uct.cmpt = uct_components[cmpt_index];
                    perf->uct.md = md;
                    status = UCS_OK;
                    goto out_release_components_list;
                }
            }

            /* no match on this MD - close it and keep searching */
            uct_md_close(md);
            uct_release_tl_resource_list(tl_resources);
        }
    }

    ucs_error("Cannot use "UCT_PERF_TEST_PARAMS_FMT,
              UCT_PERF_TEST_PARAMS_ARG(&perf->params));
    status = UCS_ERR_NO_DEVICE;

out_release_components_list:
    uct_release_component_list(uct_components);
out:
    return status;
}
/*
 * Collective barrier across all UCT test processes. The local worker's
 * progress routine is passed to the RTE so pending communication keeps
 * advancing while waiting on the barrier.
 */
void uct_perf_barrier(ucx_perf_context_t *perf)
{
    rte_call(perf, barrier, (void(*)(void*))uct_worker_progress,
             (void*)perf->uct.worker);
}
/*
 * Collective barrier across all UCP test processes, progressing the
 * calling thread's worker while waiting. With OpenMP, each thread
 * progresses its own worker; otherwise thread 0's worker is used.
 */
void ucp_perf_barrier(ucx_perf_context_t *perf)
{
    rte_call(perf, barrier, (void(*)(void*))ucp_worker_progress,
#if _OPENMP
             (void*)perf->ucp.tctx[omp_get_thread_num()].perf.ucp.worker);
#else
             (void*)perf->ucp.tctx[0].perf.ucp.worker);
#endif
}
/*
 * Create the full UCT test context: async context, worker, MD, iface,
 * test memory, and connected endpoints. On failure everything created so
 * far is torn down in reverse order via the goto ladder below.
 */
static ucs_status_t uct_perf_setup(ucx_perf_context_t *perf)
{
    ucx_perf_params_t *params = &perf->params;
    uct_iface_config_t *iface_config;
    ucs_status_t status;
    uct_iface_params_t iface_params = {
        .field_mask = UCT_IFACE_PARAM_FIELD_OPEN_MODE |
                      UCT_IFACE_PARAM_FIELD_STATS_ROOT |
                      UCT_IFACE_PARAM_FIELD_RX_HEADROOM |
                      UCT_IFACE_PARAM_FIELD_CPU_MASK,
        .open_mode = UCT_IFACE_OPEN_MODE_DEVICE,
        .mode.device.tl_name = params->uct.tl_name,
        .mode.device.dev_name = params->uct.dev_name,
        .stats_root = ucs_stats_get_root(),
        .rx_headroom = 0
    };

    UCS_CPU_ZERO(&iface_params.cpu_mask);

    status = ucs_async_context_init(&perf->uct.async, params->async_mode);
    if (status != UCS_OK) {
        goto out;
    }

    status = uct_worker_create(&perf->uct.async, params->thread_mode,
                               &perf->uct.worker);
    if (status != UCS_OK) {
        goto out_cleanup_async;
    }

    status = uct_perf_create_md(perf);
    if (status != UCS_OK) {
        goto out_destroy_worker;
    }

    status = uct_md_iface_config_read(perf->uct.md, params->uct.tl_name, NULL,
                                      NULL, &iface_config);
    if (status != UCS_OK) {
        goto out_destroy_md;
    }

    status = uct_iface_open(perf->uct.md, perf->uct.worker, &iface_params,
                            iface_config, &perf->uct.iface);
    uct_config_release(iface_config);
    if (status != UCS_OK) {
        ucs_error("Failed to open iface: %s", ucs_status_string(status));
        goto out_destroy_md;
    }

    /* check the local iface/MD against the test requirements, then share the
     * outcome with all peers so every process fails together if any does */
    status = uct_perf_test_check_capabilities(params, perf->uct.iface,
                                              perf->uct.md);
    /* sync status across all processes */
    status = ucp_perf_test_exchange_status(perf, status);
    if (status != UCS_OK) {
        goto out_iface_close;
    }

    status = uct_perf_test_alloc_mem(perf);
    if (status != UCS_OK) {
        goto out_iface_close;
    }

    /* Enable progress before `uct_iface_flush` and `uct_worker_progress` called
     * to give a chance to finish connection for some transports (ib/ud, tcp).
     * They may return UCS_INPROGRESS from `uct_iface_flush` when connections are
     * in progress */
    uct_iface_progress_enable(perf->uct.iface,
                              UCT_PROGRESS_SEND | UCT_PROGRESS_RECV);

    status = uct_perf_test_setup_endpoints(perf);
    if (status != UCS_OK) {
        ucs_error("Failed to setup endpoints: %s", ucs_status_string(status));
        goto out_free_mem;
    }

    return UCS_OK;

out_free_mem:
    uct_perf_test_free_mem(perf);
out_iface_close:
    uct_iface_close(perf->uct.iface);
out_destroy_md:
    uct_md_close(perf->uct.md);
out_destroy_worker:
    uct_worker_destroy(perf->uct.worker);
out_cleanup_async:
    ucs_async_context_cleanup(&perf->uct.async);
out:
    return status;
}
/*
 * Release all UCT resources, in reverse order of their creation in
 * uct_perf_setup(): endpoints, test memory, iface, MD, worker, async.
 */
static void uct_perf_cleanup(ucx_perf_context_t *perf)
{
    uct_perf_test_cleanup_endpoints(perf);
    uct_perf_test_free_mem(perf);
    uct_iface_close(perf->uct.iface);
    uct_md_close(perf->uct.md);
    uct_worker_destroy(perf->uct.worker);
    ucs_async_context_cleanup(&perf->uct.async);
}
/*
 * UCP request-init callback (ucp_params.request_init): clear the context
 * pointer of a freshly created perftest request.
 */
static void ucp_perf_request_init(void *req)
{
    ucp_perf_request_t *perf_req = req;

    perf_req->context = NULL;
}
/*
 * Create the UCP test context: the shared ucp_context, per-thread worker
 * contexts (with thread-specific slices of the send/receive buffers),
 * test memory, and connected endpoints. For AM tests, verifies the
 * requested AM header size against the worker's maximum.
 *
 * On failure everything created so far is torn down via the goto ladder.
 */
static ucs_status_t ucp_perf_setup(ucx_perf_context_t *perf)
{
    ucp_params_t ucp_params;
    ucp_worker_params_t worker_params;
    ucp_worker_attr_t worker_attr;
    ucp_config_t *config;
    ucs_status_t status;
    unsigned i, thread_count;
    size_t message_size;

    ucp_params.field_mask   = UCP_PARAM_FIELD_FEATURES |
                              UCP_PARAM_FIELD_REQUEST_SIZE |
                              UCP_PARAM_FIELD_REQUEST_INIT;
    ucp_params.features     = 0;
    ucp_params.request_size = sizeof(ucp_perf_request_t);
    ucp_params.request_init = ucp_perf_request_init;

    if (perf->params.thread_count > 1) {
        /* when there is more than one thread, a ucp_worker would be created for
         * each. all of them will share the same ucp_context */
        ucp_params.field_mask       |= UCP_PARAM_FIELD_MT_WORKERS_SHARED;
        ucp_params.mt_workers_shared = 1;
    }

    status = ucp_perf_test_fill_params(&perf->params, &ucp_params);
    if (status != UCS_OK) {
        goto err;
    }

    status = ucp_config_read(NULL, NULL, &config);
    if (status != UCS_OK) {
        goto err;
    }

    status = ucp_init(&ucp_params, config, &perf->ucp.context);
    ucp_config_release(config);
    if (status != UCS_OK) {
        goto err;
    }

    thread_count = perf->params.thread_count;
    message_size = ucx_perf_get_message_size(&perf->params);

    status = ucp_perf_test_alloc_mem(perf);
    if (status != UCS_OK) {
        ucs_warn("ucp test failed to allocate memory");
        goto err_cleanup;
    }

    perf->ucp.tctx = calloc(thread_count, sizeof(ucx_perf_thread_context_t));
    if (perf->ucp.tctx == NULL) {
        ucs_warn("ucp test failed to allocate memory for thread contexts");
        /* BUG FIX: 'status' still held UCS_OK here, so a failed allocation
         * used to be reported as success to the caller */
        status = UCS_ERR_NO_MEMORY;
        goto err_free_mem;
    }

    worker_params.field_mask  = UCP_WORKER_PARAM_FIELD_THREAD_MODE;
    worker_params.thread_mode = perf->params.thread_mode;

    for (i = 0; i < thread_count; i++) {
        perf->ucp.tctx[i].tid  = i;
        perf->ucp.tctx[i].perf = *perf;
        /* Doctor the src and dst buffers to make them thread specific */
        perf->ucp.tctx[i].perf.send_buffer =
            UCS_PTR_BYTE_OFFSET(perf->send_buffer, i * message_size);
        perf->ucp.tctx[i].perf.recv_buffer =
            UCS_PTR_BYTE_OFFSET(perf->recv_buffer, i * message_size);

        status = ucp_worker_create(perf->ucp.context, &worker_params,
                                   &perf->ucp.tctx[i].perf.ucp.worker);
        if (status != UCS_OK) {
            goto err_free_tctx_destroy_workers;
        }
    }

    if (perf->params.command == UCX_PERF_CMD_AM) {
        /* Check that requested AM header size is not larger than max supported. */
        worker_attr.field_mask = UCP_WORKER_ATTR_FIELD_MAX_AM_HEADER;
        status = ucp_worker_query(perf->ucp.tctx[0].perf.ucp.worker,
                                  &worker_attr);
        if (status != UCS_OK) {
            goto err_free_tctx_destroy_workers;
        }
        if (worker_attr.max_am_header < perf->params.ucp.am_hdr_size) {
            ucs_error("AM header size (%zu) is larger than max supported (%zu)",
                      perf->params.ucp.am_hdr_size, worker_attr.max_am_header);
            status = UCS_ERR_INVALID_PARAM;
            goto err_free_tctx_destroy_workers;
        }
    }

    status = ucp_perf_test_setup_endpoints(perf, ucp_params.features);
    if (status != UCS_OK) {
        if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Failed to setup endpoints: %s", ucs_status_string(status));
        }
        goto err_free_tctx_destroy_workers;
    }

    return UCS_OK;

err_free_tctx_destroy_workers:
    /* destroy_workers skips workers that were never created (calloc'ed NULL) */
    ucp_perf_test_destroy_workers(perf);
    free(perf->ucp.tctx);
err_free_mem:
    ucp_perf_test_free_mem(perf);
err_cleanup:
    ucp_cleanup(perf->ucp.context);
err:
    return status;
}
/*
 * Release all UCP resources, in reverse order of their creation in
 * ucp_perf_setup(): endpoints, a final barrier, test memory, workers,
 * thread contexts, and the UCP context itself.
 */
static void ucp_perf_cleanup(ucx_perf_context_t *perf)
{
    ucp_perf_test_cleanup_endpoints(perf);
    ucp_perf_barrier(perf);
    ucp_perf_test_free_mem(perf);
    ucp_perf_test_destroy_workers(perf);
    free(perf->ucp.tctx);
    ucp_cleanup(perf->ucp.context);
}
/* Per-API dispatch table, indexed by the test's API selector
 * (UCX_PERF_API_UCT / UCX_PERF_API_UCP). */
static struct {
    ucs_status_t (*setup)(ucx_perf_context_t *perf);   /* create resources/connections */
    void (*cleanup)(ucx_perf_context_t *perf);         /* release them */
    ucs_status_t (*run)(ucx_perf_context_t *perf);     /* execute the measured loop */
    void (*barrier)(ucx_perf_context_t *perf);         /* collective barrier */
} ucx_perf_funcs[] = {
    [UCX_PERF_API_UCT] = {uct_perf_setup, uct_perf_cleanup,
                          uct_perf_test_dispatch, uct_perf_barrier},
    [UCX_PERF_API_UCP] = {ucp_perf_setup, ucp_perf_cleanup,
                          ucp_perf_test_dispatch, ucp_perf_barrier}
};
static ucs_status_t ucx_perf_thread_spawn(ucx_perf_context_t *perf,
ucx_perf_result_t* result);
/*
 * Main entry point of the performance test.
 *
 * Validates the parameters, creates a test context, runs setup for the
 * selected API (UCT or UCP), performs an optional warmup round followed
 * by the measured run, reports the result via the RTE, and tears
 * everything down. With more than one thread the per-thread execution is
 * delegated to ucx_perf_thread_spawn().
 */
ucs_status_t ucx_perf_run(const ucx_perf_params_t *params,
                          ucx_perf_result_t *result)
{
    ucx_perf_context_t *perf;
    ucs_status_t status;

    ucx_perf_global_init();

    if (params->command == UCX_PERF_CMD_LAST) {
        ucs_error("Test is not selected");
        status = UCS_ERR_INVALID_PARAM;
        goto out;
    }

    if ((params->api != UCX_PERF_API_UCT) && (params->api != UCX_PERF_API_UCP)) {
        ucs_error("Invalid test API parameter (should be UCT or UCP)");
        status = UCS_ERR_INVALID_PARAM;
        goto out;
    }

    perf = malloc(sizeof(*perf));
    if (perf == NULL) {
        status = UCS_ERR_NO_MEMORY;
        goto out;
    }

    /* selects perf->allocator based on the requested memory types */
    ucx_perf_test_init(perf, params);
    if (perf->allocator == NULL) {
        ucs_error("Unsupported memory types %s<->%s",
                  ucs_memory_type_names[params->send_mem_type],
                  ucs_memory_type_names[params->recv_mem_type]);
        status = UCS_ERR_UNSUPPORTED;
        goto out_free;
    }

    if ((params->api == UCX_PERF_API_UCT) &&
        (perf->allocator->mem_type != UCS_MEMORY_TYPE_HOST)) {
        ucs_warn("UCT tests also copy 2-byte values from %s memory to "
                 "%s memory, which may impact performance results",
                 ucs_memory_type_names[perf->allocator->mem_type],
                 ucs_memory_type_names[UCS_MEMORY_TYPE_HOST]);
    }

    status = perf->allocator->init(perf);
    if (status != UCS_OK) {
        goto out_free;
    }

    status = ucx_perf_funcs[params->api].setup(perf);
    if (status != UCS_OK) {
        goto out_free;
    }

    if (params->thread_count == 1) {
        if (params->api == UCX_PERF_API_UCP) {
            /* single-thread shortcut: expose thread 0's handles directly */
            perf->ucp.worker      = perf->ucp.tctx[0].perf.ucp.worker;
            perf->ucp.ep          = perf->ucp.tctx[0].perf.ucp.ep;
            perf->ucp.remote_addr = perf->ucp.tctx[0].perf.ucp.remote_addr;
            perf->ucp.rkey        = perf->ucp.tctx[0].perf.ucp.rkey;
        }

        if (params->warmup_iter > 0) {
            /* warmup round with reduced iteration count, then reset counters */
            ucx_perf_set_warmup(perf, params);
            status = ucx_perf_funcs[params->api].run(perf);
            if (status != UCS_OK) {
                goto out_cleanup;
            }
            ucx_perf_funcs[params->api].barrier(perf);
            ucx_perf_test_prepare_new_run(perf, params);
        }

        /* Run test */
        status = ucx_perf_funcs[params->api].run(perf);
        ucx_perf_funcs[params->api].barrier(perf);
        if (status == UCS_OK) {
            ucx_perf_calc_result(perf, result);
            rte_call(perf, report, result, perf->params.report_arg, 1, 0);
        }
    } else {
        /* multi-threaded run: per-thread contexts and aggregated reporting */
        status = ucx_perf_thread_spawn(perf, result);
    }

out_cleanup:
    ucx_perf_funcs[params->api].cleanup(perf);
out_free:
    free(perf);
out:
    return status;
}
#if _OPENMP
/*
 * Per-thread test body (executed by each OpenMP thread): initialize the
 * allocator for this thread, run the optional warmup round, then the
 * measured run, and store the thread's result in its context.
 */
static ucs_status_t ucx_perf_thread_run_test(void* arg)
{
    ucx_perf_thread_context_t* tctx = (ucx_perf_thread_context_t*) arg; /* a single thread context */
    ucx_perf_result_t* result = &tctx->result;
    ucx_perf_context_t* perf = &tctx->perf;
    ucx_perf_params_t* params = &perf->params;
    ucs_status_t status;

    /* new threads need explicit device association */
    status = perf->allocator->init(perf);
    if (status != UCS_OK) {
        goto out;
    }

    if (params->warmup_iter > 0) {
        ucx_perf_set_warmup(perf, params);
        status = ucx_perf_funcs[params->api].run(perf);
        ucx_perf_funcs[params->api].barrier(perf);
        if (UCS_OK != status) {
            goto out;
        }
        ucx_perf_test_prepare_new_run(perf, params);
    }

    /* Run test */
/* all threads start the measured run together */
#pragma omp barrier
    status = ucx_perf_funcs[params->api].run(perf);
    ucx_perf_funcs[params->api].barrier(perf);
    if (UCS_OK != status) {
        goto out;
    }

    ucx_perf_calc_result(perf, result);
out:
    return status;
}
/*
 * Combine the per-thread results into one report: bandwidth and message
 * rate are summed over all threads, latency is averaged, and counters
 * (iterations/bytes/time) are taken from thread 0.
 */
static void ucx_perf_thread_report_aggregated_results(ucx_perf_context_t *perf)
{
    ucx_perf_thread_context_t* thr_ctx = perf->ucp.tctx; /* all thread contexts */
    unsigned tid, num_threads = perf->params.thread_count;
    double latency_avg_sum = 0.0;
    ucx_perf_result_t agg;

    /* iteration/byte/time counters are the same on every thread */
    agg.iters        = thr_ctx[0].result.iters;
    agg.bytes        = thr_ctx[0].result.bytes;
    agg.elapsed_time = thr_ctx[0].result.elapsed_time;

    agg.bandwidth.total_average = 0.0;
    agg.bandwidth.percentile    = 0.0; /* undefined - used only for latency */
    agg.latency.total_average   = 0.0;
    agg.msgrate.total_average   = 0.0;
    agg.msgrate.percentile      = 0.0; /* undefined - used only for latency */

    /* with multiple threads the moment averages are undefined, since the
     * values of the last iteration are not captured */
    agg.msgrate.moment_average   = 0.0;
    agg.bandwidth.moment_average = 0.0;
    agg.latency.moment_average   = 0.0;
    agg.latency.percentile       = 0.0;

    /* sum BW and message rate over all threads; accumulate latency for the
     * average below */
    for (tid = 0; tid < num_threads; tid++) {
        agg.bandwidth.total_average += thr_ctx[tid].result.bandwidth.total_average;
        agg.msgrate.total_average   += thr_ctx[tid].result.msgrate.total_average;
        latency_avg_sum             += thr_ctx[tid].result.latency.total_average;
    }
    agg.latency.total_average = latency_avg_sum / num_threads;

    rte_call(perf, report, &agg, perf->params.report_arg, 1, 1);
}
/**
 * Spawn one OpenMP thread per configured test thread, run the test body on
 * each, collect the per-thread statuses and report the aggregated results.
 *
 * @param perf    Performance test context (holds the per-thread contexts).
 * @param result  Unused here; per-thread results are aggregated and reported
 *                directly through the RTE report callback.
 * @return UCS_OK if all threads succeeded, otherwise the status of the last
 *         failing thread.
 *
 * Fix: the original allocated a "statuses" array that was never written or
 * read — each thread already stores its status in tctx[ti].status — so the
 * dead allocation (and its artificial failure path) was removed.
 */
static ucs_status_t ucx_perf_thread_spawn(ucx_perf_context_t *perf,
                                          ucx_perf_result_t* result)
{
    ucx_perf_thread_context_t* tctx = perf->ucp.tctx; /* all the thread contexts on perf */
    int ti, thread_count = perf->params.thread_count;
    ucs_status_t status;

    omp_set_num_threads(thread_count);

#pragma omp parallel private(ti)
    {
        ti              = omp_get_thread_num();
        tctx[ti].status = ucx_perf_thread_run_test((void*)&tctx[ti]);
    }

    /* Log every failure; the last failure (if any) becomes the return code */
    status = UCS_OK;
    for (ti = 0; ti < thread_count; ti++) {
        if (UCS_OK != tctx[ti].status) {
            ucs_error("Thread %d failed to run test: %s", tctx[ti].tid,
                      ucs_status_string(tctx[ti].status));
            status = tctx[ti].status;
        }
    }

    ucx_perf_thread_report_aggregated_results(perf);
    return status;
}
#else
/* Fallback when built without OpenMP: multi-thread test mode cannot be
 * honored, so reject the request with an explicit error. */
static ucs_status_t ucx_perf_thread_spawn(ucx_perf_context_t *perf,
                                          ucx_perf_result_t* result)
{
    ucs_error("Invalid test parameter (thread mode requested without OpenMP capabilities)");
    return UCS_ERR_INVALID_PARAM;
}
#endif /* _OPENMP */
/* One-time, process-wide initialization of the perftest framework:
 * registers the built-in host-memory allocator and loads the perftest
 * modules (which presumably register allocators for other memory types
 * — TODO confirm against the module implementations). */
void ucx_perf_global_init()
{
    /* Allocator operations for plain host memory */
    static ucx_perf_allocator_t host_allocator = {
        .mem_type  = UCS_MEMORY_TYPE_HOST,
        .init      = ucs_empty_function_return_success,
        .ucp_alloc = ucp_perf_test_alloc_host,
        .ucp_free  = ucp_perf_test_free_host,
        .uct_alloc = uct_perf_test_alloc_host,
        .uct_free  = uct_perf_test_free_host,
        .memcpy    = ucx_perf_test_memcpy_host,
        .memset    = memset
    };
    UCS_MODULE_FRAMEWORK_DECLARE(ucx_perftest);

    ucx_perf_mem_type_allocators[UCS_MEMORY_TYPE_HOST] = &host_allocator;

    /* FIXME Memtype allocator modules must be loaded to global scope, otherwise
     * alloc hooks, which are using dlsym() to get pointer to original function,
     * do not work. Need to use bistro for memtype hooks to fix it.
     */
    UCS_MODULE_FRAMEWORK_LOAD(ucx_perftest, UCS_MODULE_LOAD_FLAG_GLOBAL);
}
|
matmulomp.c | #include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include <omp.h>
float **A=NULL;
float **B=NULL;
float **S=NULL;
float **P=NULL;

/* Allocate four n-by-n matrices in the globals A, B, S and P, filling A and
 * B with pseudo-random values in [0, 100); S and P start zeroed (calloc). */
void createMatrix(uint32_t n){
    uint32_t row, col;

    A = (float **)calloc(n, sizeof(float *));
    B = (float **)calloc(n, sizeof(float *));
    S = (float **)calloc(n, sizeof(float *));
    P = (float **)calloc(n, sizeof(float *));
    for (row = 0; row < n; row++) {
        A[row] = (float *)calloc(n, sizeof(float));
        B[row] = (float *)calloc(n, sizeof(float));
        S[row] = (float *)calloc(n, sizeof(float));
        P[row] = (float *)calloc(n, sizeof(float));
        for (col = 0; col < n; col++) {
            A[row][col] = rand() % 100;
            B[row][col] = rand() % 100;
        }
    }
}
/* Release the n row buffers and the row-pointer arrays allocated by
 * createMatrix().  The globals are left dangling afterwards. */
void freeMatrix(uint32_t n){
    uint32_t row;

    for (row = 0; row < n; row++) {
        free(A[row]);
        free(B[row]);
        free(S[row]);
        free(P[row]);
    }
    free(A);
    free(B);
    free(S);
    free(P);
}
/* S = A * B using the classic triple loop, single-threaded.
 * Relies on S being zero-initialized by createMatrix(). */
void multiply_matrices_serial(uint32_t n){
    uint32_t row, col, inner;

    for (row = 0; row < n; row++) {
        for (col = 0; col < n; col++) {
            for (inner = 0; inner < n; inner++) {
                S[row][col] += A[row][inner] * B[inner][col];
            }
        }
    }
}
/* P = A * B computed in parallel with OpenMP.
 *
 * Bug fix: the original used collapse(3), which also distributes the
 * innermost k loop across threads; several threads then update the same
 * P[i][j] accumulator concurrently — a data race that produces wrong
 * results.  Only the two outer loops are independent, so collapse(2) is
 * used and each (i,j) cell is accumulated in a thread-private sum. */
void multiply_matrices_omp(uint32_t n){
    uint32_t i=0;
    uint32_t j=0;
    uint32_t k=0;
#pragma omp parallel for schedule(dynamic) collapse(2) private (i,j,k) shared(A,B,P)
    for(i=0;i<n;i++){
        for(j=0;j<n;j++){
            float sum = 0.0f;          /* private accumulator: no race */
            for(k=0;k<n;k++){
                sum += A[i][k]*B[k][j];
            }
            P[i][j] += sum;            /* each (i,j) touched by one thread */
        }
    }
}
/* Benchmark driver: N comes from argv[1] (default 2); times the serial and
 * the OpenMP matrix multiplications and prints both durations in seconds.
 *
 * Fix: the second timing was mislabeled "Serial:" although it reports the
 * OpenMP run; newlines were also added so the two reports do not run
 * together on one line. */
int main(int argc,char *argv[]){
    uint32_t N=0;
    double t[2]={};

    if(argc<2)N=2;
    else
        N=atoi(argv[1]);
    createMatrix(N);

    /* t[1] = start timestamp, t[0] = end timestamp */
    t[1]=omp_get_wtime();
    multiply_matrices_serial(N);
    t[0]=omp_get_wtime();
    printf("Serial: %.4lf\n",t[0]-t[1]);

    t[1]=omp_get_wtime();
    multiply_matrices_omp(N);
    t[0]=omp_get_wtime();
    printf("Parallel: %.4lf\n",t[0]-t[1]);

    freeMatrix(N);
    return 0;
}
|
transform.h | /*!
* Copyright 2018 XGBoost contributors
*/
#ifndef XGBOOST_COMMON_TRANSFORM_H_
#define XGBOOST_COMMON_TRANSFORM_H_
#include <dmlc/omp.h>
#include <dmlc/common.h>
#include <xgboost/data.h>
#include <utility>
#include <vector>
#include <type_traits> // enable_if
#include "xgboost/host_device_vector.h"
#include "xgboost/span.h"
#include "common.h"
#if defined (__CUDACC__)
#include "device_helpers.cuh"
#endif // defined (__CUDACC__)
namespace xgboost {
namespace common {
// Number of CUDA threads per block used by LaunchCUDAKernel below.
constexpr size_t kBlockThreads = 256;

namespace detail {
#if defined(__CUDACC__)
// Generic CUDA kernel: applies _func(i, _spans...) for every index i in
// [*_range.begin(), *_range.end()).  dh::GridStrideRange iterates with a
// grid-sized stride, so any launch configuration covers the whole range.
template <typename Functor, typename... SpanType>
__global__ void LaunchCUDAKernel(Functor _func, Range _range,
                                 SpanType... _spans) {
  for (auto i : dh::GridStrideRange(*_range.begin(), *_range.end())) {
    _func(i, _spans...);
  }
}
#endif  // defined(__CUDACC__)
}  // namespace detail
/*! \brief Do Transformation on HostDeviceVectors.
*
* \tparam CompiledWithCuda A bool parameter used to distinguish compilation
* trajectories, users do not need to use it.
*
* Note: Using Transform is a VERY tricky thing to do. Transform uses template
* argument to duplicate itself into two different types, one for CPU,
* another for CUDA. The trick is not without its flaw:
*
* If you use it in a function that can be compiled by both nvcc and host
* compiler, the behaviour is un-defined! Because your function is NOT
* duplicated by `CompiledWithCuda`. At link time, cuda compiler resolution
* will merge functions with same signature.
*/
template <bool CompiledWithCuda = WITH_CUDA()>
class Transform {
 private:
  // Bound functor + launch parameters; produced by Transform::Init and
  // invoked through Eval(...).
  template <typename Functor>
  struct Evaluator {
   public:
    Evaluator(Functor func, Range range, int device, bool shard) :
        func_(func), range_{std::move(range)},
        shard_{shard},
        device_{device} {}
    /*!
     * \brief Evaluate the functor with input pointers to HostDeviceVector.
     *
     * \tparam HDV...  HostDeviceVectors type.
     * \param  vectors Pointers to HostDeviceVector.
     */
    template <typename... HDV>
    void Eval(HDV... vectors) const {
      // device_ >= 0 selects the CUDA path; a negative device id means CPU.
      bool on_device = device_ >= 0;

      if (on_device) {
        LaunchCUDA(func_, vectors...);
      } else {
        LaunchCPU(func_, vectors...);
      }
    }

   private:
    // CUDA UnpackHDV: turn a HostDeviceVector pointer into a device Span
    // (const pointee yields a Span over const elements).
    template <typename T>
    Span<T> UnpackHDVOnDevice(HostDeviceVector<T>* _vec) const {
      auto span = _vec->DeviceSpan();
      return span;
    }
    template <typename T>
    Span<T const> UnpackHDVOnDevice(const HostDeviceVector<T>* _vec) const {
      auto span = _vec->ConstDeviceSpan();
      return span;
    }
    // CPU UnpackHDV: wrap the host pointer and size in a Span.
    template <typename T>
    Span<T> UnpackHDV(HostDeviceVector<T>* _vec) const {
      return Span<T> {_vec->HostPointer(),
            static_cast<typename Span<T>::index_type>(_vec->Size())};
    }
    template <typename T>
    Span<T const> UnpackHDV(const HostDeviceVector<T>* _vec) const {
      return Span<T const> {_vec->ConstHostPointer(),
            static_cast<typename Span<T>::index_type>(_vec->Size())};
    }
    // Recursive sync host: calling ConstHostPointer() on each vector before
    // the parallel loop (presumably it pulls data to the host — see
    // HostDeviceVector) so no lazy sync happens inside the omp region.
    template <typename T>
    void SyncHost(const HostDeviceVector<T> *_vector) const {
      _vector->ConstHostPointer();
    }
    template <typename Head, typename... Rest>
    void SyncHost(const HostDeviceVector<Head> *_vector,
                  const HostDeviceVector<Rest> *... _vectors) const {
      _vector->ConstHostPointer();
      SyncHost(_vectors...);
    }
    // Recursive unpack for Shard: assign every vector to the same device.
    template <typename T>
    void UnpackShard(int device, const HostDeviceVector<T> *vector) const {
      vector->SetDevice(device);
    }
    template <typename Head, typename... Rest>
    void UnpackShard(int device,
                     const HostDeviceVector<Head> *_vector,
                     const HostDeviceVector<Rest> *... _vectors) const {
      _vector->SetDevice(device);
      UnpackShard(device, _vectors...);
    }

#if defined(__CUDACC__)
    // Only instantiated when CompiledWithCuda is true (enable_if); pairs
    // with the dummy overload in the #else branch for host-only builds.
    template <typename std::enable_if<CompiledWithCuda>::type* = nullptr,
              typename... HDV>
    void LaunchCUDA(Functor _func, HDV*... _vectors) const {
      if (shard_) {
        UnpackShard(device_, _vectors...);
      }

      size_t range_size = *range_.end() - *range_.begin();

      // Extract index to deal with possible old OpenMP.
      // This deals with situation like multi-class setting where
      // granularity is used in data vector.
      size_t shard_size = range_size;
      Range shard_range {0, static_cast<Range::DifferenceType>(shard_size)};
      dh::safe_cuda(cudaSetDevice(device_));
      // NOTE(review): grid count is derived from *range_.end() rather than
      // shard_size; the two only coincide when the range starts at 0 —
      // harmless here because the kernel bounds-checks via shard_range, but
      // confirm if ranges with non-zero begin are ever used.
      const int kGrids =
          static_cast<int>(DivRoundUp(*(range_.end()), kBlockThreads));
      if (kGrids == 0) {
        return;
      }
      detail::LaunchCUDAKernel<<<kGrids, kBlockThreads>>>(  // NOLINT
          _func, shard_range, UnpackHDVOnDevice(_vectors)...);
    }
#else
    /*! \brief Dummy function defined when compiling for CPU; reaching it at
     *  runtime is a logic error (device requested without CUDA). */
    template <typename std::enable_if<!CompiledWithCuda>::type* = nullptr,
              typename... HDV>
    void LaunchCUDA(Functor _func, HDV*...) const {
      // Remove unused parameter compiler warning.
      (void) _func;
      LOG(FATAL) << "Not part of device code. WITH_CUDA: " << WITH_CUDA();
    }
#endif  // defined(__CUDACC__)

    // CPU path: sync all vectors to host, then run func over the index range
    // in an OpenMP loop.  dmlc::OMPException captures exceptions thrown
    // inside the parallel region and rethrows the first one afterwards.
    template <typename... HDV>
    void LaunchCPU(Functor func, HDV*... vectors) const {
      omp_ulong end = static_cast<omp_ulong>(*(range_.end()));
      dmlc::OMPException omp_exc;
      SyncHost(vectors...);
#pragma omp parallel for schedule(static)
      for (omp_ulong idx = 0; idx < end; ++idx) {
        omp_exc.Run(func, idx, UnpackHDV(vectors)...);
      }
      omp_exc.Rethrow();
    }

   private:
    /*! \brief Callable object. */
    Functor func_;
    /*! \brief Range object specifying parallel threads index range. */
    Range range_;
    /*! \brief Whether sharding for vectors is required. */
    bool shard_;
    /*! \brief Device ordinal; negative means run on CPU. */
    int device_;
  };

 public:
  /*!
   * \brief Initialize a Transform object.
   *
   * \tparam Functor A callable object type.
   * \return A Evaluator having one method Eval.
   *
   * \param func   A callable object, accepting a size_t thread index,
   *               followed by a set of Span classes.
   * \param range  Range object specifying parallel threads index range.
   * \param device Specify GPU to use.
   * \param shard  Whether Shard for HostDeviceVector is needed.
   */
  template <typename Functor>
  static Evaluator<Functor> Init(Functor func, Range const range,
                                 int device,
                                 bool const shard = true) {
    return Evaluator<Functor> {func, std::move(range), device, shard};
  }
};
} // namespace common
} // namespace xgboost
#endif // XGBOOST_COMMON_TRANSFORM_H_
|
fourier.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF OOO U U RRRR IIIII EEEEE RRRR %
% F O O U U R R I E R R %
% FFF O O U U RRRR I EEE RRRR %
% F O O U U R R I E R R %
% F OOO UUU R R IIIII EEEEE R R %
% %
% %
% MagickCore Discrete Fourier Transform Methods %
% %
% Software Design %
% Sean Burke %
% Fred Weinhaus %
% Cristy %
% July 2009 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/cache.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/fourier.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantum-private.h"
#include "magick/resource_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#if defined(MAGICKCORE_FFTW_DELEGATE)
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
#include <complex.h>
#endif
#include <fftw3.h>
#if !defined(MAGICKCORE_HAVE_CABS)
#define cabs(z) (sqrt(z[0]*z[0]+z[1]*z[1]))
#endif
#if !defined(MAGICKCORE_HAVE_CARG)
#define carg(z) (atan2(cimag(z),creal(z)))
#endif
#if !defined(MAGICKCORE_HAVE_CIMAG)
#define cimag(z) (z[1])
#endif
#if !defined(MAGICKCORE_HAVE_CREAL)
#define creal(z) (z[0])
#endif
#endif
/*
Typedef declarations.
*/
/* Parameters describing one per-channel Fourier transform. */
typedef struct _FourierInfo
{
  ChannelType
    channel;        /* image channel this transform operates on */

  MagickBooleanType
    modulus;        /* MagickTrue: magnitude/phase pair; MagickFalse:
                       real/imaginary pair */

  size_t
    width,          /* transform width (callers pad images to a square,
                       even extent — see ForwardFourierTransformChannel) */
    height;         /* transform height */

  ssize_t
    center;         /* x-extent of the FFTW r2c half plane — presumably
                       width/2+1, matching ForwardQuadrantSwap; confirm
                       where the field is assigned */
} FourierInfo;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p l e x I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ComplexImages() performs complex mathematics on an image sequence.
%
% The format of the ComplexImages method is:
%
% MagickBooleanType ComplexImages(Image *images,const ComplexOperator op,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o op: A complex operator.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ComplexImages(const Image *images,const ComplexOperator op,
  ExceptionInfo *exception)
{
#define ComplexImageTag  "Complex/Image"

  CacheView
    *Ai_view,
    *Ar_view,
    *Bi_view,
    *Br_view,
    *Ci_view,
    *Cr_view;

  const char
    *artifact;

  const Image
    *Ai_image,
    *Ar_image,
    *Bi_image,
    *Br_image;

  double
    snr;

  Image
    *Ci_image,
    *complex_images,
    *Cr_image,
    *image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (images->next == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ImageSequenceRequired","`%s'",images->filename);
      return((Image *) NULL);
    }
  /*
    Result is a two-image list: C = Cr + i*Ci.
  */
  image=CloneImage(images,images->columns,images->rows,MagickTrue,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    {
      image=DestroyImageList(image);
      return(image);
    }
  image->depth=32UL;
  complex_images=NewImageList();
  AppendImageToList(&complex_images,image);
  image=CloneImage(images,images->columns,images->rows,MagickTrue,exception);
  if (image == (Image *) NULL)
    {
      complex_images=DestroyImageList(complex_images);
      return(complex_images);
    }
  AppendImageToList(&complex_images,image);
  /*
    Apply complex mathematics to image pixels.
  */
  artifact=GetImageArtifact(image,"complex:snr");
  snr=0.0;
  if (artifact != (const char *) NULL)
    snr=StringToDouble(artifact,(char **) NULL);
  /*
    A = images[0] + i*images[1]; B defaults to A unless at least four images
    are supplied, in which case B = images[2] + i*images[3].
  */
  Ar_image=images;
  Ai_image=images->next;
  Br_image=images;
  Bi_image=images->next;
  if ((images->next->next != (Image *) NULL) &&
      (images->next->next->next != (Image *) NULL))
    {
      Br_image=images->next->next;
      Bi_image=images->next->next->next;
    }
  Cr_image=complex_images;
  Ci_image=complex_images->next;
  Ar_view=AcquireVirtualCacheView(Ar_image,exception);
  Ai_view=AcquireVirtualCacheView(Ai_image,exception);
  Br_view=AcquireVirtualCacheView(Br_image,exception);
  Bi_view=AcquireVirtualCacheView(Bi_image,exception);
  Cr_view=AcquireAuthenticCacheView(Cr_image,exception);
  Ci_view=AcquireAuthenticCacheView(Ci_image,exception);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(images,complex_images,images->rows,1L)
#endif
  for (y=0; y < (ssize_t) images->rows; y++)
  {
    register const PixelPacket
      *magick_restrict Ai,
      *magick_restrict Ar,
      *magick_restrict Bi,
      *magick_restrict Br;

    register PixelPacket
      *magick_restrict Ci,
      *magick_restrict Cr;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    Ar=GetCacheViewVirtualPixels(Ar_view,0,y,Ar_image->columns,1,exception);
    Ai=GetCacheViewVirtualPixels(Ai_view,0,y,Ai_image->columns,1,exception);
    Br=GetCacheViewVirtualPixels(Br_view,0,y,Br_image->columns,1,exception);
    Bi=GetCacheViewVirtualPixels(Bi_view,0,y,Bi_image->columns,1,exception);
    Cr=QueueCacheViewAuthenticPixels(Cr_view,0,y,Cr_image->columns,1,exception);
    Ci=QueueCacheViewAuthenticPixels(Ci_view,0,y,Ci_image->columns,1,exception);
    if ((Ar == (const PixelPacket *) NULL) ||
        (Ai == (const PixelPacket *) NULL) ||
        (Br == (const PixelPacket *) NULL) ||
        (Bi == (const PixelPacket *) NULL) ||
        (Cr == (PixelPacket *) NULL) || (Ci == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) images->columns; x++)
    {
      switch (op)
      {
        case AddComplexOperator:
        {
          Cr->red=Ar->red+Br->red;
          Ci->red=Ai->red+Bi->red;
          Cr->green=Ar->green+Br->green;
          Ci->green=Ai->green+Bi->green;
          Cr->blue=Ar->blue+Br->blue;
          Ci->blue=Ai->blue+Bi->blue;
          if (images->matte != MagickFalse)
            {
              Cr->opacity=Ar->opacity+Br->opacity;
              Ci->opacity=Ai->opacity+Bi->opacity;
            }
          break;
        }
        case ConjugateComplexOperator:
        default:
        {
          /*
            conj(A) = Ar - i*Ai.  Fix: the original negated Bi here, but a
            unary conjugate involves only A; B differs from A whenever four
            input images are supplied, which produced a wrong result.
          */
          Cr->red=Ar->red;
          Ci->red=(-Ai->red);
          Cr->green=Ar->green;
          Ci->green=(-Ai->green);
          Cr->blue=Ar->blue;
          Ci->blue=(-Ai->blue);
          if (images->matte != MagickFalse)
            {
              Cr->opacity=Ar->opacity;
              Ci->opacity=(-Ai->opacity);
            }
          break;
        }
        case DivideComplexOperator:
        {
          double
            gamma;

          /*
            C = A/B = A*conj(B)/(|B|^2 + snr); snr regularizes division by
            near-zero magnitudes.
          */
          gamma=PerceptibleReciprocal(Br->red*Br->red+Bi->red*Bi->red+snr);
          Cr->red=gamma*(Ar->red*Br->red+Ai->red*Bi->red);
          Ci->red=gamma*(Ai->red*Br->red-Ar->red*Bi->red);
          gamma=PerceptibleReciprocal(Br->green*Br->green+Bi->green*Bi->green+
            snr);
          Cr->green=gamma*(Ar->green*Br->green+Ai->green*Bi->green);
          Ci->green=gamma*(Ai->green*Br->green-Ar->green*Bi->green);
          gamma=PerceptibleReciprocal(Br->blue*Br->blue+Bi->blue*Bi->blue+snr);
          Cr->blue=gamma*(Ar->blue*Br->blue+Ai->blue*Bi->blue);
          Ci->blue=gamma*(Ai->blue*Br->blue-Ar->blue*Bi->blue);
          if (images->matte != MagickFalse)
            {
              gamma=PerceptibleReciprocal(Br->opacity*Br->opacity+Bi->opacity*
                Bi->opacity+snr);
              Cr->opacity=gamma*(Ar->opacity*Br->opacity+Ai->opacity*
                Bi->opacity);
              Ci->opacity=gamma*(Ai->opacity*Br->opacity-Ar->opacity*
                Bi->opacity);
            }
          break;
        }
        case MagnitudePhaseComplexOperator:
        {
          /* phase is mapped from [-pi,pi] into [0,1] */
          Cr->red=sqrt(Ar->red*Ar->red+Ai->red*Ai->red);
          Ci->red=atan2(Ai->red,Ar->red)/(2.0*MagickPI)+0.5;
          Cr->green=sqrt(Ar->green*Ar->green+Ai->green*Ai->green);
          Ci->green=atan2(Ai->green,Ar->green)/(2.0*MagickPI)+0.5;
          Cr->blue=sqrt(Ar->blue*Ar->blue+Ai->blue*Ai->blue);
          Ci->blue=atan2(Ai->blue,Ar->blue)/(2.0*MagickPI)+0.5;
          if (images->matte != MagickFalse)
            {
              Cr->opacity=sqrt(Ar->opacity*Ar->opacity+Ai->opacity*Ai->opacity);
              Ci->opacity=atan2(Ai->opacity,Ar->opacity)/(2.0*MagickPI)+0.5;
            }
          break;
        }
        case MultiplyComplexOperator:
        {
          Cr->red=QuantumScale*(Ar->red*Br->red-Ai->red*Bi->red);
          Ci->red=QuantumScale*(Ai->red*Br->red+Ar->red*Bi->red);
          Cr->green=QuantumScale*(Ar->green*Br->green-Ai->green*Bi->green);
          Ci->green=QuantumScale*(Ai->green*Br->green+Ar->green*Bi->green);
          Cr->blue=QuantumScale*(Ar->blue*Br->blue-Ai->blue*Bi->blue);
          Ci->blue=QuantumScale*(Ai->blue*Br->blue+Ar->blue*Bi->blue);
          if (images->matte != MagickFalse)
            {
              Cr->opacity=QuantumScale*(Ar->opacity*Br->opacity-Ai->opacity*
                Bi->opacity);
              Ci->opacity=QuantumScale*(Ai->opacity*Br->opacity+Ar->opacity*
                Bi->opacity);
            }
          break;
        }
        case RealImaginaryComplexOperator:
        {
          /* inverse of MagnitudePhase: A holds (magnitude, scaled phase) */
          Cr->red=Ar->red*cos(2.0*MagickPI*(Ai->red-0.5));
          Ci->red=Ar->red*sin(2.0*MagickPI*(Ai->red-0.5));
          Cr->green=Ar->green*cos(2.0*MagickPI*(Ai->green-0.5));
          Ci->green=Ar->green*sin(2.0*MagickPI*(Ai->green-0.5));
          Cr->blue=Ar->blue*cos(2.0*MagickPI*(Ai->blue-0.5));
          Ci->blue=Ar->blue*sin(2.0*MagickPI*(Ai->blue-0.5));
          if (images->matte != MagickFalse)
            {
              Cr->opacity=Ar->opacity*cos(2.0*MagickPI*(Ai->opacity-0.5));
              Ci->opacity=Ar->opacity*sin(2.0*MagickPI*(Ai->opacity-0.5));
            }
          break;
        }
        case SubtractComplexOperator:
        {
          Cr->red=Ar->red-Br->red;
          Ci->red=Ai->red-Bi->red;
          Cr->green=Ar->green-Br->green;
          Ci->green=Ai->green-Bi->green;
          Cr->blue=Ar->blue-Br->blue;
          Ci->blue=Ai->blue-Bi->blue;
          if (images->matte != MagickFalse)
            {
              Cr->opacity=Ar->opacity-Br->opacity;
              Ci->opacity=Ai->opacity-Bi->opacity;
            }
          break;
        }
      }
      Ar++;
      Ai++;
      Br++;
      Bi++;
      Cr++;
      Ci++;
    }
    if (SyncCacheViewAuthenticPixels(Ci_view,exception) == MagickFalse)
      status=MagickFalse;
    if (SyncCacheViewAuthenticPixels(Cr_view,exception) == MagickFalse)
      status=MagickFalse;
    if (images->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ComplexImages)
#endif
        proceed=SetImageProgress(images,ComplexImageTag,progress++,
          images->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  Cr_view=DestroyCacheView(Cr_view);
  Ci_view=DestroyCacheView(Ci_view);
  Br_view=DestroyCacheView(Br_view);
  Bi_view=DestroyCacheView(Bi_view);
  Ar_view=DestroyCacheView(Ar_view);
  Ai_view=DestroyCacheView(Ai_view);
  if (status == MagickFalse)
    complex_images=DestroyImageList(complex_images);
  return(complex_images);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F o r w a r d F o u r i e r T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ForwardFourierTransformImage() implements the discrete Fourier transform
% (DFT) of the image either as a magnitude / phase or real / imaginary image
% pair.
%
% The format of the ForwadFourierTransformImage method is:
%
% Image *ForwardFourierTransformImage(const Image *image,
% const MagickBooleanType modulus,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o modulus: if true, return as transform as a magnitude / phase pair
% otherwise a real / imaginary image pair.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(MAGICKCORE_FFTW_DELEGATE)
/*
  Circularly shift (roll) the width x height array roll_pixels by
  (x_offset,y_offset), e.g. to move the zero-frequency (DC, average color)
  term from (0,0) to the center.  Offsets are expected to lie strictly
  within one dimension in magnitude.  Returns MagickFalse only when the
  scratch buffer cannot be allocated.
*/
static MagickBooleanType RollFourier(const size_t width,const size_t height,
  const ssize_t x_offset,const ssize_t y_offset,double *roll_pixels)
{
  double
    *scratch_pixels;

  MemoryInfo
    *scratch_info;

  register ssize_t
    n,
    x;

  ssize_t
    u,
    v,
    y;

  /*
    Move zero frequency (DC, average color) from (0,0) to (width/2,height/2).
  */
  scratch_info=AcquireVirtualMemory(width,height*sizeof(*scratch_pixels));
  if (scratch_info == (MemoryInfo *) NULL)
    return(MagickFalse);
  scratch_pixels=(double *) GetVirtualMemoryBlob(scratch_info);
  n=0L;
  for (y=0L; y < (ssize_t) height; y++)
  {
    /*
      Destination row: wrap y+y_offset back into [0,height).
    */
    v=y+y_offset;
    if (y_offset < 0L)
      {
        if (v < 0L)
          v+=(ssize_t) height;
      }
    else
      if (v > ((ssize_t) height-1L))
        v-=(ssize_t) height;
    for (x=0L; x < (ssize_t) width; x++)
    {
      /*
        Destination column: wrap x+x_offset back into [0,width).
      */
      u=x+x_offset;
      if (x_offset < 0L)
        {
          if (u < 0L)
            u+=(ssize_t) width;
        }
      else
        if (u > ((ssize_t) width-1L))
          u-=(ssize_t) width;
      scratch_pixels[v*width+u]=roll_pixels[n++];
    }
  }
  (void) CopyMagickMemory(roll_pixels,scratch_pixels,height*width*
    sizeof(*scratch_pixels));
  scratch_info=RelinquishVirtualMemory(scratch_info);
  return(MagickTrue);
}
/*
  Expand the half-plane FFT output (center = width/2+1 columns per row) in
  source_pixels into a full width x height plane in forward_pixels: the rows
  are first rolled by height/2 to center the DC term, then the right half is
  copied directly and the left half is filled by mirroring the half plane
  (the r2c output of a real transform is conjugate-symmetric, so the mirror
  supplies the missing columns).  Returns MagickFalse on allocation failure
  inside RollFourier.
*/
static MagickBooleanType ForwardQuadrantSwap(const size_t width,
  const size_t height,double *source_pixels,double *forward_pixels)
{
  MagickBooleanType
    status;

  register ssize_t
    x;

  ssize_t
    center,
    y;

  /*
    Swap quadrants.
  */
  center=(ssize_t) (width/2L)+1L;
  /* roll rows by height/2 so the DC term moves to the vertical center */
  status=RollFourier((size_t) center,height,0L,(ssize_t) height/2L,
    source_pixels);
  if (status == MagickFalse)
    return(MagickFalse);
  /* right half of the plane: direct copy of the half-plane columns */
  for (y=0L; y < (ssize_t) height; y++)
    for (x=0L; x < (ssize_t) (width/2L); x++)
      forward_pixels[y*width+x+width/2L]=source_pixels[y*center+x];
  /* left half, rows 1..height-1: mirrored (row and column reversed) copy */
  for (y=1; y < (ssize_t) height; y++)
    for (x=0L; x < (ssize_t) (width/2L); x++)
      forward_pixels[(height-y)*width+width/2L-x-1L]=
        source_pixels[y*center+x+1L];
  /* left half of row 0: mirrored from row 0 itself */
  for (x=0L; x < (ssize_t) (width/2L); x++)
    forward_pixels[width/2L-x-1L]=source_pixels[x+1L];
  return(MagickTrue);
}
/*
  Negate the left half (columns x < width/2) of each row of the phase
  array, i.e. multiply those coefficients by -1.
*/
static void CorrectPhaseLHS(const size_t width,const size_t height,
  double *fourier_pixels)
{
  size_t
    column,
    row;

  for (row=0; row < height; row++)
    for (column=0; column < (width/2); column++)
      fourier_pixels[row*width+column]*=(-1.0);
}
/*
  Write the computed magnitude and phase (or real/imaginary) planes into the
  two images of the Fourier image list: quadrant-swap each plane into
  full-width layout, flip the sign of the left half of the phase plane, map
  phase into [0,1] when a magnitude/phase pair was requested, then scale each
  value by QuantumRange into the channel selected by fourier_info->channel.
  Returns MagickFalse on allocation failure or a cache-view sync error.
*/
static MagickBooleanType ForwardFourier(const FourierInfo *fourier_info,
  Image *image,double *magnitude,double *phase,ExceptionInfo *exception)
{
  CacheView
    *magnitude_view,
    *phase_view;

  double
    *magnitude_pixels,
    *phase_pixels;

  Image
    *magnitude_image,
    *phase_image;

  MagickBooleanType
    status;

  MemoryInfo
    *magnitude_info,
    *phase_info;

  register IndexPacket
    *indexes;

  register PixelPacket
    *q;

  register ssize_t
    x;

  ssize_t
    i,
    y;

  /* the result list must hold two images: magnitude first, phase second */
  magnitude_image=GetFirstImageInList(image);
  phase_image=GetNextImageInList(image);
  if (phase_image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ImageSequenceRequired","`%s'",image->filename);
      return(MagickFalse);
    }
  /*
    Create "Fourier Transform" image from constituent arrays.
  */
  magnitude_info=AcquireVirtualMemory((size_t) fourier_info->width,
    fourier_info->height*sizeof(*magnitude_pixels));
  phase_info=AcquireVirtualMemory((size_t) fourier_info->width,
    fourier_info->height*sizeof(*phase_pixels));
  if ((magnitude_info == (MemoryInfo *) NULL) ||
      (phase_info == (MemoryInfo *) NULL))
    {
      if (phase_info != (MemoryInfo *) NULL)
        phase_info=RelinquishVirtualMemory(phase_info);
      if (magnitude_info != (MemoryInfo *) NULL)
        magnitude_info=RelinquishVirtualMemory(magnitude_info);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
  (void) ResetMagickMemory(magnitude_pixels,0,fourier_info->width*
    fourier_info->height*sizeof(*magnitude_pixels));
  phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
  (void) ResetMagickMemory(phase_pixels,0,fourier_info->width*
    fourier_info->height*sizeof(*phase_pixels));
  /* expand both half-plane arrays into full, DC-centered planes */
  status=ForwardQuadrantSwap(fourier_info->width,fourier_info->height,
    magnitude,magnitude_pixels);
  if (status != MagickFalse)
    status=ForwardQuadrantSwap(fourier_info->width,fourier_info->height,phase,
      phase_pixels);
  CorrectPhaseLHS(fourier_info->width,fourier_info->height,phase_pixels);
  if (fourier_info->modulus != MagickFalse)
    {
      /* map phase from [-pi,pi] into the displayable range [0,1] */
      i=0L;
      for (y=0L; y < (ssize_t) fourier_info->height; y++)
        for (x=0L; x < (ssize_t) fourier_info->width; x++)
        {
          phase_pixels[i]/=(2.0*MagickPI);
          phase_pixels[i]+=0.5;
          i++;
        }
    }
  /* write the magnitude plane into the selected channel */
  magnitude_view=AcquireAuthenticCacheView(magnitude_image,exception);
  i=0L;
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    q=GetCacheViewAuthenticPixels(magnitude_view,0L,y,fourier_info->width,1UL,
      exception);
    if (q == (PixelPacket *) NULL)
      break;
    indexes=GetCacheViewAuthenticIndexQueue(magnitude_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedChannel:
        default:
        {
          SetPixelRed(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
          break;
        }
        case GreenChannel:
        {
          SetPixelGreen(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
          break;
        }
        case BlueChannel:
        {
          SetPixelBlue(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
          break;
        }
        case OpacityChannel:
        {
          SetPixelOpacity(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
          break;
        }
        case IndexChannel:
        {
          SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange*
            magnitude_pixels[i]));
          break;
        }
        case GrayChannels:
        {
          SetPixelGray(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
          break;
        }
      }
      i++;
      q++;
    }
    status=SyncCacheViewAuthenticPixels(magnitude_view,exception);
    if (status == MagickFalse)
      break;
  }
  magnitude_view=DestroyCacheView(magnitude_view);
  /* write the phase plane into the selected channel of the second image */
  i=0L;
  phase_view=AcquireAuthenticCacheView(phase_image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    q=GetCacheViewAuthenticPixels(phase_view,0L,y,fourier_info->width,1UL,
      exception);
    if (q == (PixelPacket *) NULL)
      break;
    indexes=GetCacheViewAuthenticIndexQueue(phase_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedChannel:
        default:
        {
          SetPixelRed(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
          break;
        }
        case GreenChannel:
        {
          SetPixelGreen(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
          break;
        }
        case BlueChannel:
        {
          SetPixelBlue(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
          break;
        }
        case OpacityChannel:
        {
          SetPixelOpacity(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
          break;
        }
        case IndexChannel:
        {
          SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange*phase_pixels[i]));
          break;
        }
        case GrayChannels:
        {
          SetPixelGray(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
          break;
        }
      }
      i++;
      q++;
    }
    status=SyncCacheViewAuthenticPixels(phase_view,exception);
    if (status == MagickFalse)
      break;
  }
  phase_view=DestroyCacheView(phase_view);
  phase_info=RelinquishVirtualMemory(phase_info);
  magnitude_info=RelinquishVirtualMemory(magnitude_info);
  return(status);
}
/*
  Run the forward 2-D FFT for one channel: extract the selected channel into
  a QuantumScale-normalized double array, execute FFTW's real-to-complex
  transform, optionally normalize by 1/(width*height), and split the
  half-plane result into magnitude/phase (or real/imaginary) arrays of
  fourier_info->center columns per row.
*/
static MagickBooleanType ForwardFourierTransform(FourierInfo *fourier_info,
  const Image *image,double *magnitude_pixels,double *phase_pixels,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const char
    *value;

  double
    *source_pixels;

  fftw_complex
    *forward_pixels;

  fftw_plan
    fftw_r2c_plan;

  MemoryInfo
    *forward_info,
    *source_info;

  register const IndexPacket
    *indexes;

  register const PixelPacket
    *p;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  /*
    Generate the forward Fourier transform.
  */
  source_info=AcquireVirtualMemory((size_t) fourier_info->width,
    fourier_info->height*sizeof(*source_pixels));
  if (source_info == (MemoryInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  source_pixels=(double *) GetVirtualMemoryBlob(source_info);
  ResetMagickMemory(source_pixels,0,fourier_info->width*fourier_info->height*
    sizeof(*source_pixels));
  /* copy the selected channel, scaled to [0,1], into the source array */
  i=0L;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0L,y,fourier_info->width,1UL,
      exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedChannel:
        default:
        {
          source_pixels[i]=QuantumScale*GetPixelRed(p);
          break;
        }
        case GreenChannel:
        {
          source_pixels[i]=QuantumScale*GetPixelGreen(p);
          break;
        }
        case BlueChannel:
        {
          source_pixels[i]=QuantumScale*GetPixelBlue(p);
          break;
        }
        case OpacityChannel:
        {
          source_pixels[i]=QuantumScale*GetPixelOpacity(p);
          break;
        }
        case IndexChannel:
        {
          source_pixels[i]=QuantumScale*GetPixelIndex(indexes+x);
          break;
        }
        case GrayChannels:
        {
          source_pixels[i]=QuantumScale*GetPixelGray(p);
          break;
        }
      }
      i++;
      p++;
    }
  }
  image_view=DestroyCacheView(image_view);
  /* r2c output is a half plane: width x (height/2+1) complex values */
  forward_info=AcquireVirtualMemory((size_t) fourier_info->width,
    (fourier_info->height/2+1)*sizeof(*forward_pixels));
  if (forward_info == (MemoryInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      source_info=(MemoryInfo *) RelinquishVirtualMemory(source_info);
      return(MagickFalse);
    }
  forward_pixels=(fftw_complex *) GetVirtualMemoryBlob(forward_info);
  /* FFTW's planner is not thread-safe, so plan creation is serialized.
     NOTE(review): fftw_destroy_plan below is outside the critical section —
     the FFTW docs say only fftw_execute is thread-safe; confirm this cannot
     race with concurrent planning.
     NOTE(review): fftw_plan_dft_r2c_2d expects (n0=rows, n1=cols); passing
     (width,height) is safe only because callers pad the image to a square —
     verify if that invariant ever changes. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ForwardFourierTransform)
#endif
  fftw_r2c_plan=fftw_plan_dft_r2c_2d(fourier_info->width,fourier_info->height,
    source_pixels,forward_pixels,FFTW_ESTIMATE);
  fftw_execute_dft_r2c(fftw_r2c_plan,source_pixels,forward_pixels);
  fftw_destroy_plan(fftw_r2c_plan);
  source_info=(MemoryInfo *) RelinquishVirtualMemory(source_info);
  value=GetImageArtifact(image,"fourier:normalize");
  if ((value == (const char *) NULL) || (LocaleCompare(value,"forward") == 0))
    {
      double
        gamma;

      /*
        Normalize Fourier transform.
      */
      i=0L;
      gamma=PerceptibleReciprocal((double) fourier_info->width*
        fourier_info->height);
      for (y=0L; y < (ssize_t) fourier_info->height; y++)
        for (x=0L; x < (ssize_t) fourier_info->center; x++)
        {
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
          forward_pixels[i]*=gamma;
#else
          forward_pixels[i][0]*=gamma;
          forward_pixels[i][1]*=gamma;
#endif
          i++;
        }
    }
  /*
    Generate magnitude and phase (or real and imaginary).
  */
  i=0L;
  if (fourier_info->modulus != MagickFalse)
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
        magnitude_pixels[i]=cabs(forward_pixels[i]);
        phase_pixels[i]=carg(forward_pixels[i]);
        i++;
      }
  else
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
        magnitude_pixels[i]=creal(forward_pixels[i]);
        phase_pixels[i]=cimag(forward_pixels[i]);
        i++;
      }
  forward_info=(MemoryInfo *) RelinquishVirtualMemory(forward_info);
  return(MagickTrue);
}
/*
  ForwardFourierTransformChannel() computes the forward DFT of a single
  channel of `image' and stores the result (magnitude/phase or
  real/imaginary, per `modulus') into the two-image list `fourier_image'.

  Returns MagickTrue on success; MagickFalse on allocation or transform
  failure (an exception is raised in `exception').
*/
static MagickBooleanType ForwardFourierTransformChannel(const Image *image,
  const ChannelType channel,const MagickBooleanType modulus,
  Image *fourier_image,ExceptionInfo *exception)
{
  double
    *magnitude_pixels,
    *phase_pixels;

  FourierInfo
    fourier_info;

  MagickBooleanType
    status;

  MemoryInfo
    *magnitude_info,
    *phase_info;

  /*
    Pad the transform extent to an even square so the FFT dimensions are
    well-defined for non-square or odd-sized images.
  */
  fourier_info.width=image->columns;
  fourier_info.height=image->rows;
  if ((image->columns != image->rows) || ((image->columns % 2) != 0) ||
      ((image->rows % 2) != 0))
    {
      size_t extent=image->columns < image->rows ? image->rows : image->columns;
      fourier_info.width=(extent & 0x01) == 1 ? extent+1UL : extent;
    }
  fourier_info.height=fourier_info.width;
  fourier_info.center=(ssize_t) (fourier_info.width/2L)+1L;
  fourier_info.channel=channel;
  fourier_info.modulus=modulus;
  /*
    The r2c transform produces width*(height/2+1) complex values; the
    magnitude and phase buffers are sized to match.
  */
  magnitude_info=AcquireVirtualMemory((size_t) fourier_info.width,
    (fourier_info.height/2+1)*sizeof(*magnitude_pixels));
  phase_info=AcquireVirtualMemory((size_t) fourier_info.width,
    (fourier_info.height/2+1)*sizeof(*phase_pixels));
  if ((magnitude_info == (MemoryInfo *) NULL) ||
      (phase_info == (MemoryInfo *) NULL))
    {
      /*
        Release whichever buffer *was* acquired.  (Bug fix: the original
        code tested `magnitude_info == NULL' before relinquishing it, which
        relinquished a NULL handle and leaked a live magnitude buffer when
        only the phase allocation failed.)
      */
      if (phase_info != (MemoryInfo *) NULL)
        phase_info=RelinquishVirtualMemory(phase_info);
      if (magnitude_info != (MemoryInfo *) NULL)
        magnitude_info=RelinquishVirtualMemory(magnitude_info);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
  phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
  /*
    Run the FFT, then write the two result planes into the image list.
  */
  status=ForwardFourierTransform(&fourier_info,image,magnitude_pixels,
    phase_pixels,exception);
  if (status != MagickFalse)
    status=ForwardFourier(&fourier_info,fourier_image,magnitude_pixels,
      phase_pixels,exception);
  phase_info=RelinquishVirtualMemory(phase_info);
  magnitude_info=RelinquishVirtualMemory(magnitude_info);
  return(status);
}
#endif
/*
  ForwardFourierTransformImage() returns a two-image list: the magnitude
  (or real) plane followed by the phase (or imaginary) plane of the forward
  DFT of `image'.  Returns an empty list (plus a warning exception) when
  FFTW support is not compiled in, or NULL-destroyed list on failure.
*/
MagickExport Image *ForwardFourierTransformImage(const Image *image,
const MagickBooleanType modulus,ExceptionInfo *exception)
{
Image
*fourier_image;
fourier_image=NewImageList();
#if !defined(MAGICKCORE_FFTW_DELEGATE)
/* No FFTW delegate: warn and fall through to return the empty list. */
(void) modulus;
(void) ThrowMagickException(exception,GetMagickModule(),
MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)",
image->filename);
#else
{
Image
*magnitude_image;
size_t
height,
width;
/*
  Pad the result to an even square (larger of columns/rows, rounded up to
  even) so both result images share one FFT extent.
*/
width=image->columns;
height=image->rows;
if ((image->columns != image->rows) || ((image->columns % 2) != 0) ||
((image->rows % 2) != 0))
{
size_t extent=image->columns < image->rows ? image->rows :
image->columns;
width=(extent & 0x01) == 1 ? extent+1UL : extent;
}
height=width;
/* First list entry: magnitude (or real) plane. */
magnitude_image=CloneImage(image,width,height,MagickTrue,exception);
if (magnitude_image != (Image *) NULL)
{
Image
*phase_image;
magnitude_image->storage_class=DirectClass;
magnitude_image->depth=32UL;
/* Second list entry: phase (or imaginary) plane. */
phase_image=CloneImage(image,width,height,MagickTrue,exception);
if (phase_image == (Image *) NULL)
magnitude_image=DestroyImage(magnitude_image);
else
{
MagickBooleanType
is_gray,
status;
phase_image->storage_class=DirectClass;
phase_image->depth=32UL;
AppendImageToList(&fourier_image,magnitude_image);
AppendImageToList(&fourier_image,phase_image);
status=MagickTrue;
is_gray=IsGrayImage(image,exception);
/*
  Transform each channel in its own OpenMP section.  A gray image needs
  only the single GrayChannels transform; otherwise R/G/B run in parallel,
  with opacity and index (black) transformed only when present.  Any
  section failure clears the shared `status'.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel sections
#endif
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
if (is_gray != MagickFalse)
thread_status=ForwardFourierTransformChannel(image,
GrayChannels,modulus,fourier_image,exception);
else
thread_status=ForwardFourierTransformChannel(image,RedChannel,
modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (is_gray == MagickFalse)
thread_status=ForwardFourierTransformChannel(image,
GreenChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (is_gray == MagickFalse)
thread_status=ForwardFourierTransformChannel(image,
BlueChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
/* Opacity plane only when the image has a matte channel. */
if (image->matte != MagickFalse)
thread_status=ForwardFourierTransformChannel(image,
OpacityChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
/* Index (black) plane only for CMYK images. */
if (image->colorspace == CMYKColorspace)
thread_status=ForwardFourierTransformChannel(image,
IndexChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
}
if (status == MagickFalse)
fourier_image=DestroyImageList(fourier_image);
fftw_cleanup();
}
}
}
#endif
return(fourier_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n v e r s e F o u r i e r T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InverseFourierTransformImage() implements the inverse discrete Fourier
% transform (DFT) of the image either as a magnitude / phase or real /
% imaginary image pair.
%
% The format of the InverseFourierTransformImage method is:
%
% Image *InverseFourierTransformImage(const Image *magnitude_image,
% const Image *phase_image,const MagickBooleanType modulus,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o magnitude_image: the magnitude or real image.
%
% o phase_image: the phase or imaginary image.
%
% o modulus: if true, return transform as a magnitude / phase pair
% otherwise a real / imaginary image pair.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(MAGICKCORE_FFTW_DELEGATE)
/*
  InverseQuadrantSwap() reorders the `width' x `height' plane in `source'
  into the half-width (width/2+1 column) layout in `destination' expected
  by the complex-to-real FFT, undoing the quadrant arrangement produced by
  the forward transform, then rolls the result by -height/2 rows.
*/
static MagickBooleanType InverseQuadrantSwap(const size_t width,
  const size_t height,const double *source,double *destination)
{
  ssize_t
    half_width,
    u,
    v;

  half_width=(ssize_t) (width/2L)+1L;
  /* Fold the interior rows back into the half-width layout. */
  for (v=1L; v < (ssize_t) height; v++)
    for (u=0L; u < (ssize_t) (width/2L+1L); u++)
      destination[(height-v)*half_width-u+width/2L]=source[v*width+u];
  /* Rebuild the leading column from the source's center column. */
  for (v=0L; v < (ssize_t) height; v++)
    destination[v*half_width]=source[v*width+width/2L];
  /* Reverse the first half_width entries of the leading row. */
  for (u=0L; u < half_width; u++)
    destination[u]=source[half_width-u-1L];
  return(RollFourier(half_width,height,0L,(ssize_t) height/-2L,destination));
}
/*
  InverseFourier() reads one channel from the magnitude and phase images,
  quadrant-swaps both planes back into FFT order, and merges them into the
  complex array `fourier_pixels' ready for the c2r inverse transform.
  Returns MagickFalse on allocation failure or quadrant-swap failure.
*/
static MagickBooleanType InverseFourier(FourierInfo *fourier_info,
const Image *magnitude_image,const Image *phase_image,
fftw_complex *fourier_pixels,ExceptionInfo *exception)
{
CacheView
*magnitude_view,
*phase_view;
double
*inverse_pixels,
*magnitude_pixels,
*phase_pixels;
MagickBooleanType
status;
MemoryInfo
*inverse_info,
*magnitude_info,
*phase_info;
register const IndexPacket
*indexes;
register const PixelPacket
*p;
register ssize_t
i,
x;
ssize_t
y;
/*
Inverse Fourier - read image and break down into a double array.
*/
magnitude_info=AcquireVirtualMemory((size_t) fourier_info->width,
fourier_info->height*sizeof(*magnitude_pixels));
phase_info=AcquireVirtualMemory((size_t) fourier_info->width,
fourier_info->height*sizeof(*phase_pixels));
inverse_info=AcquireVirtualMemory((size_t) fourier_info->width,
(fourier_info->height/2+1)*sizeof(*inverse_pixels));
if ((magnitude_info == (MemoryInfo *) NULL) ||
(phase_info == (MemoryInfo *) NULL) ||
(inverse_info == (MemoryInfo *) NULL))
{
/* Release whichever buffers were acquired before failing. */
if (magnitude_info != (MemoryInfo *) NULL)
magnitude_info=RelinquishVirtualMemory(magnitude_info);
if (phase_info != (MemoryInfo *) NULL)
phase_info=RelinquishVirtualMemory(phase_info);
if (inverse_info != (MemoryInfo *) NULL)
inverse_info=RelinquishVirtualMemory(inverse_info);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
magnitude_image->filename);
return(MagickFalse);
}
magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
inverse_pixels=(double *) GetVirtualMemoryBlob(inverse_info);
/* Read the selected channel of the magnitude image, scaled to [0,1]. */
i=0L;
magnitude_view=AcquireVirtualCacheView(magnitude_image,exception);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
{
p=GetCacheViewVirtualPixels(magnitude_view,0L,y,fourier_info->width,1UL,
exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetCacheViewAuthenticIndexQueue(magnitude_view);
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
switch (fourier_info->channel)
{
case RedChannel:
default:
{
magnitude_pixels[i]=QuantumScale*GetPixelRed(p);
break;
}
case GreenChannel:
{
magnitude_pixels[i]=QuantumScale*GetPixelGreen(p);
break;
}
case BlueChannel:
{
magnitude_pixels[i]=QuantumScale*GetPixelBlue(p);
break;
}
case OpacityChannel:
{
magnitude_pixels[i]=QuantumScale*GetPixelOpacity(p);
break;
}
case IndexChannel:
{
magnitude_pixels[i]=QuantumScale*GetPixelIndex(indexes+x);
break;
}
case GrayChannels:
{
magnitude_pixels[i]=QuantumScale*GetPixelGray(p);
break;
}
}
i++;
p++;
}
}
magnitude_view=DestroyCacheView(magnitude_view);
/* Swap the magnitude plane back into FFT order (via a scratch buffer). */
status=InverseQuadrantSwap(fourier_info->width,fourier_info->height,
magnitude_pixels,inverse_pixels);
(void) CopyMagickMemory(magnitude_pixels,inverse_pixels,fourier_info->height*
fourier_info->center*sizeof(*magnitude_pixels));
/* Read the selected channel of the phase image, scaled to [0,1]. */
i=0L;
phase_view=AcquireVirtualCacheView(phase_image,exception);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
{
p=GetCacheViewVirtualPixels(phase_view,0,y,fourier_info->width,1,
exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetCacheViewAuthenticIndexQueue(phase_view);
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
switch (fourier_info->channel)
{
case RedChannel:
default:
{
phase_pixels[i]=QuantumScale*GetPixelRed(p);
break;
}
case GreenChannel:
{
phase_pixels[i]=QuantumScale*GetPixelGreen(p);
break;
}
case BlueChannel:
{
phase_pixels[i]=QuantumScale*GetPixelBlue(p);
break;
}
case OpacityChannel:
{
phase_pixels[i]=QuantumScale*GetPixelOpacity(p);
break;
}
case IndexChannel:
{
phase_pixels[i]=QuantumScale*GetPixelIndex(indexes+x);
break;
}
case GrayChannels:
{
phase_pixels[i]=QuantumScale*GetPixelGray(p);
break;
}
}
i++;
p++;
}
}
if (fourier_info->modulus != MagickFalse)
{
/* Modulus mode stores phase scaled into [0,1]; map back to [-pi,pi]. */
i=0L;
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
phase_pixels[i]-=0.5;
phase_pixels[i]*=(2.0*MagickPI);
i++;
}
}
phase_view=DestroyCacheView(phase_view);
CorrectPhaseLHS(fourier_info->width,fourier_info->height,phase_pixels);
/* Swap the phase plane too; only if the first swap succeeded. */
if (status != MagickFalse)
status=InverseQuadrantSwap(fourier_info->width,fourier_info->height,
phase_pixels,inverse_pixels);
(void) CopyMagickMemory(phase_pixels,inverse_pixels,fourier_info->height*
fourier_info->center*sizeof(*phase_pixels));
inverse_info=RelinquishVirtualMemory(inverse_info);
/*
Merge two sets.
*/
/* Combine magnitude/phase (polar) or real/imaginary (rectangular). */
i=0L;
if (fourier_info->modulus != MagickFalse)
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->center; x++)
{
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
fourier_pixels[i]=magnitude_pixels[i]*cos(phase_pixels[i])+I*
magnitude_pixels[i]*sin(phase_pixels[i]);
#else
fourier_pixels[i][0]=magnitude_pixels[i]*cos(phase_pixels[i]);
fourier_pixels[i][1]=magnitude_pixels[i]*sin(phase_pixels[i]);
#endif
i++;
}
else
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->center; x++)
{
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
fourier_pixels[i]=magnitude_pixels[i]+I*phase_pixels[i];
#else
fourier_pixels[i][0]=magnitude_pixels[i];
fourier_pixels[i][1]=phase_pixels[i];
#endif
i++;
}
magnitude_info=RelinquishVirtualMemory(magnitude_info);
phase_info=RelinquishVirtualMemory(phase_info);
return(status);
}
/*
  InverseFourierTransform() applies the FFTW complex-to-real inverse DFT to
  `fourier_pixels' and writes the reconstructed channel into `image',
  clipping to the image's actual columns/rows (the transform extent may be
  padded larger).  Always returns MagickTrue once the source buffer has
  been allocated.
*/
static MagickBooleanType InverseFourierTransform(FourierInfo *fourier_info,
fftw_complex *fourier_pixels,Image *image,ExceptionInfo *exception)
{
CacheView
*image_view;
double
*source_pixels;
const char
*value;
fftw_plan
fftw_c2r_plan;
MemoryInfo
*source_info;
register IndexPacket
*indexes;
register PixelPacket
*q;
register ssize_t
i,
x;
ssize_t
y;
source_info=AcquireVirtualMemory((size_t) fourier_info->width,
fourier_info->height*sizeof(*source_pixels));
if (source_info == (MemoryInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(MagickFalse);
}
source_pixels=(double *) GetVirtualMemoryBlob(source_info);
/*
  By default normalization happens in the forward pass; only scale here
  when the user asked for "fourier:normalize"="inverse".
  NOTE(review): value is NULL when the artifact is absent; this relies on
  LocaleCompare() being NULL-safe -- confirm against the MagickCore string
  utilities.
*/
value=GetImageArtifact(image,"fourier:normalize");
if (LocaleCompare(value,"inverse") == 0)
{
double
gamma;
/*
Normalize inverse transform.
*/
i=0L;
gamma=PerceptibleReciprocal((double) fourier_info->width*
fourier_info->height);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->center; x++)
{
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
fourier_pixels[i]*=gamma;
#else
fourier_pixels[i][0]*=gamma;
fourier_pixels[i][1]*=gamma;
#endif
i++;
}
}
/* FFTW planning is not thread-safe; serialize plan creation. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_InverseFourierTransform)
#endif
fftw_c2r_plan=fftw_plan_dft_c2r_2d(fourier_info->width,fourier_info->height,
fourier_pixels,source_pixels,FFTW_ESTIMATE);
fftw_execute_dft_c2r(fftw_c2r_plan,fourier_pixels,source_pixels);
fftw_destroy_plan(fftw_c2r_plan);
/*
  Copy the real-valued result into the selected channel, skipping padded
  columns/rows that fall outside the destination image.
*/
i=0L;
image_view=AcquireAuthenticCacheView(image,exception);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
{
if (y >= (ssize_t) image->rows)
break;
q=GetCacheViewAuthenticPixels(image_view,0L,y,fourier_info->width >
image->columns ? image->columns : fourier_info->width,1UL,exception);
if (q == (PixelPacket *) NULL)
break;
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
if (x < (ssize_t) image->columns)
switch (fourier_info->channel)
{
case RedChannel:
default:
{
SetPixelRed(q,ClampToQuantum(QuantumRange*source_pixels[i]));
break;
}
case GreenChannel:
{
SetPixelGreen(q,ClampToQuantum(QuantumRange*source_pixels[i]));
break;
}
case BlueChannel:
{
SetPixelBlue(q,ClampToQuantum(QuantumRange*source_pixels[i]));
break;
}
case OpacityChannel:
{
SetPixelOpacity(q,ClampToQuantum(QuantumRange*source_pixels[i]));
break;
}
case IndexChannel:
{
SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange*
source_pixels[i]));
break;
}
case GrayChannels:
{
SetPixelGray(q,ClampToQuantum(QuantumRange*source_pixels[i]));
break;
}
}
i++;
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
break;
}
image_view=DestroyCacheView(image_view);
source_info=RelinquishVirtualMemory(source_info);
return(MagickTrue);
}
/*
  InverseFourierTransformChannel() reconstructs one channel of the spatial
  image from the magnitude/phase (or real/imaginary) image pair and writes
  it into `fourier_image'.  Returns MagickTrue on success.
*/
static MagickBooleanType InverseFourierTransformChannel(
  const Image *magnitude_image,const Image *phase_image,
  const ChannelType channel,const MagickBooleanType modulus,
  Image *fourier_image,ExceptionInfo *exception)
{
  fftw_complex
    *inverse_pixels;

  FourierInfo
    fourier_info;

  MagickBooleanType
    status;

  MemoryInfo
    *inverse_info;

  /*
    Mirror the forward transform's padding: a square extent taken from the
    larger image dimension, rounded up to an even value.
  */
  fourier_info.width=magnitude_image->columns;
  fourier_info.height=magnitude_image->rows;
  if ((magnitude_image->columns != magnitude_image->rows) ||
      ((magnitude_image->columns % 2) != 0) ||
      ((magnitude_image->rows % 2) != 0))
    {
      size_t pad=magnitude_image->columns < magnitude_image->rows ?
        magnitude_image->rows : magnitude_image->columns;
      if ((pad & 0x01) == 1)
        pad++;
      fourier_info.width=pad;
    }
  fourier_info.height=fourier_info.width;
  fourier_info.center=(ssize_t) (fourier_info.width/2L)+1L;
  fourier_info.channel=channel;
  fourier_info.modulus=modulus;
  /* Scratch for width*(height/2+1) complex FFT coefficients. */
  inverse_info=AcquireVirtualMemory((size_t) fourier_info.width,
    (fourier_info.height/2+1)*sizeof(*inverse_pixels));
  if (inverse_info == (MemoryInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        magnitude_image->filename);
      return(MagickFalse);
    }
  inverse_pixels=(fftw_complex *) GetVirtualMemoryBlob(inverse_info);
  /* Merge the two planes into complex form, then run the inverse FFT. */
  status=InverseFourier(&fourier_info,magnitude_image,phase_image,
    inverse_pixels,exception);
  if (status != MagickFalse)
    status=InverseFourierTransform(&fourier_info,inverse_pixels,fourier_image,
      exception);
  inverse_info=RelinquishVirtualMemory(inverse_info);
  return(status);
}
#endif
/*
  InverseFourierTransformImage() reconstructs a spatial-domain image from a
  magnitude/phase (or real/imaginary, per `modulus') image pair.  Returns
  NULL (with an exception) when phase_image is missing, when FFTW is not
  built in, or when any channel transform fails.
*/
MagickExport Image *InverseFourierTransformImage(const Image *magnitude_image,
const Image *phase_image,const MagickBooleanType modulus,
ExceptionInfo *exception)
{
Image
*fourier_image;
assert(magnitude_image != (Image *) NULL);
assert(magnitude_image->signature == MagickCoreSignature);
if (magnitude_image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
magnitude_image->filename);
/* Both halves of the pair are required. */
if (phase_image == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),ImageError,
"ImageSequenceRequired","`%s'",magnitude_image->filename);
return((Image *) NULL);
}
#if !defined(MAGICKCORE_FFTW_DELEGATE)
fourier_image=(Image *) NULL;
(void) modulus;
(void) ThrowMagickException(exception,GetMagickModule(),
MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)",
magnitude_image->filename);
#else
{
fourier_image=CloneImage(magnitude_image,magnitude_image->columns,
magnitude_image->rows,MagickTrue,exception);
if (fourier_image != (Image *) NULL)
{
MagickBooleanType
is_gray,
status;
status=MagickTrue;
/* Treat as gray only when both planes are gray. */
is_gray=IsGrayImage(magnitude_image,exception);
if (is_gray != MagickFalse)
is_gray=IsGrayImage(phase_image,exception);
/*
  Invert each channel in its own OpenMP section; a failure in any section
  clears the shared `status'.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel sections
#endif
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
if (is_gray != MagickFalse)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,GrayChannels,modulus,fourier_image,exception);
else
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,RedChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (is_gray == MagickFalse)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,GreenChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (is_gray == MagickFalse)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,BlueChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
/* Opacity only when the magnitude image has a matte channel. */
if (magnitude_image->matte != MagickFalse)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,OpacityChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
/* Index (black) only for CMYK images. */
if (magnitude_image->colorspace == CMYKColorspace)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,IndexChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
}
if (status == MagickFalse)
fourier_image=DestroyImage(fourier_image);
}
fftw_cleanup();
}
#endif
return(fourier_image);
}
|
SpatialSubSampling.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SpatialSubSampling.c"
#else
/*
  Lua: module:updateOutput(input).  For each plane k, computes
  output[k] = bias[k] + weight[k] * (sum over each kW x kH window of input,
  stepped by dW x dH).  Accepts a 3D (CxHxW) or 4D (NxCxHxW) tensor and
  resizes `output' accordingly; input is made contiguous first.
  Leaves `output' on the Lua stack (returns 1).
*/
static int nn_(SpatialSubSampling_updateOutput)(lua_State *L)
{
THTensor *input = luaT_checkudata(L, 2, torch_(Tensor_id));
int kW = luaT_getfieldcheckint(L, 1, "kW");
int kH = luaT_getfieldcheckint(L, 1, "kH");
int dW = luaT_getfieldcheckint(L, 1, "dW");
int dH = luaT_getfieldcheckint(L, 1, "dH");
int nInputPlane = luaT_getfieldcheckint(L, 1, "nInputPlane");
THTensor *weight = luaT_getfieldcheckudata(L, 1, "weight", torch_(Tensor_id));
THTensor *bias = luaT_getfieldcheckudata(L, 1, "bias", torch_(Tensor_id));
THTensor *output = luaT_getfieldcheckudata(L, 1, "output", torch_(Tensor_id));
real *weight_data = THTensor_(data)(weight);
real *bias_data = THTensor_(data)(bias);
real *output_data;
real *input_data;
luaL_argcheck(L, input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D(batch mode) tensor expected");
/* In batch mode, dimension 0 is the batch index; shift H/W dims by one. */
int dimw = 2;
int dimh = 1;
long nbatch = 1;
if (input->nDimension == 4) {
nbatch = input->size[0];
dimw++;
dimh++;
}
long inputWidth = input->size[dimw];
long inputHeight = input->size[dimh];
/* "Valid" pooling: no padding. */
long outputWidth = (inputWidth - kW) / dW + 1;
long outputHeight = (inputHeight - kH) / dH + 1;
luaL_argcheck(L, input->size[dimh-1] == nInputPlane, 2, "invalid number of input planes");
luaL_argcheck(L, inputWidth >= kW && inputHeight >= kH, 2, "input image smaller than kernel size");
if (input->nDimension == 3)
THTensor_(resize3d)(output, nInputPlane, outputHeight, outputWidth);
else
THTensor_(resize4d)(output, input->size[0], nInputPlane, outputHeight, outputWidth);
/* Contiguous copy (if needed) so raw pointer arithmetic below is valid. */
input = THTensor_(newContiguous)(input);
input_data = THTensor_(data)(input);
output_data = THTensor_(data)(output);
long k;
#pragma omp parallel for private(k)
for(k = 0; k < nInputPlane; k++)
{
long p;
for(p = 0; p < nbatch; p++)
{
long xx, yy;
/* For all output pixels... */
real *ptr_output = output_data + p*nInputPlane*outputWidth*outputHeight + k*outputWidth*outputHeight;
/* Get the good mask for (k,i) (k out, i in) */
real the_weight = weight_data[k];
/* Initialize to the bias */
real z = bias_data[k];
long i;
for(i = 0; i < outputWidth*outputHeight; i++)
ptr_output[i] = z;
for(yy = 0; yy < outputHeight; yy++)
{
for(xx = 0; xx < outputWidth; xx++)
{
// Compute the mean of the input image...
real *ptr_input = input_data + p*nInputPlane*inputWidth*inputHeight + k*inputWidth*inputHeight + yy*dH*inputWidth+xx*dW;
real sum = 0;
long kx, ky;
for(ky = 0; ky < kH; ky++)
{
for(kx = 0; kx < kW; kx++)
sum += ptr_input[kx];
ptr_input += inputWidth; // next input line
}
// Update output
*ptr_output++ += the_weight*sum;
}
}
}
}
/* Release the contiguous copy acquired above. */
THTensor_(free)(input);
return 1;
}
/*
  Lua: module:updateGradInput(input, gradOutput).  For each plane k,
  distributes weight[k] * gradOutput over each kW x kH input window
  (stride dW x dH), accumulating into gradInput (resized to match input).
  Returns 1 (gradInput stays a field of self).

  Changes vs. original: removed the unused local `input_data' (fetched but
  never read) and the redundant second assignment of `gradOutput_data'.
  NOTE(review): raw pointer arithmetic assumes gradOutput and gradInput are
  contiguous -- TODO confirm callers guarantee this.
*/
static int nn_(SpatialSubSampling_updateGradInput)(lua_State *L)
{
  THTensor *input = luaT_checkudata(L, 2, torch_(Tensor_id));
  THTensor *gradOutput = luaT_checkudata(L, 3, torch_(Tensor_id));
  int kW = luaT_getfieldcheckint(L, 1, "kW");
  int kH = luaT_getfieldcheckint(L, 1, "kH");
  int dW = luaT_getfieldcheckint(L, 1, "dW");
  int dH = luaT_getfieldcheckint(L, 1, "dH");
  int nInputPlane = luaT_getfieldcheckint(L, 1, "nInputPlane");
  THTensor *weight = luaT_getfieldcheckudata(L, 1, "weight", torch_(Tensor_id));
  THTensor *gradInput = luaT_getfieldcheckudata(L, 1, "gradInput", torch_(Tensor_id));

  /* In batch mode, dimension 0 is the batch index; shift H/W dims by one. */
  int dimw = 2;
  int dimh = 1;
  long nbatch = 1;
  if (input->nDimension == 4) {
    nbatch = input->size[0];
    dimw++;
    dimh++;
  }

  long inputWidth = input->size[dimw];
  long inputHeight = input->size[dimh];
  long outputWidth = (inputWidth - kW) / dW + 1;
  long outputHeight = (inputHeight - kH) / dH + 1;

  real *weight_data = THTensor_(data)(weight);
  real *gradOutput_data = THTensor_(data)(gradOutput);
  real *gradInput_data;

  THTensor_(resizeAs)(gradInput, input);
  gradInput_data = THTensor_(data)(gradInput);

  long k;
#pragma omp parallel for private(k)
  for(k = 0; k < nInputPlane; k++)
  {
    long p;
    for(p = 0; p < nbatch; p++)
    {
      real the_weight = weight_data[k];
      real *ptr_gradOutput = gradOutput_data + p*nInputPlane*outputHeight*outputWidth + k*outputWidth*outputHeight;
      long xx, yy;
      real* ptr_gi = gradInput_data + p*nInputPlane*inputWidth*inputHeight + k*inputWidth*inputHeight;
      long i;
      /* Zero this plane first: windows may overlap, so we accumulate. */
      for(i=0; i<inputWidth*inputHeight; i++)
        ptr_gi[i] = 0.0;
      for(yy = 0; yy < outputHeight; yy++)
      {
        for(xx = 0; xx < outputWidth; xx++)
        {
          /* Spread each scaled output gradient over its input window. */
          real *ptr_gradInput = gradInput_data + p*nInputPlane*inputWidth*inputHeight + k*inputWidth*inputHeight + yy*dH*inputWidth+xx*dW;
          real z = *ptr_gradOutput++ * the_weight;
          long kx, ky;
          for(ky = 0; ky < kH; ky++)
          {
            for(kx = 0; kx < kW; kx++)
              ptr_gradInput[kx] += z;
            ptr_gradInput += inputWidth;
          }
        }
      }
    }
  }
  return 1;
}
/*
  Lua: module:accGradParameters(input, gradOutput, scale).  Accumulates
  gradBias[k] += scale * sum(gradOutput[k]) and
  gradWeight[k] += scale * sum over windows of (gradOutput * window sum of
  input), using a kW x kH kernel with dW x dH stride.  Returns 0.
  NOTE(review): gradOutput is accessed through a raw pointer and is assumed
  contiguous -- TODO confirm callers guarantee this.
*/
static int nn_(SpatialSubSampling_accGradParameters)(lua_State *L)
{
THTensor *input = luaT_checkudata(L, 2, torch_(Tensor_id));
THTensor *gradOutput = luaT_checkudata(L, 3, torch_(Tensor_id));
real scale = luaL_optnumber(L, 4, 1);
int kW = luaT_getfieldcheckint(L, 1, "kW");
int kH = luaT_getfieldcheckint(L, 1, "kH");
int dW = luaT_getfieldcheckint(L, 1, "dW");
int dH = luaT_getfieldcheckint(L, 1, "dH");
int nInputPlane = luaT_getfieldcheckint(L, 1, "nInputPlane");
THTensor *gradWeight = luaT_getfieldcheckudata(L, 1, "gradWeight", torch_(Tensor_id));
THTensor *gradBias = luaT_getfieldcheckudata(L, 1, "gradBias", torch_(Tensor_id));
/* In batch mode, dimension 0 is the batch index; shift H/W dims by one. */
long nbatch = 1;
long dimw = 2;
long dimh = 1;
if (input->nDimension == 4) {
dimw++;
dimh++;
nbatch = input->size[0];
}
long inputWidth = input->size[dimw];
long inputHeight = input->size[dimh];
long outputWidth = (inputWidth - kW) / dW + 1;
long outputHeight = (inputHeight - kH) / dH + 1;
real *gradWeight_data = THTensor_(data)(gradWeight);
real *gradBias_data = THTensor_(data)(gradBias);
real *gradOutput_data = THTensor_(data)(gradOutput);
real *input_data;
/* Contiguous copy (if needed) so raw pointer arithmetic below is valid. */
input = THTensor_(newContiguous)(input);
input_data = THTensor_(data)(input);
long k;
#pragma omp parallel for private(k)
for(k = 0; k < nInputPlane; k++)
{
long p;
for(p = 0; p < nbatch; p++)
{
real *ptr_gradOutput = gradOutput_data + p*nInputPlane*outputHeight*outputWidth + k*outputWidth*outputHeight;
real sum;
long xx, yy;
/* Bias gradient: plain sum of this plane's output gradients. */
sum = 0;
long i;
for(i = 0; i < outputWidth*outputHeight; i++)
sum += ptr_gradOutput[i];
gradBias_data[k] += scale*sum;
/* Weight gradient: output gradient times its input-window sum. */
sum = 0;
for(yy = 0; yy < outputHeight; yy++)
{
for(xx = 0; xx < outputWidth; xx++)
{
real *ptr_input = input_data + p*nInputPlane*inputWidth*inputHeight + k*inputWidth*inputHeight + yy*dH*inputWidth+xx*dW;
real z = *ptr_gradOutput++;
long kx, ky;
for(ky = 0; ky < kH; ky++)
{
for(kx = 0; kx < kW; kx++)
sum += z * ptr_input[kx];
ptr_input += inputWidth;
}
}
}
gradWeight_data[k] += scale*sum;
}
}
/* Release the contiguous copy acquired above. */
THTensor_(free)(input);
return 0;
}
/* Lua method table: maps the C implementations to their Lua-facing names. */
static const struct luaL_Reg nn_(SpatialSubSampling__) [] = {
{"SpatialSubSampling_updateOutput", nn_(SpatialSubSampling_updateOutput)},
{"SpatialSubSampling_updateGradInput", nn_(SpatialSubSampling_updateGradInput)},
{"SpatialSubSampling_accGradParameters", nn_(SpatialSubSampling_accGradParameters)},
{NULL, NULL}
};
/*
  Registers the SpatialSubSampling methods on the torch Tensor metaclass
  under the "nn" name, then pops the metaclass off the Lua stack.
*/
static void nn_(SpatialSubSampling_init)(lua_State *L)
{
luaT_pushmetaclass(L, torch_(Tensor_id));
luaT_registeratname(L, nn_(SpatialSubSampling__), "nn");
lua_pop(L,1);
}
#endif
|
LAGraph_bfs_pushpull.c | //------------------------------------------------------------------------------
// LAGraph_bfs_pushpull: push-pull breadth-first search
//------------------------------------------------------------------------------
/*
LAGraph: graph algorithms based on GraphBLAS
Copyright 2020 LAGraph Contributors.
(see Contributors.txt for a full list of Contributors; see
ContributionInstructions.txt for information on how you can Contribute to
this project).
All Rights Reserved.
NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH
CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR
PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF
THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH
RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
Released under a BSD license, please see the LICENSE file distributed with
this Software or contact permission@sei.cmu.edu for full terms.
Created, in part, with funding and support from the United States
Government. (see Acknowledgments.txt file).
This program includes and/or can make use of certain third party source
code, object code, documentation and other files ("Third Party Software").
See LICENSE file for more details.
*/
//------------------------------------------------------------------------------
// LAGraph_bfs_pushpull: direction-optimized push/pull breadth first search,
// contributed by Tim Davis, Texas A&M.
// LAGraph_bfs_pushpull computes the BFS of a graph from a single given
// source node. The result is a vector v where v(i)=k if node i was placed
// at level k in the BFS.
// Usage:
// info = LAGraph_bfs_pushpull (&v, &pi, A, AT, source, max_level, vsparse) ;
// GrB_Vector *v: a vector containing the result, created on output.
// v(i) = k is the BFS level of node i in the graph, where a source
// node has v(source)=1. v(i) is implicitly zero if it is unreachable
// from the source node. That is, GrB_Vector_nvals (&nreach,v) is the
// size of the reachable set of the source node, for a single-source
// BFS. v may be returned as sparse, or full. If full, v(i)=0
// indicates that node i was not reached. If sparse, the pattern of v
// indicates the set of nodes reached.
// GrB_Vector *pi: a vector containing the BFS tree, in 1-based indexing.
// pi(source) = source+1 for source node. pi(i) = p+1 if p is the
// parent of i. If pi is sparse, and pi(i) is not present, then node
// i has not been reached. Otherwise, if pi is full, then pi(i)=0
// indicates that node i was not reached.
// GrB_Matrix A: a square matrix of any type. The values of A are not
// accessed. The presence of the entry A(i,j) indicates the edge
// (i,j). That is, an explicit entry A(i,j)=0 is treated as an edge.
// GrB_Matrix AT: an optional matrix of any type. If NULL, the algorithm
// is a conventional push-only BFS. If not NULL, AT must be the
// transpose of A, and a push-pull algorithm is used (NOTE: this
// assumes GraphBLAS stores its matrix in CSR form; see discussion
// below). Results are undefined if AT is not NULL but not identical
// to the transpose of A.
// int64_t source: the source node for the BFS.
// int64_t max_level: An optional limit on the levels searched for the
// single-source BFS. If zero, then no limit is enforced. If > 0,
// then only nodes with v(i) <= max_level will be visited. That is:
// 1: just the source node, 2: the source and its neighbors, 3: the
// source node, its neighbors, and their neighbors, etc.
// bool vsparse: if the result v may remain very sparse, then set this
// parameter to true. If v might have many entries, set it false. If
// you are unsure, then set it to true. This parameter speeds up
// the handling of v. If you guess wrong, there is a slight
// performance penalty. The results are not affected by this
// parameter, just the performance. This parameter is used only for
// the single-source BFS.
// single-source BFS:
// Given a graph A, a source node, find all nodes reachable from the
// source node. v(source)=1, v(i)=2 if edge (source,i) appears in the
// graph, and so on. If node i is not reachable from source, then
// implicitly v(i)=0. v is returned as a sparse vector, and v(i) is not
// an entry in this vector.
// This algorithm can use the push-pull strategy, which requires both A and
// AT=A' to be passed in. If the graph is known to be symmetric, then the same
// matrix A can be passed in for both arguments. Results are undefined if AT
// is not the transpose of A.
// If only A or AT is passed in, then only single strategy will be used: push
// or pull, but not both. In general, push-only performs well. A pull-only
// strategy is possible but it is exceedingly slow. Assuming A and AT are both
// in CSR format, then (let s = source node):
// LAGraph_bfs_pushpull (..., A, AT, s, ...) ; // push-pull (fastest)
// LAGraph_bfs_pushpull (..., A, NULL, s, ...) ; // push-only (good)
// LAGraph_bfs_pushpull (..., NULL, AT, s, ...) ; // pull-only (slow!)
// If A and AT are both in CSC format, then:
// LAGraph_bfs_pushpull (..., A, AT, s, ...) ; // push-pull (fastest)
// LAGraph_bfs_pushpull (..., NULL, AT, s, ...) ; // push-only (good)
// LAGraph_bfs_pushpull (..., A, NULL, s, ...) ; // pull-only (slow!)
// Since the pull-only method is exceedingly slow, SuiteSparse:GraphBLAS
// detects this case and refuses to do it.
// The basic step of this algorithm computes A'*q where q is the 'queue' of
// nodes in the current level. This can be done with GrB_vxm(q,A) = (q'*A)' =
// A'*q, or by GrB_mxv(AT,q) = AT*q = A'*q. Both steps compute the same thing,
// just in a different way. In GraphBLAS, unlike MATLAB, a GrB_Vector is
// simultaneously a row and column vector, so q and q' are interchangeable.
// To implement an efficient BFS using GraphBLAS, an assumption must be made in
// LAGraph about how the matrix is stored, whether by row or by column (or
// perhaps some other opaque data structure). The storage format has a huge
// impact on the relative performance of vxm(q,A) and mxv(AT,q).
// Storing A by row, if A(i,j) is the edge (i,j), means that A(i,:) is easily
// accessible. In terms of the graph A, this means that the out-adjacency
// list of node i can be traversed in time O(out-degree of node i).
// If AT is stored by row, then AT(i,:) is the in-adjacency list of node i,
// and traversing row i of AT can be done in O(in-degree of node i) time.
// The CSR (Compressed Sparse Row) format is the default for
// SuiteSparse:GraphBLAS, but no assumption can be made about any particular
// GraphBLAS library implementation.
// If A and AT are both stored by column instead, then A(i,:) is not easy to
// access. Instead, A(:,i) is the easily-accessible in-adjacency of node i,
// and AT(:,i) is the out-adjacency.
// A push step requires the out-adjacencies of each node, whereas
// a pull step requires the in-adjacencies of each node.
// vxm(q,A) = A'*q, with A stored by row: a push step
// mxv(AT,q) = A'*q, with AT stored by row: a pull step
// vxm(q,A) = A'*q, with A stored by col: a pull step
// mxv(AT,q) = A'*q, with AT stored by col: a push step
// The GraphBLAS data structure is opaque. An implementation may decide to
// store the matrix A in both formats, internally, so that it easily traverse
// both in- and out-adjacencies of each node (equivalently, A(i,:) and A(:,i)
// can both be easily traversed). This would make a push-pull BFS easy to
// implement using just the opaque GrB_Matrix A, but it doubles the storage.
// Deciding which format to use automatically is not a simple task,
// particularly since the decision must work well throughout GraphBLAS, not
// just for the BFS.
// MATLAB stores its sparse matrices in CSC format (Compressed Sparse Column).
// As a result, the MATLAB expression x=AT*q is a push step, computed using a
// saxpy-based algorithm internally, and x=A'*q is a pull step, computed using
// a dot product.
// SuiteSparse:GraphBLAS can store a matrix in either format, but this requires
// an extension to the GraphBLAS C API (GxB_set (A, GxB_FORMAT, f)). where
// f = GxB_BY_ROW (that is, CSR) or GxB_BY_COL (that is, CSC). The library
// could be augmented in the future with f = GxB_BY_BOTH. It currently does
// not select the format automatically. As a result, if GxB_set is not used,
// all its GrB_Matrix objects are stored by row (CSR).
// SuiteSparse:GraphBLAS allows the user to query (via GxB_get) and set (via
// GxB_set) the format, whether by row or by column. The hypersparsity of
// A is selected automatically, with optional hints from the user application,
// but a selection between hypersparsity vs standard CSR and CSC has no effect
// on the push vs pull decision made here.
// The push/pull and saxpy/dot connection can be described as follows.
// Assume for these first two examples that MATLAB stores its matrices in CSR
// format, where accessing A(i,:) is fast.
// If A is stored by row, then x = vxm(q,A) = q'*A can be written in MATLAB
// notation as:
/*
function x = vxm (q,A)
% a push step: compute x = q'*A where q is a column vector
x = sparse (1,n)
for i = 1:n
% a saxpy operation, using the ith row of A and the scalar q(i)
x = x + q (i) * A (i,:)
end
*/
// If AT is stored by row, then x = mxv(AT,q) = AT*q = A'*q becomes
// a dot product:
/*
function x = mxv (AT,q)
% a pull step: compute x = AT*q where q is a column vector
for i = 1:n
% a dot-product of the ith row of AT and the column vector q
x (i) = AT (i,:) * q
end
*/
// The above snippets describe how SuiteSparse:GraphBLAS computes vxm(q,A) and
// mxv(AT,q) by default, where A and AT are stored by row by default. However,
// they would be very slow in MATLAB, since it stores its sparse matrices in
// CSC format. In that case, if A is stored by column and thus accessing
// A(:,j) is efficient, then x = vxm(q,A) = q'*A becomes the dot product
// instead. These two snippets assume the matrices are both in CSC format, and
// thus make more efficient use of MATLAB:
/*
function x = vxm (q,A)
% a pull step: compute x = q'*A where q is a column vector
for j = 1:n
% a dot product of the row vector q' and the jth column of A
x (j) = q' * A (:,j)
end
*/
// If AT is stored by column, then x = mxv(AT,q) is
/*
function x = mxv (AT,q)
% a push step: compute x = AT*q where q is a column vector
for j = 1:n
% a saxpy operation, using the jth column of AT and the scalar q(j)
x = x + AT (:,j) * q (j)
end
*/
// In MATLAB, if q is a sparse column vector and A is a sparse matrix, then
// x=A*q does in fact use a saxpy-based method, internally, and x=A'*q uses a
// dot product. You can view the code used internally in MATLAB for its sparse
// matrix multiplication in the SuiteSparse/MATLAB_Tools/SSMULT and SFMULT
// packages, at http://suitesparse.com.
// This raises an interesting puzzle for LAGraph, which is intended on being a
// graph library that can be run on any implementation of GraphBLAS. There are
// no mechanisms in the GraphBLAS C API for LAGraph (or other external packages
// or user applications) to provide hints to GraphBLAS. Likely, there are no
// query mechanisms where LAGraph can ask GraphBLAS how its matrices might be
// stored (LAGraph asks, "Is A(i,:) fast? Or A(:,j)? Or both?"; the answer
// from GraphBLAS is silence). The GraphBLAS data structure is opaque, and it
// does not answer this query.
// There are two solutions to this puzzle. The most elegant one is for
// GraphBLAS to handle all this internally, and change formats as needed. It
// could choose to store A in both CSR and CSC format, or use an entirely
// different data structure, and it would make the decision between the push or
// pull, at each step of the BFS. This is not a simple task since the API is
// complex. Furthermore, the selection of the data structure for A has
// implications on all other GraphBLAS operations (submatrix assignment and
// extraction, for example).
// However, if A were to be stored in both CSR and CSC format, inside the
// opaque GraphBLAS GrB_Matrix data structure, then LAGraph_bfs_simple would
// become a push-pull BFS.
// The second solution is to allow the user application or library such as
// LAGraph to provide hints and allow it to query the GraphBLAS library.
// There are no such features in the GraphBLAS C API.
// SuiteSparse:GraphBLAS takes the second approach: It adds two functions that
// are extensions to the API: GxB_set changes the format (CSR or CSC), and
// GxB_get can query the format. Even with this simplification,
// SuiteSparse:GraphBLAS uses 24 different algorithmic variants inside GrB_mxm
// (per semiring), and selects between them automatically. By default, all of
// its matrices are stored in CSR format (either sparse or hypersparse,
// selected automatically). So if no GxB_* extensions are used, all matrices
// are in CSR format.
// If a GraphBLAS library other than SuiteSparse:GraphBLAS is in use, this
// particular function assumes that its input matrices are in CSR format, or at
// least A(i,:) and AT(i,:) can be easily accessed. With this assumption, it
// is the responsibility of this function to select between using a push or a
// pull, for each step in the BFS.
// The following analysis assumes CSR format, and it assumes that dot-product
// (a pull step) can terminate early via a short-circuit rule with the OR
// monoid, as soon as it encounters a TRUE value. This cuts the time for the
// dot-product. Not all GraphBLAS libraries may use this, but SuiteSparse:
// GraphBLAS does (in version 2.3.0 and later). Early termination cannot be
// done for the saxpy (push step) method.
// The work done by the push method (saxpy) is very predictable. BFS uses a
// complemented mask. There is no simple way to exploit a complemented mask,
// and saxpy has no early termination rule. If the set of nodes in the current
// level is q, the work is nnz(A(q,:)). If d = nnz(A)/n is the average degree,
// this becomes d*nq where nq = length (q):
// pushwork = d*nq
// The work done by the pull (dot product) method is less predictable. It can
// exploit the complemented mask, and so it only computes (n-nvisited) dot
// products, if nvisited is the # of nodes visited so far (in all levels).
// With no early-termination, the dot product will take d * log2 (nq) time,
// assuming that q is large and a binary search is used internally. That is,
// the dot product will scan through the d entries in A(i,:), and do a binary
// search for each entry in q. To account for the higher constant of a binary
// search, log2(nq) is replaced with (3*(1+log2(nq))). With early termination,
// d is too high. If the nodes are randomly marked, the probability of each
// node being marked is nvisited/n. The expected number of trials until
// success, for a sequence of events with probability p, is 1/p. Thus, the
// expected number of iterations in a dot product before an early termination
// is 1/p = (n/nvisited+1), where +1 is added to avoid a divide by zero.
// However, it cannot exceed d. Thus, the total work for the dot product
// (pull) method can be estimated as:
// per_dot = min (d, n / (nvisited+1))
// pullwork = (n-nvisited) * per_dot * (3 * (1 + log2 ((double) nq)))
// The above expressions are valid for SuiteSparse:GraphBLAS v2.3.0 and later,
// and may be reasonable for other GraphBLAS implementations. Push or pull
// is selected as the one with the least work.
// TODO: change the formula for v3.2.0
// The push/pull decision requires that both A and AT be passed in, but this
// function can use just one or the other. If only A is passed in and AT is
// NULL, then only vxm(q,A) will be used (a push step if A is CSR, or a pull
// step if A is CSC). If only AT is passed in and A is NULL, then only
// mxv(AT,q) will be used (a pull step if AT is CSR, or a push step if AT is
// CSC).
// In general, while a push-pull strategy is the fastest, a push-only BFS will
// give good performance. In particular, the time to compute AT=A' plus the
// time for the push-pull BFS is typically higher than just a push-only BFS.
// This is why this function does not compute AT=A'. To take advantage of the
// push-pull method, both A and AT must already be available, with the cost to
// construct them amortized across other computations such as this one.
// A pull-only strategy will be *exceedingly* slow.
// The input matrix A must be square. It can be non-binary, but best
// performance will be obtained if it is GrB_BOOL. It can have explicit
// entries equal to zero. These are safely ignored, and are treated as
// non-edges.
// SuiteSparse:GraphBLAS can detect the CSR vs CSC format of its inputs.
// In this case, if both matrices are provided, they must be in the same
// format (both GxB_BY_ROW or both GxB_BY_COL). If the matrices are in CSC
// format, vxm(q,A) is the pull step and mxv(AT,q) is the push step.
// If only A or AT are provided, and the result is a pull-only algorithm,
// an error is returned.
// References:
// Carl Yang, Aydin Buluc, and John D. Owens. 2018. Implementing Push-Pull
// Efficiently in GraphBLAS. In Proceedings of the 47th International
// Conference on Parallel Processing (ICPP 2018). ACM, New York, NY, USA,
// Article 89, 11 pages. DOI: https://doi.org/10.1145/3225058.3225122
// Scott Beamer, Krste Asanovic and David A. Patterson,
// The GAP Benchmark Suite, http://arxiv.org/abs/1508.03619, 2015.
// http://gap.cs.berkeley.edu/
#include "LAGraph_internal.h"
// Free all workspace vectors. GrB_free on an already-NULL handle is a no-op,
// so this is safe to invoke at any point in the function below.
// NOTE(review): LAGRAPH_ERROR is expected to invoke LAGRAPH_FREE_ALL on
// failure — confirm against LAGraph_internal.h.
#define LAGRAPH_FREE_ALL \
{ \
GrB_free (&v) ; \
GrB_free (&t) ; \
GrB_free (&q) ; \
GrB_free (&pi) ; \
}
//------------------------------------------------------------------------------
// LAGraph_bfs_pushpull: BFS with optional direction optimization (push-pull).
// Computes v (1-based BFS level of each node) and optionally pi (1-based
// parent tree) from the given source node.  Push-pull is possible only when
// both A and AT are provided; otherwise the single matrix given (and its
// CSR/CSC format) determines the strategy.  See the long discussion above
// for the full contract of each argument.
//------------------------------------------------------------------------------
GrB_Info LAGraph_bfs_pushpull // push-pull BFS, or push-only if AT = NULL
(
GrB_Vector *v_output, // v(i) is the BFS level of node i in the graph
GrB_Vector *pi_output, // pi(i) = p+1 if p is the parent of node i.
// if NULL, the parent is not computed.
GrB_Matrix A, // input graph, treated as if boolean in semiring
GrB_Matrix AT, // transpose of A (optional; push-only if NULL)
int64_t source, // starting node of the BFS
int64_t max_level, // optional limit of # levels to search
bool vsparse // if true, v is expected to be very sparse
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GrB_Info info ;
GrB_Vector q = NULL ; // nodes visited at each level
GrB_Vector v = NULL ; // result vector
// NOTE(review): t is declared and freed by LAGRAPH_FREE_ALL but never used
// in this function.
GrB_Vector t = NULL ; // temporary vector
GrB_Vector pi = NULL ; // parent vector
if (v_output == NULL || (A == NULL && AT == NULL))
{
// required output argument is missing
LAGRAPH_ERROR ("required arguments are NULL", GrB_NULL_POINTER) ;
}
(*v_output) = NULL ;
bool compute_tree = (pi_output != NULL) ;
// select descriptors: built-in GrB_DESC_* for SuiteSparse v3.2.0+,
// otherwise LAGraph's own predefined descriptors
#if defined ( GxB_SUITESPARSE_GRAPHBLAS ) \
&& ( GxB_IMPLEMENTATION >= GxB_VERSION (3,2,0) )
GrB_Descriptor desc_s = GrB_DESC_S ;
GrB_Descriptor desc_sc = GrB_DESC_SC ;
GrB_Descriptor desc_rc = GrB_DESC_RC ;
// NOTE(review): desc_r is initialized in both branches but never used below.
GrB_Descriptor desc_r = GrB_DESC_R ;
#else
GrB_Descriptor desc_s = NULL ;
GrB_Descriptor desc_sc = LAGraph_desc_ooco ;
GrB_Descriptor desc_rc = LAGraph_desc_oocr ;
GrB_Descriptor desc_r = LAGraph_desc_ooor ;
#endif
bool use_vxm_with_A ;
// NOTE(review): nvals is declared but not referenced below.
GrB_Index nrows, ncols, nvalA, ignore, nvals ;
if (A == NULL)
{
// only AT is provided
LAGr_Matrix_ncols (&nrows, AT) ;
LAGr_Matrix_nrows (&ncols, AT) ;
LAGr_Matrix_nvals (&nvalA, AT) ;
use_vxm_with_A = false ;
}
else
{
// A is provided. AT may or may not be provided
LAGr_Matrix_nrows (&nrows, A) ;
LAGr_Matrix_ncols (&ncols, A) ;
LAGr_Matrix_nvals (&nvalA, A) ;
use_vxm_with_A = true ;
}
// push/pull requires both A and AT
bool push_pull = (A != NULL && AT != NULL) ;
if (nrows != ncols)
{
// A must be square
// NOTE(review): GrB_NULL_POINTER is a surprising error code for a
// dimension error; GrB_DIMENSION_MISMATCH may have been intended.
LAGRAPH_ERROR ("A must be square", GrB_NULL_POINTER) ;
}
//--------------------------------------------------------------------------
// check the format of A and AT
//--------------------------------------------------------------------------
bool csr = true ;
// csr is true if A and AT are known (or assumed) to be in CSR format; if
// false, they are known to be in CSC format.
// This can be tested in SuiteSparse:GraphBLAS. Other libraries can use
// this section for their own library-specific tests, if they have them.
// LAGraph_bfs_pushpull will work just fine if nothing is changed or if the
// following is disabled (even SuiteSparse:GraphBLAS). The push/pull
// behaviour will be unpredictable, however, unless the library default
// format is CSR.
#ifdef GxB_SUITESPARSE_GRAPHBLAS
// The CSR vs CSC status can be tested in SuiteSparse:GraphBLAS.
// However, even with SuiteSparse:GraphBLAS, this step is optional.
GxB_Format_Value A_format = -1, AT_format = -1 ;
bool A_csr = true, AT_csr = true ;
if (A != NULL)
{
// A_csr is true if accessing A(i,:) is fast
LAGr_get (A , GxB_FORMAT, &A_format) ;
A_csr = (A_format == GxB_BY_ROW) ;
}
if (AT != NULL)
{
// AT_csr is true if accessing AT(i,:) is fast
LAGr_get (AT, GxB_FORMAT, &AT_format) ;
AT_csr = (AT_format == GxB_BY_ROW) ;
}
// Assume CSR if A(i,:) and AT(i,:) are both fast. If csr is false,
// then the algorithm below will reverse the use of vxm and mxv.
csr = A_csr && AT_csr ;
if (push_pull)
{
// both A and AT are provided. Require they have the same format.
// Either both A(i,:) and AT(i,:) are efficient to access, or both
// A(:,j) and AT(:,j) are efficient to access.
if (A_csr != AT_csr)
{
LAGRAPH_ERROR ("A and AT must in the same format:\n"
"both GxB_BY_ROW, or both GxB_BY_COL",
GrB_INVALID_VALUE) ;
}
}
else
{
// only A or AT are provided. Refuse to do the pull-only version.
if (A != NULL && A_format == GxB_BY_COL)
{
// this would result in a pull-only BFS ... exceedingly slow
LAGRAPH_ERROR (
"SuiteSparse: AT not provided, so A must be GxB_BY_ROW\n"
"(or provide both A and AT, both in the same format,\n"
"either both GxB_BY_COL or both GxB_BY_ROW)",
GrB_INVALID_VALUE) ;
}
if (AT != NULL && AT_format == GxB_BY_ROW)
{
// this would result in a pull-only BFS ... exceedingly slow
LAGRAPH_ERROR (
"SuiteSparse: A not provided, so AT must be GxB_BY_COL\n"
"(or provide both A and AT, both in the same format,\n"
"either both GxB_BY_COL or both GxB_BY_ROW)",
GrB_INVALID_VALUE) ;
}
}
#endif
//--------------------------------------------------------------------------
// initializations
//--------------------------------------------------------------------------
GrB_Index n = nrows ;
int nthreads = LAGraph_get_nthreads ( ) ;
nthreads = LAGRAPH_MIN (n / 4096, nthreads) ;
nthreads = LAGRAPH_MAX (nthreads, 1) ;
// just traverse from the source node
max_level = (max_level <= 0) ? n : LAGRAPH_MIN (n, max_level) ;
// create an empty vector v
GrB_Type int_type = (n > INT32_MAX) ? GrB_INT64 : GrB_INT32 ;
LAGr_Vector_new (&v, int_type, n) ;
// make v dense if requested
int64_t vlimit = LAGRAPH_MAX (256, sqrt ((double) n)) ;
if (!vsparse)
{
// v is expected to have many entries, so convert v to dense.
// If the guess is wrong, v can be made dense later on.
LAGr_assign (v, NULL, NULL, 0, GrB_ALL, n, NULL) ;
}
GrB_Semiring first_semiring, second_semiring ;
if (compute_tree)
{
// create an integer vector q, and set q(source) to source+1
LAGr_Vector_new (&q, int_type, n) ;
LAGr_Vector_setElement (q, source+1, source) ;
if (n > INT32_MAX)
{
#if defined ( GxB_SUITESPARSE_GRAPHBLAS ) \
&& ( GxB_IMPLEMENTATION >= GxB_VERSION (3,2,0) )
// terminates as soon as it finds any parent; nondeterministic
first_semiring = GxB_ANY_FIRST_INT64 ;
second_semiring = GxB_ANY_SECOND_INT64 ;
#else
// deterministic, but cannot terminate early
first_semiring = LAGraph_MIN_FIRST_INT64 ;
second_semiring = LAGraph_MIN_SECOND_INT64 ;
#endif
}
else
{
#if defined ( GxB_SUITESPARSE_GRAPHBLAS ) \
&& ( GxB_IMPLEMENTATION >= GxB_VERSION (3,2,0) )
// terminates as soon as it finds any parent; nondeterministic
first_semiring = GxB_ANY_FIRST_INT32 ;
second_semiring = GxB_ANY_SECOND_INT32 ;
#else
// deterministic, but cannot terminate early
first_semiring = LAGraph_MIN_FIRST_INT32 ;
second_semiring = LAGraph_MIN_SECOND_INT32 ;
#endif
}
// create the empty parent vector
LAGr_Vector_new (&pi, int_type, n) ;
if (!vsparse)
{
// make pi a dense vector of all zeros
LAGr_assign (pi, NULL, NULL, 0, GrB_ALL, n, NULL) ;
}
// pi (source) = source+1 denotes a root of the BFS tree
LAGr_Vector_setElement (pi, source+1, source) ;
}
else
{
// create a boolean vector q, and set q(source) to true
LAGr_Vector_new (&q, GrB_BOOL, n) ;
LAGr_Vector_setElement (q, true, source) ;
#if defined ( GxB_SUITESPARSE_GRAPHBLAS ) \
&& ( GxB_IMPLEMENTATION >= GxB_VERSION (3,2,0) )
// terminates as soon as it finds any pair
first_semiring = GxB_ANY_PAIR_BOOL ;
second_semiring = GxB_ANY_PAIR_BOOL ;
#else
// can terminate early, but requires more data movement internally
first_semiring = LAGraph_LOR_FIRST_BOOL ;
second_semiring = LAGraph_LOR_SECOND_BOOL ;
#endif
}
// average node degree
double d = (n == 0) ? 0 : (((double) nvalA) / (double) n) ;
int64_t nvisited = 0 ; // # nodes visited so far
GrB_Index nq = 1 ; // number of nodes in the current level
//--------------------------------------------------------------------------
// BFS traversal and label the nodes
//--------------------------------------------------------------------------
for (int64_t level = 1 ; ; level++)
{
//----------------------------------------------------------------------
// set v to the current level, for all nodes in q
//----------------------------------------------------------------------
// v<q> = level: set v(i) = level for all nodes i in q
LAGr_assign (v, q, NULL, level, GrB_ALL, n, desc_s) ;
//----------------------------------------------------------------------
// check if done
//----------------------------------------------------------------------
nvisited += nq ;
if (nq == 0 || nvisited == n || level >= max_level) break ;
//----------------------------------------------------------------------
// check if v should be converted to dense
//----------------------------------------------------------------------
if (vsparse && nvisited > vlimit)
{
// Convert v from sparse to dense to speed up the rest of the work.
// If this case is triggered, it would have been faster to pass in
// vsparse = false on input.
// v <!v> = 0
LAGr_assign (v, v, NULL, 0, GrB_ALL, n, desc_sc) ;
LAGr_Vector_nvals (&ignore, v) ;
if (compute_tree)
{
// Convert pi from sparse to dense, to speed up the work.
// pi<!pi> = 0
LAGr_assign (pi, pi, NULL, 0, GrB_ALL, n, desc_sc) ;
LAGr_Vector_nvals (&ignore, pi) ;
}
vsparse = false ;
}
//----------------------------------------------------------------------
// select push vs pull
//----------------------------------------------------------------------
// heuristic work estimates; see the derivation in the comments above
if (push_pull)
{
double pushwork = d * nq ;
double expected = (double) n / (double) (nvisited+1) ;
double per_dot = LAGRAPH_MIN (d, expected) ;
double binarysearch = (3 * (1 + log2 ((double) nq))) ;
double pullwork = (n-nvisited) * per_dot * binarysearch ;
use_vxm_with_A = (pushwork < pullwork) ;
if (!csr)
{
// Neither A(i,:) nor AT(i,:) is efficient. Instead, both
// A(:,j) and AT(:,j) is fast (that is, the two matrices
// are in CSC format). Swap the push/pull choice.
use_vxm_with_A = !use_vxm_with_A ;
}
}
//----------------------------------------------------------------------
// q = next level of the BFS
//----------------------------------------------------------------------
if (use_vxm_with_A)
{
// q'<!v> = q'*A
// this is a push step if A is in CSR format; pull if CSC
LAGr_vxm (q, v, NULL, first_semiring, q, A, desc_rc) ;
}
else
{
// q<!v> = AT*q
// this is a pull step if AT is in CSR format; push if CSC
LAGr_mxv (q, v, NULL, second_semiring, AT, q, desc_rc) ;
}
//----------------------------------------------------------------------
// move to next level
//----------------------------------------------------------------------
if (compute_tree)
{
//------------------------------------------------------------------
// assign parents
//------------------------------------------------------------------
// q(i) currently contains the parent of node i in tree (off by one
// so it won't have any zero values, for valued mask).
// pi<q> = q
LAGr_assign (pi, q, NULL, q, GrB_ALL, n, desc_s) ;
//------------------------------------------------------------------
// replace q with current node numbers
//------------------------------------------------------------------
// TODO this could be a unaryop
// q(i) = i+1 for all entries in q.
#ifdef GxB_SUITESPARSE_GRAPHBLAS
GrB_Index *qi ;
if (n > INT32_MAX)
{
int64_t *qx ;
// the export also fills nq with nvals(q), which becomes the
// size of the next level for the loop-termination test above
LAGr_Vector_export (&q, &int_type, &n, &nq, &qi,
(void **) (&qx), NULL) ;
int nth = LAGRAPH_MIN (nq / (64*1024), nthreads) ;
nth = LAGRAPH_MAX (nth, 1) ;
#pragma omp parallel for num_threads(nth) schedule(static)
for (int64_t k = 0 ; k < nq ; k++)
{
qx [k] = qi [k] + 1 ;
}
LAGr_Vector_import (&q, int_type, n, nq, &qi,
(void **) (&qx), NULL) ;
}
else
{
int32_t *qx ;
// nq is likewise updated here with nvals(q)
LAGr_Vector_export (&q, &int_type, &n, &nq, &qi,
(void **) (&qx), NULL) ;
int nth = LAGRAPH_MIN (nq / (64*1024), nthreads) ;
nth = LAGRAPH_MAX (nth, 1) ;
#pragma omp parallel for num_threads(nth) schedule(static)
// k is int32_t while nq is GrB_Index (unsigned 64-bit); safe in
// this branch since n <= INT32_MAX implies nq <= INT32_MAX
for (int32_t k = 0 ; k < nq ; k++)
{
qx [k] = qi [k] + 1 ;
}
LAGr_Vector_import (&q, int_type, n, nq, &qi,
(void **) (&qx), NULL) ;
}
#else
// TODO: use extractTuples and build instead
// Or use something like:
// extract tuples into I
// let e = 1:n be created once, in initialization phase
// q<q> = e (I)
fprintf (stderr, "TODO: use extractTuples here\n") ;
abort ( ) ;
#endif
}
else
{
//------------------------------------------------------------------
// count the nodes in the current level
//------------------------------------------------------------------
LAGr_Vector_nvals (&nq, q) ;
}
}
//--------------------------------------------------------------------------
// return the parent vector, if computed
//--------------------------------------------------------------------------
if (compute_tree)
{
(*pi_output) = pi ;
pi = NULL ;
}
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
(*v_output) = v ; // return result
v = NULL ; // set to NULL so LAGRAPH_FREE_ALL doesn't free it
LAGRAPH_FREE_ALL ; // free all workspace (except for result v)
return (GrB_SUCCESS) ;
}
|
DRB063-outeronly1-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Only the outermost loop can be parallelized.
*/
// Shared problem data: N is the compile-time dimension of b; n and m are
// the runtime loop bounds used by foo() (all 100 by default).
int n=100, m=100;
double b[100][100];
#define N 100

// Initialize b[i][j] = i*j over the full N x N matrix.
// Fix: removed the unused local variable k.
// NOTE(review): the inner "parallel for" opens a nested parallel region;
// the outer loop already distributes the work, so the inner pragma is
// redundant (though not incorrect).
int init()
{
int i,j;
#pragma omp parallel for private(i, j)
for (i = 0; i < N; i++) {
#pragma omp parallel for private(j)
for (j = 0; j < N; j++) {
b[i][j] = i * j;
}
}
return 0;
}
// Shift each row of b left by one: b[i][j] takes the old value of b[i][j+1].
// Rows are independent, so the outer loop is parallelized.  The inner loop
// must stay serial: iteration j reads b[i][j+1], which iteration j+1 writes,
// so only the outermost loop can be parallelized.
void foo()
{
#pragma omp parallel for
for (int row = 0; row < n; row++)
{
for (int col = 0; col + 1 < m; col++)
{
b[row][col] = b[row][col + 1];
}
}
}
// Print every element of b, one value per line, in row-major order.
// Fix: removed the unused local variable k.
// NOTE(review): printf is called but this file never includes <stdio.h>;
// it compiles via implicit declaration in old C — the include should be
// added at the top of the file.
int print()
{
int i,j;
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
printf("%lf\n", b[i][j]);
}
}
return 0;
}
// Driver: fill b, run the kernel under test, then dump the result.
int main()
{
init();   // b[i][j] = i*j
foo();    // shift each row of b left by one
print();  // print all of b
return 0;
}
|
edge_miner.h | #ifndef EDGE_MINER_H
#define EDGE_MINER_H
#include <mutex>
#include "miner.h"
#include "domain_support.h"
// Pattern and support-map aliases used throughout the edge miner.
// Modernized from typedef to C++11 `using` (the file already requires
// C++11: <mutex>, std::unordered_map).  Map values are raw DomainSupport
// pointers, allocated by the aggregators.
using InitPattern = std::pair<unsigned, unsigned>;            // (src_label, dst_label) of one edge
using QPattern = QuickPattern<EdgeEmbedding, ElementType>;
using CPattern = CanonicalGraph<EdgeEmbedding, ElementType>;
using QpMapFreq = std::unordered_map<QPattern, Frequency>;    // quick pattern map (quick pattern -> frequency)
using CgMapFreq = std::unordered_map<CPattern, Frequency>;    // canonical pattern map (canonical pattern -> frequency)
using InitMap = std::map<InitPattern, DomainSupport*>;
using QpMapDomain = std::unordered_map<QPattern, DomainSupport*>;  // quick pattern map (quick pattern -> domain support)
using CgMapDomain = std::unordered_map<CPattern, DomainSupport*>;  // canonical pattern map (canonical pattern -> domain support)
using FreqMap = std::unordered_map<unsigned, unsigned>;
using DomainMap = std::unordered_map<unsigned, bool>;
// Per-thread (thread-local) copies of the maps, merged after parallel phases.
using LocalInitMap = PerThreadStorage<InitMap>;
using LocalQpMapFreq = PerThreadStorage<QpMapFreq>;    // PerThreadStorage: thread-local quick pattern map
using LocalCgMapFreq = PerThreadStorage<CgMapFreq>;    // PerThreadStorage: thread-local canonical pattern map
using LocalQpMapDomain = PerThreadStorage<QpMapDomain>;
using LocalCgMapDomain = PerThreadStorage<CgMapDomain>;
class EdgeMiner : public Miner {
public:
// Construct a miner over graph g.
// size: maximum pattern size to mine (default 3).
// nthreads: number of worker threads; the per-thread local maps are sized
// to this so each thread gets its own slot.
EdgeMiner(Graph *g, unsigned size = 3, int nthreads = 1) {
graph = g;
max_size = size;
numThreads = nthreads;
construct_edgemap();
init_localmaps.set_size(nthreads);
qp_localmaps.set_size(nthreads);
cg_localmaps.set_size(nthreads);
}
// Defaulted virtual destructor (modernized from an empty body).
// NOTE(review): init_aggregator() allocates DomainSupport objects with
// `new` into the maps and nothing here frees them — confirm they are
// released elsewhere, otherwise this is a leak.
virtual ~EdgeMiner() = default;
// Extend every embedding of the current level by one edge, producing the
// (level+1) embedding list.  Two parallel passes over the embeddings:
//   pass 1: count the non-automorphic extensions each position generates;
//   pass 2: prefix-sum the counts, size the next level, and fill it in.
void extend_edge(unsigned level, EmbeddingList& emb_list) {
// per-position count of new embeddings, filled by pass 1
UintList num_new_emb(emb_list.size());
#pragma omp parallel for
for (size_t pos = 0; pos < emb_list.size(); pos ++) {
EdgeEmbedding emb(level+1);
get_embedding(level, pos, emb_list, emb);
num_new_emb[pos] = 0;
unsigned n = emb.size();
std::set<VertexId> vert_set;
// the vertex set is only needed by the automorphism test for n > 3
if (n > 3)
for (unsigned i = 0; i < n; i ++) vert_set.insert(emb.get_vertex(i));
for (unsigned i = 0; i < n; ++i) {
VertexId src = emb.get_vertex(i);
if (emb.get_key(i) == 0) { // TODO: need to fix this
IndexT row_begin = graph->edge_begin(src);
IndexT row_end = graph->edge_end(src);
for (IndexT e = row_begin; e < row_end; e++) {
IndexT dst = graph->getEdgeDst(e);
BYTE existed = 0;
//if (is_frequent_edge[e])
if (!is_edge_automorphism(n, emb, i, src, dst, existed, vert_set))
num_new_emb[pos] ++;
}
}
}
emb.clean();
}
Ulong new_size = std::accumulate(num_new_emb.begin(), num_new_emb.end(), (Ulong)0);
std::cout << "new_size = " << new_size << "\n";
assert(new_size < 4294967296); // TODO: currently do not support vector size larger than 2^32
// exclusive prefix sum: indices[pos] is the write offset for position pos
UintList indices = parallel_prefix_sum(num_new_emb);
new_size = indices[indices.size()-1];
emb_list.add_level(new_size);
// pass 2: repeat the enumeration, writing each accepted extension at its
// precomputed offset.
// NOTE(review): this pass iterates emb_list.size(level) whereas pass 1
// used emb_list.size() — presumably equal for the current level after
// add_level(); confirm.  Also, pass 1 calls emb.clean() per iteration but
// this pass does not — confirm EdgeEmbedding cleanup semantics.
#pragma omp parallel for
for (size_t pos = 0; pos < emb_list.size(level); pos ++) {
EdgeEmbedding emb(level+1);
get_embedding(level, pos, emb_list, emb);
unsigned start = indices[pos];
unsigned n = emb.size();
std::set<VertexId> vert_set;
if (n > 3)
for (unsigned i = 0; i < n; i ++) vert_set.insert(emb.get_vertex(i));
for (unsigned i = 0; i < n; ++i) {
IndexT src = emb.get_vertex(i);
if (emb.get_key(i) == 0) {
IndexT row_begin = graph->edge_begin(src);
IndexT row_end = graph->edge_end(src);
for (IndexT e = row_begin; e < row_end; e++) {
IndexT dst = graph->getEdgeDst(e);
BYTE existed = 0;
//if (is_frequent_edge[e])
if (!is_edge_automorphism(n, emb, i, src, dst, existed, vert_set)) {
emb_list.set_idx(level+1, start, pos);
emb_list.set_his(level+1, start, i);
emb_list.set_vid(level+1, start++, dst);
}
}
}
}
}
}
inline unsigned init_aggregator() {
init_map.clear();
for (IndexT src = 0; src < graph->num_vertices(); src ++) {
InitMap *lmap = init_localmaps.getLocal();
auto src_label = graph->getData(src);
IndexT row_begin = graph->edge_begin(src);
IndexT row_end = graph->edge_end(src);
for (IndexT e = row_begin; e < row_end; e++) {
IndexT dst = graph->getEdgeDst(e);
auto dst_label = graph->getData(dst);
if (src_label <= dst_label) {
InitPattern key = get_init_pattern(src_label, dst_label);
if (lmap->find(key) == lmap->end()) {
(*lmap)[key] = new DomainSupport(2);
(*lmap)[key]->set_threshold(threshold);
}
(*lmap)[key]->add_vertex(0, src);
(*lmap)[key]->add_vertex(1, dst);
}
}
}
merge_init_map();
std::cout << "Number of single-edge patterns: " << init_map.size() << "\n";
unsigned count = 0;
for (auto it = init_map.begin(); it != init_map.end(); ++it)
if (it->second->get_support()) count ++;
return count; // return number of frequent single-edge patterns
}
inline void quick_aggregate(unsigned level, EmbeddingList& emb_list) {
for (auto i = 0; i < numThreads; i++) qp_localmaps.getLocal(i)->clear();
#pragma omp parallel for
for (size_t pos = 0; pos < emb_list.size(); pos ++) {
QpMapDomain *lmap = qp_localmaps.getLocal();
EdgeEmbedding emb(level+1);
get_embedding(level, pos, emb_list, emb);
unsigned n = emb.size();
QPattern qp(emb, true);
bool qp_existed = false;
auto it = lmap->find(qp);
if (it == lmap->end()) {
(*lmap)[qp] = new DomainSupport(n);
(*lmap)[qp]->set_threshold(threshold);
emb_list.set_pid(pos, qp.get_id());
} else {
qp_existed = true;
emb_list.set_pid(pos, (it->first).get_id());
}
for (unsigned i = 0; i < n; i ++) {
if ((*lmap)[qp]->has_domain_reached_support(i) == false)
(*lmap)[qp]->add_vertex(i, emb.get_vertex(i));
}
if (qp_existed) qp.clean();
}
}
void insert_id_map(int qp_id, int cg_id) {
std::unique_lock<std::mutex> lock(map_mutex);
id_map.insert(std::make_pair(qp_id, cg_id));
}
// aggregate quick patterns into canonical patterns.
// construct id_map from quick pattern ID (qp_id) to canonical pattern ID (cg_id)
void canonical_aggregate() {
id_map.clear();
for (auto i = 0; i < numThreads; i++) cg_localmaps.getLocal(i)->clear();
for (std::pair<QPattern, DomainSupport*> element : qp_map) {
CgMapDomain *lmap = cg_localmaps.getLocal();
unsigned num_domains = element.first.get_size();
CPattern cg(element.first);
int qp_id = element.first.get_id();
int cg_id = cg.get_id();
insert_id_map(qp_id, cg_id);
auto it = lmap->find(cg);
if (it == lmap->end()) {
(*lmap)[cg] = new DomainSupport(num_domains);
(*lmap)[cg]->set_threshold(threshold);
element.first.set_cgid(cg.get_id());
} else {
element.first.set_cgid((it->first).get_id());
}
VertexPositionEquivalences equivalences;
element.first.get_equivalences(equivalences);
for (unsigned i = 0; i < num_domains; i ++) {
if ((*lmap)[cg]->has_domain_reached_support(i) == false) {
unsigned qp_idx = cg.get_quick_pattern_index(i);
assert(qp_idx >= 0 && qp_idx < num_domains);
UintSet equ_set = equivalences.get_equivalent_set(qp_idx);
for (unsigned idx : equ_set) {
DomainSupport *support = element.second;
if (support->has_domain_reached_support(idx) == false) {
bool reached_threshold = (*lmap)[cg]->add_vertices(i, support->domain_sets[idx]);
if (reached_threshold) break;
} else {
(*lmap)[cg]->set_domain_frequent(i);
break;
}
}
}
}
cg.clean();
}
}
inline void merge_init_map() {
init_map = *(init_localmaps.getLocal(0));
for (auto i = 1; i < numThreads; i++) {
for (auto element : *init_localmaps.getLocal(i)) {
DomainSupport *support = element.second;
if (init_map.find(element.first) == init_map.end()) {
init_map[element.first] = support;
} else {
for (unsigned i = 0; i < 2; i ++) {
if (!init_map[element.first]->has_domain_reached_support(i)) {
if (support->has_domain_reached_support(i))
init_map[element.first]->set_domain_frequent(i);
else init_map[element.first]->add_vertices(i, support->domain_sets[i]);
}
}
}
}
}
}
inline void merge_qp_map(unsigned num_domains) {
qp_map.clear();
qp_map = *(qp_localmaps.getLocal(0));
for (auto i = 1; i < numThreads; i++) {
const QpMapDomain *lmap = qp_localmaps.getLocal(i);
for (auto element : *lmap) {
if (qp_map.find(element.first) == qp_map.end())
qp_map[element.first] = element.second;
}
for (std::pair<QPattern, DomainSupport*> element : *lmap) {
DomainSupport *support = element.second;
for (unsigned i = 0; i < num_domains; i ++) {
if (!qp_map[element.first]->has_domain_reached_support(i) && qp_map[element.first] != support) {
if (support->has_domain_reached_support(i))
qp_map[element.first]->set_domain_frequent(i);
else qp_map[element.first]->add_vertices(i, support->domain_sets[i]);
}
}
}
}
}
inline void merge_cg_map(unsigned num_domains) {
cg_map.clear();
cg_map = *(cg_localmaps.getLocal(0));
for (auto i = 1; i < numThreads; i++) {
const CgMapDomain *lmap = cg_localmaps.getLocal(i);
for (auto element : *lmap) {
if (cg_map.find(element.first) == cg_map.end())
cg_map[element.first] = element.second;
}
for (std::pair<CPattern, DomainSupport*> element : *lmap) {
DomainSupport *support = element.second;
for (unsigned i = 0; i < num_domains; i ++) {
if (!cg_map[element.first]->has_domain_reached_support(i) && cg_map[element.first] != support) {
if (support->has_domain_reached_support(i))
cg_map[element.first]->set_domain_frequent(i);
else cg_map[element.first]->add_vertices(i, support->domain_sets[i]);
}
}
}
}
}
// Filtering for FSM
#ifdef ENABLE_LABEL
inline void init_filter(EmbeddingList& emb_list) {
UintList is_frequent_emb(emb_list.size(), 0);
#pragma omp parallel for
for (size_t pos = 0; pos < emb_list.size(); pos ++) {
VertexId src = emb_list.get_idx(1, pos);
VertexId dst = emb_list.get_vid(1, pos);
auto src_label = graph->getData(src);
auto dst_label = graph->getData(dst);
InitPattern key = get_init_pattern(src_label, dst_label);
if (init_map[key]->get_support()) is_frequent_emb[pos] = 1;
}
//assert(emb_list.size()*2 == graph->num_edges()); // symmetric graph
is_frequent_edge.resize(graph->num_edges());
std::fill(is_frequent_edge.begin(), is_frequent_edge.end(), 0);
#pragma omp parallel for
for (size_t pos = 0; pos < emb_list.size(); pos ++) {
if (is_frequent_emb[pos]) {
VertexId src = emb_list.get_idx(1, pos);
VertexId dst = emb_list.get_vid(1, pos);
unsigned eid0 = edge_map[OrderedEdge(src,dst)];
unsigned eid1 = edge_map[OrderedEdge(dst,src)];
__sync_bool_compare_and_swap(&is_frequent_edge[eid0], 0, 1);
__sync_bool_compare_and_swap(&is_frequent_edge[eid1], 0, 1);
}
}
std::cout << "Number of frequent edges: " << count(is_frequent_edge.begin(), is_frequent_edge.end(), 1) << "\n";
UintList indices = parallel_prefix_sum(is_frequent_emb);
VertexList vid_list0 = emb_list.get_idx_list(1);
VertexList vid_list1 = emb_list.get_vid_list(1);
#pragma omp parallel for
for (size_t pos = 0; pos < emb_list.size(); pos ++) {
if (is_frequent_emb[pos]) {
VertexId src = vid_list0[pos];
VertexId dst = vid_list1[pos];
unsigned start = indices[pos];
emb_list.set_vid(1, start, dst);
emb_list.set_idx(1, start, src);
}
}
emb_list.remove_tail(indices.back());
}
#endif
inline void filter(unsigned level, EmbeddingList &emb_list) {
UintList is_frequent_emb(emb_list.size(), 0);
#pragma omp parallel for
for (size_t pos = 0; pos < emb_list.size(); pos ++) {
unsigned qp_id = emb_list.get_pid(pos);
unsigned cg_id = id_map.at(qp_id);
if (domain_support_map.at(cg_id))
is_frequent_emb[pos] = 1;
}
UintList indices = parallel_prefix_sum(is_frequent_emb);
VertexList vid_list = emb_list.get_vid_list(level);
UintList idx_list = emb_list.get_idx_list(level);
ByteList his_list = emb_list.get_his_list(level);
for (size_t pos = 0; pos < emb_list.size(); pos ++) {
if (is_frequent_emb[pos]) {
unsigned start = indices[pos];
VertexId vid = vid_list[pos];
IndexTy idx = idx_list[pos];
BYTE his = his_list[pos];
emb_list.set_idx(level, start, idx);
emb_list.set_vid(level, start, vid);
emb_list.set_his(level, start, his);
}
}
emb_list.remove_tail(indices.back());
}
inline void set_threshold(const unsigned minsup) { threshold = minsup; }
inline void printout_agg(const CgMapFreq &cg_map) {
for (auto it = cg_map.begin(); it != cg_map.end(); ++it)
std::cout << "{" << it->first << " --> " << it->second << std::endl;
}
inline void printout_agg() {
std::cout << "num_patterns: " << cg_map.size() << " num_quick_patterns: " << qp_map.size() << "\n";
BoolVec support(cg_map.size());
int i = 0;
for (auto it = cg_map.begin(); it != cg_map.end(); ++it) {
support[i] = it->second->get_support();
i ++;
}
i = 0;
for (auto it = cg_map.begin(); it != cg_map.end(); ++it) {
std::cout << "{" << it->first << " --> " << support[i] << std::endl;
i ++;
}
}
inline unsigned support_count() {
domain_support_map.clear();
unsigned count = 0;
for (auto it = cg_map.begin(); it != cg_map.end(); ++it) {
bool support = it->second->get_support();
domain_support_map.insert(std::make_pair(it->first.get_id(), support));
if (support) count ++;
}
return count;
}
// construct edge-map for later use. May not be necessary if Galois has this support
void construct_edgemap() {
for (auto src = 0; src < graph->num_vertices(); src ++) {
IndexT row_begin = graph->edge_begin(src);
IndexT row_end = graph->edge_end(src);
for (IndexT e = row_begin; e < row_end; e++) {
auto dst = graph->getEdgeDst(e);
OrderedEdge edge(src, dst);
edge_map.insert(std::pair<OrderedEdge, unsigned>(edge, e));
}
}
}
private:
unsigned threshold;
InitMap init_map;
UintMap id_map;
unsigned max_size;
int numThreads;
FreqMap freq_support_map;
DomainMap domain_support_map;
std::map<OrderedEdge, unsigned> edge_map;
std::set<std::pair<VertexId,VertexId> > freq_edge_set;
std::vector<unsigned> is_frequent_edge;
LocalInitMap init_localmaps; // initialization map, only used for once, no need to clear
LocalQpMapDomain qp_localmaps; // quick pattern local map for each thread
LocalCgMapDomain cg_localmaps; // canonical pattern local map for each thread
QpMapDomain qp_map; // quick pattern map
CgMapDomain cg_map; // canonical graph map
std::mutex map_mutex;
inline InitPattern get_init_pattern(BYTE src_label, BYTE dst_label) {
if (src_label <= dst_label) return std::make_pair(src_label, dst_label);
else return std::make_pair(dst_label, src_label);
}
inline void get_embedding(unsigned level, unsigned pos, const EmbeddingList& emb_list, EdgeEmbedding &emb) {
VertexId vid = emb_list.get_vid(level, pos);
IndexTy idx = emb_list.get_idx(level, pos);
BYTE his = emb_list.get_his(level, pos);
BYTE lab = graph->getData(vid);
ElementType ele(vid, 0, lab, his);
emb.set_element(level, ele);
for (unsigned l = 1; l < level; l ++) {
vid = emb_list.get_vid(level-l, idx);
his = emb_list.get_his(level-l, idx);
lab = graph->getData(vid);
ElementType ele(vid, 0, lab, his);
emb.set_element(level-l, ele);
idx = emb_list.get_idx(level-l, idx);
}
lab = graph->getData(idx);
ElementType ele0(idx, 0, lab, 0);
emb.set_element(0, ele0);
}
bool is_quick_automorphism(unsigned size, const EdgeEmbedding& emb, BYTE history, VertexId src, VertexId dst, BYTE& existed) {
if (dst <= emb.get_vertex(0)) return true;
if (dst == emb.get_vertex(1)) return true;
if (history == 0 && dst < emb.get_vertex(1)) return true;
if (size == 2) {
} else if (size == 3) {
if (history == 0 && emb.get_history(2) == 0 && dst <= emb.get_vertex(2)) return true;
if (history == 0 && emb.get_history(2) == 1 && dst == emb.get_vertex(2)) return true;
if (history == 1 && emb.get_history(2) == 1 && dst <= emb.get_vertex(2)) return true;
if (dst == emb.get_vertex(2)) existed = 1;
//if (!existed && max_size < 4) return true;
} else {
std::cout << "Error: should go to detailed check\n";
}
return false;
}
bool is_edge_automorphism(unsigned size, const EdgeEmbedding& emb, BYTE history, VertexId src, VertexId dst, BYTE& existed, const std::set<VertexId>& vertex_set) {
if (size < 3) return is_quick_automorphism(size, emb, history, src, dst, existed);
// check with the first element
if (dst <= emb.get_vertex(0)) return true;
if (history == 0 && dst <= emb.get_vertex(1)) return true;
// check loop edge
if (dst == emb.get_vertex(emb.get_history(history))) return true;
if (vertex_set.find(dst) != vertex_set.end()) existed = 1;
// check to see if there already exists the vertex added;
// if so, just allow to add edge which is (smaller id -> bigger id)
if (existed && src > dst) return true;
std::pair<VertexId, VertexId> added_edge(src, dst);
for (unsigned index = history + 1; index < emb.size(); ++index) {
std::pair<VertexId, VertexId> edge;
edge.first = emb.get_vertex(emb.get_history(index));
edge.second = emb.get_vertex(index);
//assert(edge.first != edge.second);
int cmp = compare(added_edge, edge);
if(cmp <= 0) return true;
}
return false;
}
inline void swap(std::pair<VertexId, VertexId>& pair) {
if (pair.first > pair.second) {
VertexId tmp = pair.first;
pair.first = pair.second;
pair.second = tmp;
}
}
inline int compare(std::pair<VertexId, VertexId>& oneEdge, std::pair<VertexId, VertexId>& otherEdge) {
swap(oneEdge);
swap(otherEdge);
if(oneEdge.first == otherEdge.first) return oneEdge.second - otherEdge.second;
else return oneEdge.first - otherEdge.first;
}
};
#endif // EDGE_MINER_HPP_
|
gmm_uborder_fun.c | /*
*
* gmm_uborder_fun.c
*
* Code generation for function 'gmm_uborder_fun'
*
*/
/* Include files */
#include "gmm_uborder_fun.h"
#include "fetch_thresholds.h"
#include "fetch_thresholds_emxutil.h"
#include "rt_nonfinite.h"
#include <math.h>
/* Function Definitions */
/*
 * Upper border of a two-component Gaussian mixture at scalar x:
 * evaluates both scaled component pdfs and returns the larger value.
 * A NaN component loses to a non-NaN one (MATLAB max semantics).
 */
double __anon_fcn(const double mu[2], const double sig[2], const double amp[2],
                  double x)
{
  double vals[2];
  int pick_second;
  normpdfs(x, mu, sig, amp, vals);
  /* take component 1 when it is strictly larger, or when component 0 is NaN
     while component 1 is not; otherwise keep component 0 */
  pick_second = (vals[0] < vals[1]) || (rtIsNaN(vals[0]) && (!rtIsNaN(vals[1])));
  return pick_second ? vals[1] : vals[0];
}
/*
* function [ vals ] = normpdfs(x, mu, sig, amp)
*/
/*
 * Variable-length variant of normpdfs: vals[i] = amp[i] * normpdf(x, mu[i], sig[i])
 * for i in [0, length(mu)). vals is resized to match mu, initialized to NaN,
 * then each entry is overwritten in an (OpenMP-parallel) loop.
 * A non-positive sig[i] yields NaN for that component.
 */
void b_normpdfs(double x, const emxArray_real_T *mu, const emxArray_real_T *sig,
                const emxArray_real_T *amp, emxArray_real_T *vals)
{
  int ub_loop;
  int loop_ub;
  int i;
  double t;
  /* 'gmm_uborder_fun:14' n = length(mu); */
  /* 'gmm_uborder_fun:15' vals = NaN(n, length(x)); */
  ub_loop = vals->size[0];
  vals->size[0] = mu->size[0];
  emxEnsureCapacity_real_T(vals, ub_loop);
  loop_ub = mu->size[0];
  for (ub_loop = 0; ub_loop < loop_ub; ub_loop++) {
    vals->data[ub_loop] = rtNaN;
  }
  ub_loop = mu->size[0] - 1;
#pragma omp parallel for \
 num_threads(omp_get_max_threads()) \
 private(t)
  for (i = 0; i <= ub_loop; i++) {
    /* 'gmm_uborder_fun:17' vals(i, :) = amp(i) * normpdf(x, mu(i), sig(i)); */
    /* 2.5066282746310002 == sqrt(2*pi) */
    if (sig->data[i] > 0.0) {
      t = (x - mu->data[i]) / sig->data[i];
      t = exp(-0.5 * t * t) / (2.5066282746310002 * sig->data[i]);
    } else {
      t = rtNaN;
    }
    vals->data[i] = amp->data[i] * t;
  }
}
/*
* function [ vals ] = normpdfs(x, mu, sig, amp)
*/
/*
 * Evaluate two scaled normal pdfs at scalar x:
 *   vals[i] = amp[i] * normpdf(x, mu[i], sig[i]).
 * A non-positive sig[i] yields NaN for that component.
 * 2.5066282746310002 == sqrt(2*pi).
 */
void normpdfs(double x, const double mu[2], const double sig[2], const double
              amp[2], double vals[2])
{
  int i;
  double t;
#pragma omp parallel for \
 num_threads(omp_get_max_threads()) \
 private(t)
  for (i = 0; i < 2; i++) {
    /* 'gmm_uborder_fun:17' vals(i, :) = amp(i) * normpdf(x, mu(i), sig(i)); */
    if (sig[i] > 0.0) {
      double z = (x - mu[i]) / sig[i];
      t = exp(-0.5 * z * z) / (2.5066282746310002 * sig[i]);
    } else {
      t = rtNaN;
    }
    vals[i] = amp[i] * t;
  }
}
/* End of code generation (gmm_uborder_fun.c) */
|
portablegl.h | /*
PortableGL 0.95 MIT licensed software renderer that closely mirrors OpenGL 3.x
portablegl.com
robertwinkler.com
Do this:
#define PORTABLEGL_IMPLEMENTATION
before you include this file in *one* C or C++ file to create the implementation.
If you plan on using your own 3D vector/matrix library rather than crsw_math
that is built into PortableGL and your names are the standard glsl vec[2-4],
mat[3-4] etc., define MANGLE_TYPES too before including portablegl to
prefix all those builtin types with glinternal_ to avoid the clash.
You can check all the C++ examples and demos, I use my C++ rsw_math library.
// i.e. it should look like this:
#include ...
#include ...
#include ...
// if required
#define MANGLE_TYPES
#define PORTABLEGL_IMPLEMENTATION
#include "portablegl.h"
I use my CVector library for various types in PortableGL so you *can* #define
CVEC_ASSERT, CVEC_MEMMOVE, and (mutually inclusive) CVEC_MALLOC, CVEC_REALLOC,
and CVEC_FREE before the #include to avoid using the standard library
versions. However, currently, I use at least malloc, realloc, and memcpy in
PortableGL so doing so wouldn't actually avoid the standard library. Creating
equivalent PortableGL macros (that would automagically apply to any internally
used cvectors) is a TODO I suppose.
QUICK NOTES:
Primarily of interest to game/graphics developers and other people who
just want to play with the graphics pipeline and don't need peak
performance or the the entirety of OpenGL or Vulkan features.
RGBA32 is the only currently supported format for textures
Only GL_TEXTURE_MAG_FILTER is actually used internally but you can set the
MIN_FILTER for a texture.
8-bit per channel RGBA is the only supported format for the framebuffer
You can specify the order using the masks in init_glContext. Technically
it'd be relatively trivial to add support for other formats but for now
we use a u32* to access the buffer.
Any PortableGL program has roughly this structure, with some things
possibly declared globally or passed around in function parameters
as needed:
#define WIDTH 640
#define HEIGHT 480
// shaders are functions matching these prototypes
void smooth_vs(float* vs_output, void* vertex_attribs, Shader_Builtins* builtins, void* uniforms);
void smooth_fs(float* fs_input, Shader_Builtins* builtins, void* uniforms);
typedef struct My_Uniforms {
mat4 mvp_mat;
vec4 v_color;
} My_Uniforms;
u32* backbuf;
glContext the_context;
if (!init_glContext(&the_context, &backbuf, WIDTH, HEIGHT, 32, 0x00FF0000, 0x0000FF00, 0x000000FF, 0xFF000000)) {
puts("Failed to initialize glContext");
exit(0);
}
set_glContext(&the_context);
// interpolation is an array with an entry of SMOOTH, FLAT or
// NOPERSPECTIVE for each float being interpolated between the
// vertex and fragment shaders
// the last parameter is whether the fragment shader writes to
// gl_FragDepth or discard, but it's not currently used. In the future I may
// have a macro that enables early depth testing *if* that parameter is
false for a minor performance boost but canonically the depth test happens
// after the frag shader (and scissoring)
GLenum interpolation[4] = { SMOOTH, SMOOTH, SMOOTH, SMOOTH };
GLuint myshader = pglCreateProgram(smooth_vs, smooth_fs, 4, interpolation, GL_FALSE);
glUseProgram(myshader);
My_Uniforms the_uniforms;
pglSetUniform(&the_uniforms);
the_uniforms.v_color = Red; // not actually used, using per vert color
memcpy(the_uniforms.mvp_mat, identity, sizeof(mat4));
// Your standard OpenGL buffer setup etc. here
// Like the compatibility profile, we allow/enable a default
// VAO. We also have a default shader program for the same reason,
// something to fill index 0.
// see implementation of init_glContext for details
while (1) {
// standard glDraw calls, switching shaders etc.
// use backbuf however you want, whether that's blitting
// it to some framebuffer in your GUI system, or even writing
// it out to disk with something like stb_image_write.
}
free_glContext(&the_context);
// compare with equivalent glsl below
void smooth_vs(float* vs_output, void* vertex_attribs, Shader_Builtins* builtins, void* uniforms)
{
vec4* v_attribs = vertex_attribs;
((vec4*)vs_output)[0] = v_attribs[1]; //color
builtins->gl_Position = mult_mat4_vec4(*((mat4*)uniforms), v_attribs[0]);
}
void smooth_fs(float* fs_input, Shader_Builtins* builtins, void* uniforms)
{
builtins->gl_FragColor = ((vec4*)fs_input)[0];
}
// note smooth is the default so this is the same as smooth out vec4 vary_color
// https://www.khronos.org/opengl/wiki/Type_Qualifier_(GLSL)#Interpolation_qualifiers
uniform mat4 mvp_mat;
layout (location = 0) in vec4 in_vertex;
layout (location = 1) in vec4 in_color;
out vec4 vary_color;
void main(void)
{
vary_color = in_color;
gl_Position = mvp_mat * in_vertex;
}
in vec4 vary_color;
out vec4 frag_color;
void main(void)
{
frag_color = vary_color;
}
That's basically it. There are some other non-standard features like
pglSetInterp that lets you change the interpolation of a shader
whenever you want. In real OpenGL you'd have to have 2 (or more) separate
but almost identical shaders to do that.
There are also these predefined maximums which, considering the performance
limitations of PortableGL, are probably more than enough. MAX_DRAW_BUFFERS
isn't used since they're not currently supported anyway.
#define MAX_VERTICES 500000
#define GL_MAX_VERTEX_ATTRIBS 16
#define GL_MAX_VERTEX_OUTPUT_COMPONENTS 64
#define GL_MAX_DRAW_BUFFERS 8
MIT License
Copyright (c) 2011-2022 Robert Winkler
Copyright (c) 1997-2022 Fabrice Bellard (clipping code from TinyGL)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
*/
#ifdef MANGLE_TYPES
#define vec2 glinternal_vec2
#define vec3 glinternal_vec3
#define vec4 glinternal_vec4
#define dvec2 glinternal_dvec2
#define dvec3 glinternal_dvec3
#define dvec4 glinternal_dvec4
#define ivec2 glinternal_ivec2
#define ivec3 glinternal_ivec3
#define ivec4 glinternal_ivec4
#define uvec2 glinternal_uvec2
#define uvec3 glinternal_uvec3
#define uvec4 glinternal_uvec4
#define mat2 glinternal_mat2
#define mat3 glinternal_mat3
#define mat4 glinternal_mat4
#define Color glinternal_Color
#define Line glinternal_Line
#define Plane glinternal_Plane
#endif
#ifndef GL_H
#define GL_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef CRSW_MATH_H
#define CRSW_MATH_H
#include <math.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#define RM_PI (3.14159265358979323846)
#define RM_2PI (2.0 * RM_PI)
#define PI_DIV_180 (0.017453292519943296)
#define INV_PI_DIV_180 (57.2957795130823229)
#define DEG_TO_RAD(x) ((x)*PI_DIV_180)
#define RAD_TO_DEG(x) ((x)*INV_PI_DIV_180)
/* Hour angles */
#define HR_TO_DEG(x) ((x) * (1.0 / 15.0))
#define HR_TO_RAD(x) DEG_TO_RAD(HR_TO_DEG(x))
#define DEG_TO_HR(x) ((x) * 15.0)
#define RAD_TO_HR(x) DEG_TO_HR(RAD_TO_DEG(x))
// TODO rename RM_MAX? make proper inline functions?
#ifndef MAX
#define MAX(a, b) ((a) > (b)) ? (a) : (b)
#endif
#ifndef MIN
#define MIN(a, b) ((a) < (b)) ? (a) : (b)
#endif
#define MAP(X, A, B, C, D) ((X)-(A))/((B)-(A)) * ((D)-(C)) + (C)
typedef uint8_t u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;
typedef int8_t i8;
typedef int16_t i16;
typedef int32_t i32;
typedef int64_t i64;
// returns float [0,1)
inline float rsw_randf()
{
return rand() / (RAND_MAX + 1.0f);
}
inline float rsw_randf_range(float min, float max)
{
return min + (max-min) * rsw_randf();
}
typedef struct vec2
{
float x;
float y;
} vec2;
typedef struct vec3
{
float x;
float y;
float z;
} vec3;
typedef struct vec4
{
float x;
float y;
float z;
float w;
} vec4;
#define SET_VEC2(v, _x, _y) \
do {\
(v).x = _x;\
(v).y = _y;\
} while (0)
#define SET_VEC3(v, _x, _y, _z) \
do {\
(v).x = _x;\
(v).y = _y;\
(v).z = _z;\
} while (0)
#define SET_VEC4(v, _x, _y, _z, _w) \
do {\
(v).x = _x;\
(v).y = _y;\
(v).z = _z;\
(v).w = _w;\
} while (0)
inline vec2 make_vec2(float x, float y)
{
vec2 v = { x, y };
return v;
}
inline vec3 make_vec3(float x, float y, float z)
{
vec3 v = { x, y, z };
return v;
}
inline vec4 make_vec4(float x, float y, float z, float w)
{
vec4 v = { x, y, z, w };
return v;
}
// Component-wise negation (returns a new vector; argument is untouched).
inline vec2 negate_vec2(vec2 v)
{
	vec2 out;
	out.x = -v.x;
	out.y = -v.y;
	return out;
}
inline vec3 negate_vec3(vec3 v)
{
	vec3 out;
	out.x = -v.x;
	out.y = -v.y;
	out.z = -v.z;
	return out;
}
inline vec4 negate_vec4(vec4 v)
{
	vec4 out;
	out.x = -v.x;
	out.y = -v.y;
	out.z = -v.z;
	out.w = -v.w;
	return out;
}
// fprint_*/print_*: write the vector as "(x, y[, z[, w]])" followed by the
// caller-supplied `append` string (e.g. "\n") to f / stdout.
inline void fprint_vec2(FILE* f, vec2 v, const char* append)
{
fprintf(f, "(%f, %f)%s", v.x, v.y, append);
}
inline void fprint_vec3(FILE* f, vec3 v, const char* append)
{
fprintf(f, "(%f, %f, %f)%s", v.x, v.y, v.z, append);
}
inline void fprint_vec4(FILE* f, vec4 v, const char* append)
{
fprintf(f, "(%f, %f, %f, %f)%s", v.x, v.y, v.z, v.w, append);
}
inline void print_vec2(vec2 v, const char* append)
{
printf("(%f, %f)%s", v.x, v.y, append);
}
inline void print_vec3(vec3 v, const char* append)
{
printf("(%f, %f, %f)%s", v.x, v.y, v.z, append);
}
inline void print_vec4(vec4 v, const char* append)
{
printf("(%f, %f, %f, %f)%s", v.x, v.y, v.z, v.w, append);
}
// fread_*: scan a vector written in the fprint_* format (leading whitespace
// skipped); returns 1 when every component matched, 0 otherwise.
inline int fread_vec2(FILE* f, vec2* v)
{
int tmp = fscanf(f, " (%f, %f)", &v->x, &v->y);
return (tmp == 2);
}
inline int fread_vec3(FILE* f, vec3* v)
{
int tmp = fscanf(f, " (%f, %f, %f)", &v->x, &v->y, &v->z);
return (tmp == 3);
}
inline int fread_vec4(FILE* f, vec4* v)
{
int tmp = fscanf(f, " (%f, %f, %f, %f)", &v->x, &v->y, &v->z, &v->w);
return (tmp == 4);
}
// Double-precision vector types (no constructors/arithmetic — used mainly
// for I/O here).
typedef struct dvec2
{
double x;
double y;
} dvec2;
typedef struct dvec3
{
double x;
double y;
double z;
} dvec3;
typedef struct dvec4
{
double x;
double y;
double z;
double w;
} dvec4;
// Print as "(x, y[, z[, w]])" plus `append`. Note %f prints doubles at
// default precision, same format as the float versions.
inline void fprint_dvec2(FILE* f, dvec2 v, const char* append)
{
fprintf(f, "(%f, %f)%s", v.x, v.y, append);
}
inline void fprint_dvec3(FILE* f, dvec3 v, const char* append)
{
fprintf(f, "(%f, %f, %f)%s", v.x, v.y, v.z, append);
}
inline void fprint_dvec4(FILE* f, dvec4 v, const char* append)
{
fprintf(f, "(%f, %f, %f, %f)%s", v.x, v.y, v.z, v.w, append);
}
// Scan the fprint_* format (%lf for doubles); returns 1 on full match.
inline int fread_dvec2(FILE* f, dvec2* v)
{
int tmp = fscanf(f, " (%lf, %lf)", &v->x, &v->y);
return (tmp == 2);
}
inline int fread_dvec3(FILE* f, dvec3* v)
{
int tmp = fscanf(f, " (%lf, %lf, %lf)", &v->x, &v->y, &v->z);
return (tmp == 3);
}
inline int fread_dvec4(FILE* f, dvec4* v)
{
int tmp = fscanf(f, " (%lf, %lf, %lf, %lf)", &v->x, &v->y, &v->z, &v->w);
return (tmp == 4);
}
// Integer vector types.
typedef struct ivec2
{
int x;
int y;
} ivec2;
typedef struct ivec3
{
int x;
int y;
int z;
} ivec3;
typedef struct ivec4
{
int x;
int y;
int z;
int w;
} ivec4;
// Value constructors.
inline ivec2 make_ivec2(int x, int y)
{
ivec2 v = { x, y };
return v;
}
inline ivec3 make_ivec3(int x, int y, int z)
{
ivec3 v = { x, y, z };
return v;
}
inline ivec4 make_ivec4(int x, int y, int z, int w)
{
ivec4 v = { x, y, z, w };
return v;
}
// Print as "(x, y[, z[, w]])" plus `append`.
inline void fprint_ivec2(FILE* f, ivec2 v, const char* append)
{
fprintf(f, "(%d, %d)%s", v.x, v.y, append);
}
inline void fprint_ivec3(FILE* f, ivec3 v, const char* append)
{
fprintf(f, "(%d, %d, %d)%s", v.x, v.y, v.z, append);
}
inline void fprint_ivec4(FILE* f, ivec4 v, const char* append)
{
fprintf(f, "(%d, %d, %d, %d)%s", v.x, v.y, v.z, v.w, append);
}
// Scan the fprint_* format; returns 1 on full match.
inline int fread_ivec2(FILE* f, ivec2* v)
{
int tmp = fscanf(f, " (%d, %d)", &v->x, &v->y);
return (tmp == 2);
}
inline int fread_ivec3(FILE* f, ivec3* v)
{
int tmp = fscanf(f, " (%d, %d, %d)", &v->x, &v->y, &v->z);
return (tmp == 3);
}
inline int fread_ivec4(FILE* f, ivec4* v)
{
int tmp = fscanf(f, " (%d, %d, %d, %d)", &v->x, &v->y, &v->z, &v->w);
return (tmp == 4);
}
// Unsigned integer vector types.
typedef struct uvec2
{
unsigned int x;
unsigned int y;
} uvec2;
typedef struct uvec3
{
unsigned int x;
unsigned int y;
unsigned int z;
} uvec3;
typedef struct uvec4
{
unsigned int x;
unsigned int y;
unsigned int z;
unsigned int w;
} uvec4;
// Print as "(x, y[, z[, w]])" plus `append`.
inline void fprint_uvec2(FILE* f, uvec2 v, const char* append)
{
fprintf(f, "(%u, %u)%s", v.x, v.y, append);
}
inline void fprint_uvec3(FILE* f, uvec3 v, const char* append)
{
fprintf(f, "(%u, %u, %u)%s", v.x, v.y, v.z, append);
}
inline void fprint_uvec4(FILE* f, uvec4 v, const char* append)
{
fprintf(f, "(%u, %u, %u, %u)%s", v.x, v.y, v.z, v.w, append);
}
// Scan the fprint_* format; returns 1 on full match.
inline int fread_uvec2(FILE* f, uvec2* v)
{
int tmp = fscanf(f, " (%u, %u)", &v->x, &v->y);
return (tmp == 2);
}
inline int fread_uvec3(FILE* f, uvec3* v)
{
int tmp = fscanf(f, " (%u, %u, %u)", &v->x, &v->y, &v->z);
return (tmp == 3);
}
inline int fread_uvec4(FILE* f, uvec4* v)
{
int tmp = fscanf(f, " (%u, %u, %u, %u)", &v->x, &v->y, &v->z, &v->w);
return (tmp == 4);
}
// Euclidean length of a vec2.
inline float length_vec2(vec2 a)
{
	double sq = a.x * a.x + a.y * a.y;
	return sqrt(sq);
}
// Euclidean length of a vec3.
inline float length_vec3(vec3 a)
{
	double sq = a.x * a.x + a.y * a.y + a.z * a.z;
	return sqrt(sq);
}
// Unit-length copy of a vec2 (divides; a zero vector yields inf/NaN).
inline vec2 norm_vec2(vec2 a)
{
	float len = length_vec2(a);
	vec2 out = { a.x/len, a.y/len };
	return out;
}
// Unit-length copy of a vec3.
inline vec3 norm_vec3(vec3 a)
{
	float len = length_vec3(a);
	vec3 out = { a.x/len, a.y/len, a.z/len };
	return out;
}
// Normalize a vec2 in place.
inline void normalize_vec2(vec2* a)
{
	float len = length_vec2(*a);
	a->x /= len;
	a->y /= len;
}
// Normalize a vec3 in place.
inline void normalize_vec3(vec3* a)
{
	float len = length_vec3(*a);
	a->x /= len;
	a->y /= len;
	a->z /= len;
}
// Component-wise sum.
inline vec2 add_vec2s(vec2 a, vec2 b)
{
	vec2 out = { a.x + b.x, a.y + b.y };
	return out;
}
inline vec3 add_vec3s(vec3 a, vec3 b)
{
	vec3 out = { a.x + b.x, a.y + b.y, a.z + b.z };
	return out;
}
inline vec4 add_vec4s(vec4 a, vec4 b)
{
	vec4 out = { a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w };
	return out;
}
// Component-wise difference (a - b).
inline vec2 sub_vec2s(vec2 a, vec2 b)
{
	vec2 out = { a.x - b.x, a.y - b.y };
	return out;
}
inline vec3 sub_vec3s(vec3 a, vec3 b)
{
	vec3 out = { a.x - b.x, a.y - b.y, a.z - b.z };
	return out;
}
inline vec4 sub_vec4s(vec4 a, vec4 b)
{
	vec4 out = { a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w };
	return out;
}
// Component-wise (Hadamard) product.
inline vec2 mult_vec2s(vec2 a, vec2 b)
{
	vec2 out = { a.x * b.x, a.y * b.y };
	return out;
}
inline vec3 mult_vec3s(vec3 a, vec3 b)
{
	vec3 out = { a.x * b.x, a.y * b.y, a.z * b.z };
	return out;
}
inline vec4 mult_vec4s(vec4 a, vec4 b)
{
	vec4 out = { a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w };
	return out;
}
// Component-wise quotient (a / b; no zero checks).
inline vec2 div_vec2s(vec2 a, vec2 b)
{
	vec2 out = { a.x / b.x, a.y / b.y };
	return out;
}
inline vec3 div_vec3s(vec3 a, vec3 b)
{
	vec3 out = { a.x / b.x, a.y / b.y, a.z / b.z };
	return out;
}
inline vec4 div_vec4s(vec4 a, vec4 b)
{
	vec4 out = { a.x / b.x, a.y / b.y, a.z / b.z, a.w / b.w };
	return out;
}
// Dot (inner) products.
inline float dot_vec2s(vec2 a, vec2 b)
{
	float d = a.x * b.x;
	d += a.y * b.y;
	return d;
}
inline float dot_vec3s(vec3 a, vec3 b)
{
	float d = a.x * b.x;
	d += a.y * b.y;
	d += a.z * b.z;
	return d;
}
inline float dot_vec4s(vec4 a, vec4 b)
{
	float d = a.x * b.x;
	d += a.y * b.y;
	d += a.z * b.z;
	d += a.w * b.w;
	return d;
}
// Uniform scaling by scalar s.
inline vec2 scale_vec2(vec2 a, float s)
{
	vec2 out = { a.x * s, a.y * s };
	return out;
}
inline vec3 scale_vec3(vec3 a, float s)
{
	vec3 out = { a.x * s, a.y * s, a.z * s };
	return out;
}
inline vec4 scale_vec4(vec4 a, float s)
{
	vec4 out = { a.x * s, a.y * s, a.z * s, a.w * s };
	return out;
}
// Exact component-wise equality (float ==; see the epsilon versions below).
inline int equal_vec2s(vec2 a, vec2 b)
{
	return a.x == b.x && a.y == b.y;
}
inline int equal_vec3s(vec3 a, vec3 b)
{
	return a.x == b.x && a.y == b.y && a.z == b.z;
}
inline int equal_vec4s(vec4 a, vec4 b)
{
	return a.x == b.x && a.y == b.y && a.z == b.z && a.w == b.w;
}
// Approximate equality: every component differs by less than epsilon.
inline int equal_epsilon_vec2s(vec2 a, vec2 b, float epsilon)
{
	return fabs(a.x-b.x) < epsilon && fabs(a.y - b.y) < epsilon;
}
inline int equal_epsilon_vec3s(vec3 a, vec3 b, float epsilon)
{
	return fabs(a.x-b.x) < epsilon && fabs(a.y - b.y) < epsilon &&
	       fabs(a.z - b.z) < epsilon;
}
inline int equal_epsilon_vec4s(vec4 a, vec4 b, float epsilon)
{
	return fabs(a.x-b.x) < epsilon && fabs(a.y - b.y) < epsilon &&
	       fabs(a.z - b.z) < epsilon && fabs(a.w - b.w) < epsilon;
}
// Truncating conversions (drop trailing components) ...
inline vec2 vec4_to_vec2(vec4 a)
{
	vec2 out = { a.x, a.y };
	return out;
}
inline vec3 vec4_to_vec3(vec4 a)
{
	vec3 out = { a.x, a.y, a.z };
	return out;
}
// ... and homogeneous conversions (perspective divide by w).
inline vec2 vec4_to_vec2h(vec4 a)
{
	vec2 out = { a.x/a.w, a.y/a.w };
	return out;
}
inline vec3 vec4_to_vec3h(vec4 a)
{
	vec3 out = { a.x/a.w, a.y/a.w, a.z/a.w };
	return out;
}
// Right-handed cross product u x v.
inline vec3 cross_product(const vec3 u, const vec3 v)
{
	vec3 out;
	out.x = u.y*v.z - v.y*u.z;
	out.y = -u.x*v.z + v.x*u.z;
	out.z = u.x*v.y - v.x*u.y;
	return out;
}
// Angle in radians between u and v. The dot product is fed to acos without
// dividing by the lengths, so this assumes u and v are unit vectors —
// TODO confirm callers normalize first.
inline float angle_between_vec3(const vec3 u, const vec3 v)
{
return acos(dot_vec3s(u, v));
}
/* matrices **************/
// Matrices are flat float arrays; element order (row- vs column-major) is
// selected by the ROW_MAJOR macro used by the accessors below.
typedef float mat2[4];
typedef float mat3[9];
typedef float mat4[16];
// Brace-initializer forms of the identity matrix (usable at declaration).
#define IDENTITY_MAT2() { 1, 0, 0, 1 }
#define IDENTITY_MAT3() { 1, 0, 0, 0, 1, 0, 0, 0, 1 }
#define IDENTITY_MAT4() { 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1 }
// Statement forms: overwrite an existing matrix with the identity.
#define SET_IDENTITY_MAT2(m) \
	do { \
	m[1] = m[2] = 0; \
	m[0] = m[3] = 1; \
	} while (0)
#define SET_IDENTITY_MAT3(m) \
	do { \
	memset(m, 0, sizeof(float)*9); \
	m[0] = m[4] = m[8] = 1; \
	} while (0)
#define SET_IDENTITY_MAT4(m) \
	do { \
	memset(m, 0, sizeof(float)*16); \
	m[0] = m[5] = m[10] = m[15] = 1; \
	} while (0)
// mat2 row (x_/y_) and column (c1_/c2_) getters and setters. With the default
// column-major layout, column j is m[2j..2j+1]; with ROW_MAJOR, row i is
// m[2i..2i+1].
#ifndef ROW_MAJOR
inline vec2 x_mat2(mat2 m) { return make_vec2(m[0], m[2]); }
inline vec2 y_mat2(mat2 m) { return make_vec2(m[1], m[3]); }
inline vec2 c1_mat2(mat2 m) { return make_vec2(m[0], m[1]); }
inline vec2 c2_mat2(mat2 m) { return make_vec2(m[2], m[3]); }
inline void setc1_mat2(mat2 m, vec2 v) { m[0]=v.x, m[1]=v.y; }
inline void setc2_mat2(mat2 m, vec2 v) { m[2]=v.x, m[3]=v.y; }
inline void setx_mat2(mat2 m, vec2 v) { m[0]=v.x, m[2]=v.y; }
inline void sety_mat2(mat2 m, vec2 v) { m[1]=v.x, m[3]=v.y; }
#else
inline vec2 x_mat2(mat2 m) { return make_vec2(m[0], m[1]); }
inline vec2 y_mat2(mat2 m) { return make_vec2(m[2], m[3]); }
inline vec2 c1_mat2(mat2 m) { return make_vec2(m[0], m[2]); }
inline vec2 c2_mat2(mat2 m) { return make_vec2(m[1], m[3]); }
inline void setc1_mat2(mat2 m, vec2 v) { m[0]=v.x, m[2]=v.y; }
inline void setc2_mat2(mat2 m, vec2 v) { m[1]=v.x, m[3]=v.y; }
inline void setx_mat2(mat2 m, vec2 v) { m[0]=v.x, m[1]=v.y; }
inline void sety_mat2(mat2 m, vec2 v) { m[2]=v.x, m[3]=v.y; }
#endif
// mat3 row (x_/y_/z_) and column (c1_/c2_/c3_) getters and setters.
// Column-major by default: column j is m[3j..3j+2]; with ROW_MAJOR, row i is
// m[3i..3i+2].
#ifndef ROW_MAJOR
inline vec3 x_mat3(mat3 m) { return make_vec3(m[0], m[3], m[6]); }
inline vec3 y_mat3(mat3 m) { return make_vec3(m[1], m[4], m[7]); }
inline vec3 z_mat3(mat3 m) { return make_vec3(m[2], m[5], m[8]); }
inline vec3 c1_mat3(mat3 m) { return make_vec3(m[0], m[1], m[2]); }
inline vec3 c2_mat3(mat3 m) { return make_vec3(m[3], m[4], m[5]); }
inline vec3 c3_mat3(mat3 m) { return make_vec3(m[6], m[7], m[8]); }
inline void setc1_mat3(mat3 m, vec3 v) { m[0]=v.x, m[1]=v.y, m[2]=v.z; }
inline void setc2_mat3(mat3 m, vec3 v) { m[3]=v.x, m[4]=v.y, m[5]=v.z; }
inline void setc3_mat3(mat3 m, vec3 v) { m[6]=v.x, m[7]=v.y, m[8]=v.z; }
inline void setx_mat3(mat3 m, vec3 v) { m[0]=v.x, m[3]=v.y, m[6]=v.z; }
inline void sety_mat3(mat3 m, vec3 v) { m[1]=v.x, m[4]=v.y, m[7]=v.z; }
inline void setz_mat3(mat3 m, vec3 v) { m[2]=v.x, m[5]=v.y, m[8]=v.z; }
#else
inline vec3 x_mat3(mat3 m) { return make_vec3(m[0], m[1], m[2]); }
inline vec3 y_mat3(mat3 m) { return make_vec3(m[3], m[4], m[5]); }
inline vec3 z_mat3(mat3 m) { return make_vec3(m[6], m[7], m[8]); }
inline vec3 c1_mat3(mat3 m) { return make_vec3(m[0], m[3], m[6]); }
inline vec3 c2_mat3(mat3 m) { return make_vec3(m[1], m[4], m[7]); }
inline vec3 c3_mat3(mat3 m) { return make_vec3(m[2], m[5], m[8]); }
inline void setc1_mat3(mat3 m, vec3 v) { m[0]=v.x, m[3]=v.y, m[6]=v.z; }
inline void setc2_mat3(mat3 m, vec3 v) { m[1]=v.x, m[4]=v.y, m[7]=v.z; }
inline void setc3_mat3(mat3 m, vec3 v) { m[2]=v.x, m[5]=v.y, m[8]=v.z; }
inline void setx_mat3(mat3 m, vec3 v) { m[0]=v.x, m[1]=v.y, m[2]=v.z; }
inline void sety_mat3(mat3 m, vec3 v) { m[3]=v.x, m[4]=v.y, m[5]=v.z; }
inline void setz_mat3(mat3 m, vec3 v) { m[6]=v.x, m[7]=v.y, m[8]=v.z; }
#endif
// mat4 row (x_/y_/z_/w_) and column (c1_..c4_) getters and setters.
// Column-major by default: column j is m[4j..4j+3]; with ROW_MAJOR, row i is
// m[4i..4i+3]. The *v3 setters take a vec3 and fill the 4th slot with the
// affine value (0 for basis rows/columns, 1 for the translation slot).
#ifndef ROW_MAJOR
inline vec4 c1_mat4(mat4 m) { return make_vec4(m[ 0], m[ 1], m[ 2], m[ 3]); }
inline vec4 c2_mat4(mat4 m) { return make_vec4(m[ 4], m[ 5], m[ 6], m[ 7]); }
inline vec4 c3_mat4(mat4 m) { return make_vec4(m[ 8], m[ 9], m[10], m[11]); }
inline vec4 c4_mat4(mat4 m) { return make_vec4(m[12], m[13], m[14], m[15]); }
inline vec4 x_mat4(mat4 m) { return make_vec4(m[0], m[4], m[8], m[12]); }
inline vec4 y_mat4(mat4 m) { return make_vec4(m[1], m[5], m[9], m[13]); }
inline vec4 z_mat4(mat4 m) { return make_vec4(m[2], m[6], m[10], m[14]); }
inline vec4 w_mat4(mat4 m) { return make_vec4(m[3], m[7], m[11], m[15]); }
//sets 4th row to 0 0 0 1
inline void setc1_mat4v3(mat4 m, vec3 v) { m[ 0]=v.x, m[ 1]=v.y, m[ 2]=v.z, m[ 3]=0; }
inline void setc2_mat4v3(mat4 m, vec3 v) { m[ 4]=v.x, m[ 5]=v.y, m[ 6]=v.z, m[ 7]=0; }
inline void setc3_mat4v3(mat4 m, vec3 v) { m[ 8]=v.x, m[ 9]=v.y, m[10]=v.z, m[11]=0; }
inline void setc4_mat4v3(mat4 m, vec3 v) { m[12]=v.x, m[13]=v.y, m[14]=v.z, m[15]=1; }
inline void setc1_mat4v4(mat4 m, vec4 v) { m[ 0]=v.x, m[ 1]=v.y, m[ 2]=v.z, m[ 3]=v.w; }
inline void setc2_mat4v4(mat4 m, vec4 v) { m[ 4]=v.x, m[ 5]=v.y, m[ 6]=v.z, m[ 7]=v.w; }
inline void setc3_mat4v4(mat4 m, vec4 v) { m[ 8]=v.x, m[ 9]=v.y, m[10]=v.z, m[11]=v.w; }
inline void setc4_mat4v4(mat4 m, vec4 v) { m[12]=v.x, m[13]=v.y, m[14]=v.z, m[15]=v.w; }
//sets 4th column to 0 0 0 1
inline void setx_mat4v3(mat4 m, vec3 v) { m[0]=v.x, m[4]=v.y, m[ 8]=v.z, m[12]=0; }
inline void sety_mat4v3(mat4 m, vec3 v) { m[1]=v.x, m[5]=v.y, m[ 9]=v.z, m[13]=0; }
inline void setz_mat4v3(mat4 m, vec3 v) { m[2]=v.x, m[6]=v.y, m[10]=v.z, m[14]=0; }
inline void setw_mat4v3(mat4 m, vec3 v) { m[3]=v.x, m[7]=v.y, m[11]=v.z, m[15]=1; }
inline void setx_mat4v4(mat4 m, vec4 v) { m[0]=v.x, m[4]=v.y, m[ 8]=v.z, m[12]=v.w; }
inline void sety_mat4v4(mat4 m, vec4 v) { m[1]=v.x, m[5]=v.y, m[ 9]=v.z, m[13]=v.w; }
inline void setz_mat4v4(mat4 m, vec4 v) { m[2]=v.x, m[6]=v.y, m[10]=v.z, m[14]=v.w; }
inline void setw_mat4v4(mat4 m, vec4 v) { m[3]=v.x, m[7]=v.y, m[11]=v.z, m[15]=v.w; }
#else
inline vec4 c1_mat4(mat4 m) { return make_vec4(m[0], m[4], m[8], m[12]); }
inline vec4 c2_mat4(mat4 m) { return make_vec4(m[1], m[5], m[9], m[13]); }
inline vec4 c3_mat4(mat4 m) { return make_vec4(m[2], m[6], m[10], m[14]); }
inline vec4 c4_mat4(mat4 m) { return make_vec4(m[3], m[7], m[11], m[15]); }
inline vec4 x_mat4(mat4 m) { return make_vec4(m[0], m[1], m[2], m[3]); }
inline vec4 y_mat4(mat4 m) { return make_vec4(m[4], m[5], m[6], m[7]); }
inline vec4 z_mat4(mat4 m) { return make_vec4(m[8], m[9], m[10], m[11]); }
inline vec4 w_mat4(mat4 m) { return make_vec4(m[12], m[13], m[14], m[15]); }
//sets 4th row to 0 0 0 1
inline void setc1_mat4v3(mat4 m, vec3 v) { m[0]=v.x, m[4]=v.y, m[8]=v.z, m[12]=0; }
inline void setc2_mat4v3(mat4 m, vec3 v) { m[1]=v.x, m[5]=v.y, m[9]=v.z, m[13]=0; }
inline void setc3_mat4v3(mat4 m, vec3 v) { m[2]=v.x, m[6]=v.y, m[10]=v.z, m[14]=0; }
inline void setc4_mat4v3(mat4 m, vec3 v) { m[3]=v.x, m[7]=v.y, m[11]=v.z, m[15]=1; }
inline void setc1_mat4v4(mat4 m, vec4 v) { m[0]=v.x, m[4]=v.y, m[8]=v.z, m[12]=v.w; }
inline void setc2_mat4v4(mat4 m, vec4 v) { m[1]=v.x, m[5]=v.y, m[9]=v.z, m[13]=v.w; }
inline void setc3_mat4v4(mat4 m, vec4 v) { m[2]=v.x, m[6]=v.y, m[10]=v.z, m[14]=v.w; }
inline void setc4_mat4v4(mat4 m, vec4 v) { m[3]=v.x, m[7]=v.y, m[11]=v.z, m[15]=v.w; }
//sets 4th column to 0 0 0 1
inline void setx_mat4v3(mat4 m, vec3 v) { m[0]=v.x, m[1]=v.y, m[2]=v.z, m[3]=0; }
inline void sety_mat4v3(mat4 m, vec3 v) { m[4]=v.x, m[5]=v.y, m[6]=v.z, m[7]=0; }
inline void setz_mat4v3(mat4 m, vec3 v) { m[8]=v.x, m[9]=v.y, m[10]=v.z, m[11]=0; }
inline void setw_mat4v3(mat4 m, vec3 v) { m[12]=v.x, m[13]=v.y, m[14]=v.z, m[15]=1; }
inline void setx_mat4v4(mat4 m, vec4 v) { m[0]=v.x, m[1]=v.y, m[2]=v.z, m[3]=v.w; }
inline void sety_mat4v4(mat4 m, vec4 v) { m[4]=v.x, m[5]=v.y, m[6]=v.z, m[7]=v.w; }
inline void setz_mat4v4(mat4 m, vec4 v) { m[8]=v.x, m[9]=v.y, m[10]=v.z, m[11]=v.w; }
inline void setw_mat4v4(mat4 m, vec4 v) { m[12]=v.x, m[13]=v.y, m[14]=v.z, m[15]=v.w; }
#endif
// Print a 2x2 matrix to f in row order (one parenthesized row per line),
// followed by the caller-supplied `append` string (e.g. "\n").
inline void fprint_mat2(FILE* f, mat2 m, const char* append)
{
#ifndef ROW_MAJOR
fprintf(f, "[(%f, %f)\n (%f, %f)]%s",
m[0], m[2], m[1], m[3], append);
#else
fprintf(f, "[(%f, %f)\n (%f, %f)]%s",
m[0], m[1], m[2], m[3], append);
#endif
}
// Print a 3x3 matrix to f in row order, then `append`.
inline void fprint_mat3(FILE* f, mat3 m, const char* append)
{
#ifndef ROW_MAJOR
fprintf(f, "[(%f, %f, %f)\n (%f, %f, %f)\n (%f, %f, %f)]%s",
m[0], m[3], m[6], m[1], m[4], m[7], m[2], m[5], m[8], append);
#else
fprintf(f, "[(%f, %f, %f)\n (%f, %f, %f)\n (%f, %f, %f)]%s",
m[0], m[1], m[2], m[3], m[4], m[5], m[6], m[7], m[8], append);
#endif
}
// Print a 4x4 matrix to f in row order, then `append`.
inline void fprint_mat4(FILE* f, mat4 m, const char* append)
{
#ifndef ROW_MAJOR
fprintf(f, "[(%f, %f, %f, %f)\n(%f, %f, %f, %f)\n(%f, %f, %f, %f)\n(%f, %f, %f, %f)]%s",
m[0], m[4], m[8], m[12], m[1], m[5], m[9], m[13], m[2], m[6], m[10], m[14],
m[3], m[7], m[11], m[15], append);
#else
fprintf(f, "[(%f, %f, %f, %f)\n(%f, %f, %f, %f)\n(%f, %f, %f, %f)\n(%f, %f, %f, %f)]%s",
m[0], m[1], m[2], m[3], m[4], m[5], m[6], m[7], m[8], m[9], m[10], m[11],
m[12], m[13], m[14], m[15], append);
#endif
}
// macros?
// Convenience wrappers: print to stdout via the fprint_* functions above.
inline void print_mat2(mat2 m, const char* append)
{
fprint_mat2(stdout, m, append);
}
inline void print_mat3(mat3 m, const char* append)
{
fprint_mat3(stdout, m, append);
}
inline void print_mat4(mat4 m, const char* append)
{
fprint_mat4(stdout, m, append);
}
//TODO define macros for doing array version
// Matrix-vector multiply: r = m * v for a 2x2 matrix, honoring the
// compile-time storage layout (column-major by default, ROW_MAJOR otherwise).
inline vec2 mult_mat2_vec2(mat2 m, vec2 v)
{
	vec2 r;
#ifndef ROW_MAJOR
	r.x = m[0]*v.x + m[2]*v.y;
	r.y = m[1]*v.x + m[3]*v.y;
#else
	r.x = m[0]*v.x + m[1]*v.y;
	// BUGFIX: was m[3]*v.x + m[3]*v.y; in row-major order the second row is
	// m[2], m[3], so the x coefficient must be m[2]. The old code produced a
	// wrong y component whenever ROW_MAJOR was defined.
	r.y = m[2]*v.x + m[3]*v.y;
#endif
	return r;
}
// Matrix-vector multiply: r = m * v for a 3x3 matrix, honoring the
// compile-time storage layout (column-major by default, ROW_MAJOR otherwise).
inline vec3 mult_mat3_vec3(mat3 m, vec3 v)
{
vec3 r;
#ifndef ROW_MAJOR
r.x = m[0]*v.x + m[3]*v.y + m[6]*v.z;
r.y = m[1]*v.x + m[4]*v.y + m[7]*v.z;
r.z = m[2]*v.x + m[5]*v.y + m[8]*v.z;
#else
r.x = m[0]*v.x + m[1]*v.y + m[2]*v.z;
r.y = m[3]*v.x + m[4]*v.y + m[5]*v.z;
r.z = m[6]*v.x + m[7]*v.y + m[8]*v.z;
#endif
return r;
}
// Matrix-vector multiply: r = m * v for a 4x4 matrix, honoring the
// compile-time storage layout (column-major by default, ROW_MAJOR otherwise).
inline vec4 mult_mat4_vec4(mat4 m, vec4 v)
{
vec4 r;
#ifndef ROW_MAJOR
r.x = m[0]*v.x + m[4]*v.y + m[8]*v.z + m[12]*v.w;
r.y = m[1]*v.x + m[5]*v.y + m[9]*v.z + m[13]*v.w;
r.z = m[2]*v.x + m[6]*v.y + m[10]*v.z + m[14]*v.w;
r.w = m[3]*v.x + m[7]*v.y + m[11]*v.z + m[15]*v.w;
#else
r.x = m[0]*v.x + m[1]*v.y + m[2]*v.z + m[3]*v.w;
r.y = m[4]*v.x + m[5]*v.y + m[6]*v.z + m[7]*v.w;
r.z = m[8]*v.x + m[9]*v.y + m[10]*v.z + m[11]*v.w;
r.w = m[12]*v.x + m[13]*v.y + m[14]*v.z + m[15]*v.w;
#endif
return r;
}
// Out-parameter matrix routines; definitions live elsewhere in the file.
// c = a * b (4x4 matrix product).
void mult_mat4_mat4(mat4 c, mat4 a, mat4 b);
// Build a rotation matrix about the given axis by `angle`.
// NOTE(review): the angle unit (radians vs degrees) is not visible from the
// declarations -- confirm at the definitions before relying on it.
void load_rotation_mat3(mat3 mat, vec3 v, float angle);
void load_rotation_mat4(mat4 mat, vec3 vec, float angle);
//void invert_mat4(mat4 mInverse, const mat4 m);
// Projection / viewport / view matrix builders (gluPerspective, glFrustum,
// glOrtho, glViewport and gluLookAt analogues).
void make_perspective_matrix(mat4 mat, float fFov, float aspect, float near, float far);
void make_pers_matrix(mat4 mat, float z_near, float z_far);
void make_perspective_proj_matrix(mat4 mat, float left, float right, float bottom, float top, float near, float far);
void make_orthographic_matrix(mat4 mat, float left, float right, float bottom, float top, float near, float far);
void make_viewport_matrix(mat4 mat, int x, int y, unsigned int width, unsigned int height, int opengl);
void lookAt(mat4 mat, vec3 eye, vec3 center, vec3 up);
///////////Matrix transformation functions
// Overwrite m with a 3x3 scaling matrix diag(x, y, z).
inline void scale_mat3(mat3 m, float x, float y, float z)
{
#ifndef ROW_MAJOR
m[0] = x; m[3] = 0; m[6] = 0;
m[1] = 0; m[4] = y; m[7] = 0;
m[2] = 0; m[5] = 0; m[8] = z;
#else
m[0] = x; m[1] = 0; m[2] = 0;
m[3] = 0; m[4] = y; m[5] = 0;
m[6] = 0; m[7] = 0; m[8] = z;
#endif
}
// Overwrite m with a 4x4 scaling matrix diag(x, y, z, 1).
inline void scale_mat4(mat4 m, float x, float y, float z)
{
#ifndef ROW_MAJOR
m[ 0] = x; m[ 4] = 0; m[ 8] = 0; m[12] = 0;
m[ 1] = 0; m[ 5] = y; m[ 9] = 0; m[13] = 0;
m[ 2] = 0; m[ 6] = 0; m[10] = z; m[14] = 0;
m[ 3] = 0; m[ 7] = 0; m[11] = 0; m[15] = 1;
#else
m[ 0] = x; m[ 1] = 0; m[ 2] = 0; m[ 3] = 0;
m[ 4] = 0; m[ 5] = y; m[ 6] = 0; m[ 7] = 0;
m[ 8] = 0; m[ 9] = 0; m[10] = z; m[11] = 0;
m[12] = 0; m[13] = 0; m[14] = 0; m[15] = 1;
#endif
}
// Create a Translation matrix. Only 4x4 matrices have translation components
// Overwrite m with a 4x4 translation matrix moving points by (x, y, z).
// The translation ends up in the last column (column-major) / last row's
// worth of elements m[3],m[7],m[11] (row-major) as appropriate.
inline void translation_mat4(mat4 m, float x, float y, float z)
{
#ifndef ROW_MAJOR
m[ 0] = 1; m[ 4] = 0; m[ 8] = 0; m[12] = x;
m[ 1] = 0; m[ 5] = 1; m[ 9] = 0; m[13] = y;
m[ 2] = 0; m[ 6] = 0; m[10] = 1; m[14] = z;
m[ 3] = 0; m[ 7] = 0; m[11] = 0; m[15] = 1;
#else
m[ 0] = 1; m[ 1] = 0; m[ 2] = 0; m[ 3] = x;
m[ 4] = 0; m[ 5] = 1; m[ 6] = 0; m[ 7] = y;
m[ 8] = 0; m[ 9] = 0; m[10] = 1; m[11] = z;
m[12] = 0; m[13] = 0; m[14] = 0; m[15] = 1;
#endif
}
// Copy the upper-left 3x3 block of a 4x4 matrix into dst. If `normalize` is
// nonzero, each of the three basis columns is normalized first, stripping any
// scale so only the rotation remains.
//
// M44/M33 map (row, col) to the flat index for the active storage layout.
#ifndef ROW_MAJOR
#define M44(m, row, col) m[col*4 + row]
#define M33(m, row, col) m[col*3 + row]
#else
#define M44(m, row, col) m[row*4 + col]
#define M33(m, row, col) m[row*3 + col]
#endif
inline void extract_rotation_mat4(mat3 dst, mat4 src, int normalize)
{
vec3 tmp;
if (normalize) {
// Column 0: copy, normalize, store.
tmp.x = M44(src, 0, 0);
tmp.y = M44(src, 1, 0);
tmp.z = M44(src, 2, 0);
normalize_vec3(&tmp);
M33(dst, 0, 0) = tmp.x;
M33(dst, 1, 0) = tmp.y;
M33(dst, 2, 0) = tmp.z;
// Column 1.
tmp.x = M44(src, 0, 1);
tmp.y = M44(src, 1, 1);
tmp.z = M44(src, 2, 1);
normalize_vec3(&tmp);
M33(dst, 0, 1) = tmp.x;
M33(dst, 1, 1) = tmp.y;
M33(dst, 2, 1) = tmp.z;
// Column 2.
tmp.x = M44(src, 0, 2);
tmp.y = M44(src, 1, 2);
tmp.z = M44(src, 2, 2);
normalize_vec3(&tmp);
M33(dst, 0, 2) = tmp.x;
M33(dst, 1, 2) = tmp.y;
M33(dst, 2, 2) = tmp.z;
} else {
// Straight element-wise copy of the 3x3 block.
M33(dst, 0, 0) = M44(src, 0, 0);
M33(dst, 1, 0) = M44(src, 1, 0);
M33(dst, 2, 0) = M44(src, 2, 0);
M33(dst, 0, 1) = M44(src, 0, 1);
M33(dst, 1, 1) = M44(src, 1, 1);
M33(dst, 2, 1) = M44(src, 2, 1);
M33(dst, 0, 2) = M44(src, 0, 2);
M33(dst, 1, 2) = M44(src, 1, 2);
M33(dst, 2, 2) = M44(src, 2, 2);
}
}
// Keep the index helpers file-local to this function.
#undef M33
#undef M44
#ifndef EXCLUDE_GLSL
// GLSL functions
//
// Clamp f to the range [0, 1].
static inline float clamp_01(float f)
{
	if (f > 1.0f) return 1.0f;
	return (f < 0.0f) ? 0.0f : f;
}
// Clamp x to the range [minVal, maxVal].
static inline float clamp(float x, float minVal, float maxVal)
{
	return (x < minVal) ? minVal : ((x > maxVal) ? maxVal : x);
}
// Helper macros that generate component-wise vec2/vec3/vec4 wrappers for a
// scalar function `func`, named func_vec2 / func_vec3 / func_vec4.
#define PGL_VECTORIZE2_VEC(func) \
inline vec2 func##_vec2(vec2 v) \
{ \
return make_vec2(func(v.x), func(v.y)); \
}
#define PGL_VECTORIZE3_VEC(func) \
inline vec3 func##_vec3(vec3 v) \
{ \
return make_vec3(func(v.x), func(v.y), func(v.z)); \
}
#define PGL_VECTORIZE4_VEC(func) \
inline vec4 func##_vec4(vec4 v) \
{ \
return make_vec4(func(v.x), func(v.y), func(v.z), func(v.w)); \
}
// Generate all three sizes at once.
#define PGL_VECTORIZE_VEC(func) \
PGL_VECTORIZE2_VEC(func) \
PGL_VECTORIZE3_VEC(func) \
PGL_VECTORIZE4_VEC(func)
// Same generators but the emitted wrappers are `static inline`.
#define PGL_STATIC_VECTORIZE2_VEC(func) \
static inline vec2 func##_vec2(vec2 v) \
{ \
return make_vec2(func(v.x), func(v.y)); \
}
#define PGL_STATIC_VECTORIZE3_VEC(func) \
static inline vec3 func##_vec3(vec3 v) \
{ \
return make_vec3(func(v.x), func(v.y), func(v.z)); \
}
#define PGL_STATIC_VECTORIZE4_VEC(func) \
static inline vec4 func##_vec4(vec4 v) \
{ \
return make_vec4(func(v.x), func(v.y), func(v.z), func(v.w)); \
}
#define PGL_STATIC_VECTORIZE_VEC(func) \
PGL_STATIC_VECTORIZE2_VEC(func) \
PGL_STATIC_VECTORIZE3_VEC(func) \
PGL_STATIC_VECTORIZE4_VEC(func)
// Component-wise clamp of a vec2 to [minVal, maxVal].
static inline vec2 clamp_vec2(vec2 x, float minVal, float maxVal)
{
	vec2 out;
	out.x = clamp(x.x, minVal, maxVal);
	out.y = clamp(x.y, minVal, maxVal);
	return out;
}
// Component-wise clamp of a vec3 to [minVal, maxVal].
static inline vec3 clamp_vec3(vec3 x, float minVal, float maxVal)
{
	vec3 out;
	out.x = clamp(x.x, minVal, maxVal);
	out.y = clamp(x.y, minVal, maxVal);
	out.z = clamp(x.z, minVal, maxVal);
	return out;
}
// Component-wise clamp of a vec4 to [minVal, maxVal].
static inline vec4 clamp_vec4(vec4 x, float minVal, float maxVal)
{
	vec4 out;
	out.x = clamp(x.x, minVal, maxVal);
	out.y = clamp(x.y, minVal, maxVal);
	out.z = clamp(x.z, minVal, maxVal);
	out.w = clamp(x.w, minVal, maxVal);
	return out;
}
// Euclidean distance between two 2D points: |a - b|.
static float distance_vec2(vec2 a, vec2 b)
{
return length_vec2(sub_vec2s(a, b));
}
// Euclidean distance between two 3D points: |a - b|.
static float distance_vec3(vec3 a, vec3 b)
{
return length_vec3(sub_vec3s(a, b));
}
// GLSL reflect(): i - 2*dot(i, n)*n.
// NOTE(review): like GLSL, correct only when n is a unit vector -- confirm
// callers normalize n.
static inline vec3 reflect_vec3(vec3 i, vec3 n)
{
return sub_vec3s(i, scale_vec3(n, 2 * dot_vec3s(i, n)));
}
// GLSL smoothstep(): cubic Hermite interpolation of x between edge0 and edge1,
// clamped to [0, 1].
static inline float smoothstep(float edge0, float edge1, float x)
{
	float span = edge1 - edge0;
	float t = clamp_01((x - edge0)/span);
	return t*t*(3 - 2*t);
}
// GLSL mix(): linear interpolation x*(1-a) + y*a.
static inline float mix(float x, float y, float a)
{
	float xw = 1 - a;
	return x*xw + y*a;
}
// Component-wise linear interpolation of two vec2s: x*(1-a) + y*a.
static inline vec2 mix_vec2s(vec2 x, vec2 y, float a)
{
	vec2 xpart = scale_vec2(x, (1-a));
	vec2 ypart = scale_vec2(y, a);
	return add_vec2s(xpart, ypart);
}
// Component-wise linear interpolation of two vec3s: x*(1-a) + y*a.
static inline vec3 mix_vec3s(vec3 x, vec3 y, float a)
{
	vec3 xpart = scale_vec3(x, (1-a));
	vec3 ypart = scale_vec3(y, a);
	return add_vec3s(xpart, ypart);
}
// Component-wise linear interpolation of two vec4s: x*(1-a) + y*a.
static inline vec4 mix_vec4s(vec4 x, vec4 y, float a)
{
	vec4 xpart = scale_vec4(x, (1-a));
	vec4 ypart = scale_vec4(y, a);
	return add_vec4s(xpart, ypart);
}
// TODO should I use the float versions or the double versions for slightly
// increased accuracy?
// Instantiate component-wise vec2/3/4 wrappers (e.g. sinf_vec3) for the
// common single-precision libm functions.
PGL_VECTORIZE_VEC(fabsf)
PGL_VECTORIZE_VEC(floorf)
PGL_VECTORIZE_VEC(ceilf)
PGL_VECTORIZE_VEC(sinf)
PGL_VECTORIZE_VEC(cosf)
PGL_VECTORIZE_VEC(tanf)
PGL_VECTORIZE_VEC(asinf)
PGL_VECTORIZE_VEC(acosf)
PGL_VECTORIZE_VEC(atanf)
PGL_VECTORIZE_VEC(sinhf)
PGL_VECTORIZE_VEC(coshf)
PGL_VECTORIZE_VEC(tanhf)
// GLSL radians()/degrees()/fract() equivalents, plus their vectorized forms.
static inline float radians(float degrees) { return DEG_TO_RAD(degrees); }
static inline float degrees(float radians) { return RAD_TO_DEG(radians); }
static inline float fract(float x) { return x - floor(x); }
PGL_STATIC_VECTORIZE_VEC(radians)
PGL_STATIC_VECTORIZE_VEC(degrees)
PGL_STATIC_VECTORIZE_VEC(fract)
#endif
// 8-bit-per-channel RGBA color.
typedef struct Color
{
u8 r;
u8 g;
u8 b;
u8 a;
} Color;
/*
Color make_Color()
{
r = g = b = 0;
a = 255;
}
*/
// Construct a Color from four 8-bit channel values.
inline Color make_Color(u8 red, u8 green, u8 blue, u8 alpha)
{
	Color out;
	out.r = red;
	out.g = green;
	out.b = blue;
	out.a = alpha;
	return out;
}
// Print a color to stdout as "(r, g, b, a)" followed by `append`.
inline void print_Color(Color c, const char* append)
{
	fprintf(stdout, "(%d, %d, %d, %d)%s", c.r, c.g, c.b, c.a, append);
}
// Convert a normalized vec4 color to 8-bit Color by scaling each channel by
// 255 and truncating toward zero.
// NOTE(review): no clamping is done -- components outside [0, 1] overflow the
// u8 conversion; callers must guarantee the range.
inline Color vec4_to_Color(vec4 v)
{
Color c;
//assume all in the range of [0, 1]
//TODO(rswinkle): round like HH? ie (u8)(v.x * 255.0f + 0.5f)
c.r = v.x * 255;
c.g = v.y * 255;
c.b = v.z * 255;
c.a = v.w * 255;
return c;
}
// Convert an 8-bit Color to a vec4 with each channel divided by 255.
inline vec4 Color_to_vec4(Color c)
{
	vec4 out;
	out.x = (float)c.r/255.0f;
	out.y = (float)c.g/255.0f;
	out.z = (float)c.b/255.0f;
	out.w = (float)c.a/255.0f;
	return out;
}
// Implicit 2D line: the set of points where A*x + B*y + C == 0.
typedef struct Line
{
float A, B, C;
} Line;
// Build the implicit line A*x + B*y + C = 0 through points (x1,y1) and (x2,y2).
inline Line make_Line(float x1, float y1, float x2, float y2)
{
	Line ln;
	ln.A = y1 - y2;
	ln.B = x2 - x1;
	ln.C = x1*y2 - x2*y1;
	return ln;
}
// Evaluate the implicit line equation A*x + B*y + C at (x, y); zero means the
// point lies on the line, and the sign identifies the side.
inline float line_func(Line* line, float x, float y)
{
	float acc = line->A*x;
	acc += line->B*y;
	acc += line->C;
	return acc;
}
// Solve the line equation for y at a given x (undefined for horizontal B==0).
inline float line_findy(Line* line, float x)
{
	float num = line->A*x + line->C;
	return -num/line->B;
}
// Solve the line equation for x at a given y (undefined for vertical A==0).
inline float line_findx(Line* line, float y)
{
	float num = line->B*y + line->C;
	return -num/line->A;
}
// Plane in normal form: points x on the plane satisfy dot(n, x) == d.
typedef struct Plane
{
vec3 n; //normal points x on plane satisfy n dot x = d
float d; //d = n dot p
} Plane;
/*
Plane() {}
Plane(vec3 a, vec3 b, vec3 c) //ccw winding
{
n = cross_product(b-a, c-a).norm();
d = n * a;
}
*/
//int intersect_segment_plane(vec3 a, vec3 b, Plane p, float* t, vec3* q);
// TODO hmm would have to change mat3 and mat4 to proper
// structures to have operators return them since our
// current mat*mat functions take the output mat as a parameter
// C++-only convenience: m * v for mat3, mirroring mult_mat3_vec3 above.
// NOTE(review): v is taken by non-const reference even though it is only
// read -- this rejects rvalue arguments; confirm whether that is intentional.
#ifdef __cplusplus
inline vec3 operator*(mat3 m, vec3& v)
{
vec3 r;
#ifndef ROW_MAJOR
r.x = m[0]*v.x + m[3]*v.y + m[6]*v.z;
r.y = m[1]*v.x + m[4]*v.y + m[7]*v.z;
r.z = m[2]*v.x + m[5]*v.y + m[8]*v.z;
#else
r.x = m[0]*v.x + m[1]*v.y + m[2]*v.z;
r.y = m[3]*v.x + m[4]*v.y + m[5]*v.z;
r.z = m[6]*v.x + m[7]*v.y + m[8]*v.z;
#endif
return r;
}
#endif
/* CRSW_MATH_H */
#endif
#ifndef CVECTOR_float_H
#define CVECTOR_float_H
#include <stdlib.h>
#ifdef __cplusplus
extern "C" {
#endif
/** Data structure for float vector. */
// Dynamic array of floats: `a` holds `capacity` allocated slots of which the
// first `size` are in use.
typedef struct cvector_float
{
float* a; /**< Array. */
size_t size; /**< Current size (amount you use when manipulating array directly). */
size_t capacity; /**< Allocated size of array; always >= size. */
} cvector_float;
// Default extra capacity used by the allocation routines below.
extern size_t CVEC_float_SZ;
// Construction / initialization (the *_heap variants also allocate the struct).
int cvec_float(cvector_float* vec, size_t size, size_t capacity);
int cvec_init_float(cvector_float* vec, float* vals, size_t num);
cvector_float* cvec_float_heap(size_t size, size_t capacity);
cvector_float* cvec_init_float_heap(float* vals, size_t num);
// Copying (copyc is the void* "copy constructor" form).
int cvec_copyc_float(void* dest, void* src);
int cvec_copy_float(cvector_float* dest, cvector_float* src);
// Element insertion/removal. int-returning functions report 1 on success,
// 0 on allocation failure.
int cvec_push_float(cvector_float* vec, float a);
float cvec_pop_float(cvector_float* vec);
int cvec_extend_float(cvector_float* vec, size_t num);
int cvec_insert_float(cvector_float* vec, size_t i, float a);
int cvec_insert_array_float(cvector_float* vec, size_t i, float* a, size_t num);
float cvec_replace_float(cvector_float* vec, size_t i, float a);
void cvec_erase_float(cvector_float* vec, size_t start, size_t end);
// Capacity management and bulk assignment.
int cvec_reserve_float(cvector_float* vec, size_t size);
int cvec_set_cap_float(cvector_float* vec, size_t size);
void cvec_set_val_sz_float(cvector_float* vec, float val);
void cvec_set_val_cap_float(cvector_float* vec, float val);
// Access and teardown.
float* cvec_back_float(cvector_float* vec);
void cvec_clear_float(cvector_float* vec);
void cvec_free_float_heap(void* vec);
void cvec_free_float(void* vec);
#ifdef __cplusplus
}
#endif
/* CVECTOR_float_H */
#endif
#ifdef CVECTOR_float_IMPLEMENTATION
size_t CVEC_float_SZ = 50;
#define CVEC_float_ALLOCATOR(x) ((x+1) * 2)
#if defined(CVEC_MALLOC) && defined(CVEC_FREE) && defined(CVEC_REALLOC)
/* ok */
#elif !defined(CVEC_MALLOC) && !defined(CVEC_FREE) && !defined(CVEC_REALLOC)
/* ok */
#else
#error "Must define all or none of CVEC_MALLOC, CVEC_FREE, and CVEC_REALLOC."
#endif
#ifndef CVEC_MALLOC
#define CVEC_MALLOC(sz) malloc(sz)
#define CVEC_REALLOC(p, sz) realloc(p, sz)
#define CVEC_FREE(p) free(p)
#endif
#ifndef CVEC_MEMMOVE
#include <string.h>
#define CVEC_MEMMOVE(dst, src, sz) memmove(dst, src, sz)
#endif
#ifndef CVEC_ASSERT
#include <assert.h>
#define CVEC_ASSERT(x) assert(x)
#endif
// Allocate a cvector_float (struct and array) on the heap.
// size is the initial element count; the requested capacity is honored when
// it exceeds size (or equals a nonzero size), otherwise size+CVEC_float_SZ
// is used. Returns NULL on allocation failure.
cvector_float* cvec_float_heap(size_t size, size_t capacity)
{
cvector_float* vec;
if (!(vec = (cvector_float*)CVEC_MALLOC(sizeof(cvector_float)))) {
CVEC_ASSERT(vec != NULL);
return NULL;
}
vec->size = size;
vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_float_SZ;
if (!(vec->a = (float*)CVEC_MALLOC(vec->capacity*sizeof(float)))) {
CVEC_ASSERT(vec->a != NULL);
CVEC_FREE(vec);
return NULL;
}
return vec;
}
// Allocate a cvector_float on the heap and fill it with a copy of
// vals[0..num-1]. Capacity is num+CVEC_float_SZ. Returns NULL on allocation
// failure.
cvector_float* cvec_init_float_heap(float* vals, size_t num)
{
cvector_float* vec;
if (!(vec = (cvector_float*)CVEC_MALLOC(sizeof(cvector_float)))) {
CVEC_ASSERT(vec != NULL);
return NULL;
}
vec->capacity = num + CVEC_float_SZ;
vec->size = num;
if (!(vec->a = (float*)CVEC_MALLOC(vec->capacity*sizeof(float)))) {
CVEC_ASSERT(vec->a != NULL);
CVEC_FREE(vec);
return NULL;
}
CVEC_MEMMOVE(vec->a, vals, sizeof(float)*num);
return vec;
}
// Initialize a caller-provided cvector_float with the given size/capacity
// (same capacity rule as cvec_float_heap). Returns 1 on success, 0 on
// allocation failure (vec is zeroed in that case).
int cvec_float(cvector_float* vec, size_t size, size_t capacity)
{
vec->size = size;
vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_float_SZ;
if (!(vec->a = (float*)CVEC_MALLOC(vec->capacity*sizeof(float)))) {
CVEC_ASSERT(vec->a != NULL);
vec->size = vec->capacity = 0;
return 0;
}
return 1;
}
// Initialize a caller-provided cvector_float with a copy of vals[0..num-1].
// Returns 1 on success, 0 on allocation failure (vec is zeroed).
int cvec_init_float(cvector_float* vec, float* vals, size_t num)
{
vec->capacity = num + CVEC_float_SZ;
vec->size = num;
if (!(vec->a = (float*)CVEC_MALLOC(vec->capacity*sizeof(float)))) {
CVEC_ASSERT(vec->a != NULL);
vec->size = vec->capacity = 0;
return 0;
}
CVEC_MEMMOVE(vec->a, vals, sizeof(float)*num);
return 1;
}
// void*-typed copy "constructor": zero-initializes dest's fields, then
// delegates to cvec_copy_float. Returns that call's result (1 success,
// 0 allocation failure).
int cvec_copyc_float(void* dest, void* src)
{
	cvector_float* d = (cvector_float*)dest;
	d->a = NULL;
	d->size = 0;
	d->capacity = 0;
	return cvec_copy_float(d, (cvector_float*)src);
}
// Copy src into dest (dest must be initialized or zeroed; its array is
// reallocated to src's capacity). Returns 1 on success, 0 on allocation
// failure (dest left unchanged).
int cvec_copy_float(cvector_float* dest, cvector_float* src)
{
float* tmp = NULL;
if (!(tmp = (float*)CVEC_REALLOC(dest->a, src->capacity*sizeof(float)))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
dest->a = tmp;
CVEC_MEMMOVE(dest->a, src->a, src->size*sizeof(float));
dest->size = src->size;
dest->capacity = src->capacity;
return 1;
}
// Append a to the end, growing the array via CVEC_float_ALLOCATOR
// ((capacity+1)*2) when full. Returns 1 on success, 0 on allocation failure
// (vector unchanged).
int cvec_push_float(cvector_float* vec, float a)
{
float* tmp;
size_t tmp_sz;
if (vec->capacity > vec->size) {
vec->a[vec->size++] = a;
} else {
tmp_sz = CVEC_float_ALLOCATOR(vec->capacity);
if (!(tmp = (float*)CVEC_REALLOC(vec->a, sizeof(float)*tmp_sz))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
vec->a = tmp;
vec->a[vec->size++] = a;
vec->capacity = tmp_sz;
}
return 1;
}
// Remove and return the last element. No bounds check: calling this on an
// empty vector is undefined.
float cvec_pop_float(cvector_float* vec)
{
	vec->size--;
	return vec->a[vec->size];
}
// Return a pointer to the last element. No bounds check: undefined on an
// empty vector.
float* cvec_back_float(cvector_float* vec)
{
	return vec->a + (vec->size - 1);
}
// Grow size by num uninitialized elements, reallocating (to
// capacity+num+CVEC_float_SZ) if needed. Returns 1 on success, 0 on
// allocation failure (vector unchanged).
int cvec_extend_float(cvector_float* vec, size_t num)
{
float* tmp;
size_t tmp_sz;
if (vec->capacity < vec->size + num) {
tmp_sz = vec->capacity + num + CVEC_float_SZ;
if (!(tmp = (float*)CVEC_REALLOC(vec->a, sizeof(float)*tmp_sz))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
vec->a = tmp;
vec->capacity = tmp_sz;
}
vec->size += num;
return 1;
}
// Insert a at index i (0 <= i <= size), shifting the tail right by one.
// Grows via CVEC_float_ALLOCATOR when full. Returns 1 on success, 0 on
// allocation failure (vector unchanged).
int cvec_insert_float(cvector_float* vec, size_t i, float a)
{
float* tmp;
size_t tmp_sz;
if (vec->capacity > vec->size) {
CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(float));
vec->a[i] = a;
} else {
tmp_sz = CVEC_float_ALLOCATOR(vec->capacity);
if (!(tmp = (float*)CVEC_REALLOC(vec->a, sizeof(float)*tmp_sz))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
vec->a = tmp;
CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(float));
vec->a[i] = a;
vec->capacity = tmp_sz;
}
vec->size++;
return 1;
}
// Insert num elements from a at index i, shifting the tail right by num.
// Grows (to capacity+num+CVEC_float_SZ) if needed. Returns 1 on success,
// 0 on allocation failure (vector unchanged).
int cvec_insert_array_float(cvector_float* vec, size_t i, float* a, size_t num)
{
float* tmp;
size_t tmp_sz;
if (vec->capacity < vec->size + num) {
tmp_sz = vec->capacity + num + CVEC_float_SZ;
if (!(tmp = (float*)CVEC_REALLOC(vec->a, sizeof(float)*tmp_sz))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
vec->a = tmp;
vec->capacity = tmp_sz;
}
CVEC_MEMMOVE(&vec->a[i+num], &vec->a[i], (vec->size-i)*sizeof(float));
CVEC_MEMMOVE(&vec->a[i], a, num*sizeof(float));
vec->size += num;
return 1;
}
// Overwrite element i with a and return the value it previously held.
// No bounds check on i.
float cvec_replace_float(cvector_float* vec, size_t i, float a)
{
	float old = vec->a[i];
	vec->a[i] = a;
	return old;
}
// Remove the inclusive index range [start, end], shifting the tail left.
// No bounds checks; start <= end < size is the caller's responsibility.
void cvec_erase_float(cvector_float* vec, size_t start, size_t end)
{
size_t d = end - start + 1;
CVEC_MEMMOVE(&vec->a[start], &vec->a[end+1], (vec->size-1-end)*sizeof(float));
vec->size -= d;
}
// Ensure capacity for at least `size` elements (actually allocates
// size+CVEC_float_SZ). Never shrinks. Returns 1 on success, 0 on allocation
// failure (vector unchanged).
int cvec_reserve_float(cvector_float* vec, size_t size)
{
float* tmp;
if (vec->capacity < size) {
if (!(tmp = (float*)CVEC_REALLOC(vec->a, sizeof(float)*(size+CVEC_float_SZ)))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
vec->a = tmp;
vec->capacity = size + CVEC_float_SZ;
}
return 1;
}
// Set capacity to exactly `size`, truncating the element count if necessary.
// Returns 1 on success, 0 on reallocation failure.
// NOTE(review): realloc with size 0 has implementation-defined behavior and
// may return NULL on success -- calling this with size == 0 is questionable;
// confirm callers never do.
int cvec_set_cap_float(cvector_float* vec, size_t size)
{
float* tmp;
if (size < vec->size) {
vec->size = size;
}
if (!(tmp = (float*)CVEC_REALLOC(vec->a, sizeof(float)*size))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
vec->a = tmp;
vec->capacity = size;
return 1;
}
// Assign val to every element currently in use (indices [0, size)).
void cvec_set_val_sz_float(cvector_float* vec, float val)
{
	for (size_t j = 0; j < vec->size; ++j) {
		vec->a[j] = val;
	}
}
// Assign val to every allocated slot (indices [0, capacity)), including the
// unused tail.
void cvec_set_val_cap_float(cvector_float* vec, float val)
{
	for (size_t j = 0; j < vec->capacity; ++j) {
		vec->a[j] = val;
	}
}
// Logically empty the vector; the allocation and capacity are kept.
void cvec_clear_float(cvector_float* vec) { vec->size = 0; }
// Destroy a heap-allocated vector (from cvec_float_heap /
// cvec_init_float_heap): frees the array then the struct. NULL is a no-op.
void cvec_free_float_heap(void* vec)
{
cvector_float* tmp = (cvector_float*)vec;
if (!tmp) return;
CVEC_FREE(tmp->a);
CVEC_FREE(tmp);
}
// Free the internal array of a caller-owned cvector_float and reset it to the
// empty/zeroed state. The struct itself is not freed (contrast with
// cvec_free_float_heap).
void cvec_free_float(void* vec)
{
	cvector_float* tmp = (cvector_float*)vec;
	CVEC_FREE(tmp->a);
	// BUGFIX: null the pointer so the vector can be safely re-initialized
	// (or freed again); previously tmp->a was left dangling after CVEC_FREE,
	// and a subsequent push/realloc would operate on freed memory.
	tmp->a = NULL;
	tmp->size = 0;
	tmp->capacity = 0;
}
#endif
#include <stdint.h>
// Fixed-width aliases for the standard GL scalar types so code written
// against the system GL headers compiles here unchanged.
typedef uint32_t GLuint;
typedef int32_t GLint;
typedef int64_t GLint64;
typedef uint64_t GLuint64;
typedef uint16_t GLushort;
typedef int16_t GLshort;
typedef uint8_t GLubyte;
typedef int8_t GLbyte;
typedef char GLchar;
typedef int32_t GLsizei; //they use plain int not unsigned like you'd think
typedef int GLenum;
typedef int GLbitfield;
typedef float GLfloat;
typedef float GLclampf;
typedef double GLdouble;
typedef void GLvoid;
typedef uint8_t GLboolean;
enum
{
//gl error codes
GL_NO_ERROR = 0,
GL_INVALID_ENUM,
GL_INVALID_VALUE,
GL_INVALID_OPERATION,
GL_INVALID_FRAMEBUFFER_OPERATION,
GL_OUT_OF_MEMORY,
//buffer types
GL_ARRAY_BUFFER,
GL_COPY_READ_BUFFER,
GL_COPY_WRITE_BUFFER,
GL_ELEMENT_ARRAY_BUFFER,
GL_PIXEL_PACK_BUFFER,
GL_PIXEL_UNPACK_BUFFER,
GL_TEXTURE_BUFFER,
GL_TRANSFORM_FEEDBACK_BUFFER,
GL_UNIFORM_BUFFER,
GL_NUM_BUFFER_TYPES,
// Framebuffer stuff (not used/supported yet)
GL_FRAMEBUFFER,
GL_DRAW_FRAMEBUFFER,
GL_READ_FRAMEBUFFER,
GL_COLOR_ATTACHMENT0,
GL_COLOR_ATTACHMENT1,
GL_COLOR_ATTACHMENT2,
GL_COLOR_ATTACHMENT3,
GL_COLOR_ATTACHMENT4,
GL_COLOR_ATTACHMENT5,
GL_COLOR_ATTACHMENT6,
GL_COLOR_ATTACHMENT7,
GL_DEPTH_ATTACHMENT,
GL_STENCIL_ATTACHMENT,
GL_DEPTH_STENCIL_ATTACHMENT,
GL_RENDERBUFFER,
//buffer use hints (not used currently)
GL_STREAM_DRAW,
GL_STREAM_READ,
GL_STREAM_COPY,
GL_STATIC_DRAW,
GL_STATIC_READ,
GL_STATIC_COPY,
GL_DYNAMIC_DRAW,
GL_DYNAMIC_READ,
GL_DYNAMIC_COPY,
// mapped buffer access
GL_READ_ONLY,
GL_WRITE_ONLY,
GL_READ_WRITE,
//polygon modes
GL_POINT,
GL_LINE,
GL_FILL,
//primitive types
GL_POINTS,
GL_LINES,
GL_LINE_STRIP,
GL_LINE_LOOP,
GL_TRIANGLES,
GL_TRIANGLE_STRIP,
GL_TRIANGLE_FAN,
// unsupported primitives because I don't support the geometry shader
GL_LINE_STRIP_AJACENCY,
GL_LINES_AJACENCY,
GL_TRIANGLES_AJACENCY,
GL_TRIANGLE_STRIP_AJACENCY,
//depth functions (and stencil funcs)
GL_LESS,
GL_LEQUAL,
GL_GREATER,
GL_GEQUAL,
GL_EQUAL,
GL_NOTEQUAL,
GL_ALWAYS,
GL_NEVER,
//blend functions
GL_ZERO,
GL_ONE,
GL_SRC_COLOR,
GL_ONE_MINUS_SRC_COLOR,
GL_DST_COLOR,
GL_ONE_MINUS_DST_COLOR,
GL_SRC_ALPHA,
GL_ONE_MINUS_SRC_ALPHA,
GL_DST_ALPHA,
GL_ONE_MINUS_DST_ALPHA,
GL_CONSTANT_COLOR,
GL_ONE_MINUS_CONSTANT_COLOR,
GL_CONSTANT_ALPHA,
GL_ONE_MINUS_CONSTANT_ALPHA,
GL_SRC_ALPHA_SATURATE,
NUM_BLEND_FUNCS,
GL_SRC1_COLOR,
GL_ONE_MINUS_SRC1_COLOR,
GL_SRC1_ALPHA,
GL_ONE_MINUS_SRC1_ALPHA,
//NUM_BLEND_FUNCS
//blend equations
GL_FUNC_ADD,
GL_FUNC_SUBTRACT,
GL_FUNC_REVERSE_SUBTRACT,
GL_MIN,
GL_MAX,
NUM_BLEND_EQUATIONS,
//texture types
GL_TEXTURE_UNBOUND,
GL_TEXTURE_1D,
GL_TEXTURE_2D,
GL_TEXTURE_3D,
GL_TEXTURE_1D_ARRAY,
GL_TEXTURE_2D_ARRAY,
GL_TEXTURE_RECTANGLE,
GL_TEXTURE_CUBE_MAP,
GL_NUM_TEXTURE_TYPES,
GL_TEXTURE_CUBE_MAP_POSITIVE_X,
GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
GL_TEXTURE_CUBE_MAP_POSITIVE_Y,
GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
GL_TEXTURE_CUBE_MAP_POSITIVE_Z,
GL_TEXTURE_CUBE_MAP_NEGATIVE_Z,
//texture parameters i
GL_TEXTURE_BASE_LEVEL,
GL_TEXTURE_BORDER_COLOR, // doesn't actually do anything
GL_TEXTURE_COMPARE_FUNC,
GL_TEXTURE_COMPARE_MODE,
GL_TEXTURE_LOD_BIAS,
GL_TEXTURE_MIN_FILTER,
GL_TEXTURE_MAG_FILTER,
GL_TEXTURE_MIN_LOD,
GL_TEXTURE_MAX_LOD,
GL_TEXTURE_MAX_LEVEL,
GL_TEXTURE_SWIZZLE_R,
GL_TEXTURE_SWIZZLE_G,
GL_TEXTURE_SWIZZLE_B,
GL_TEXTURE_SWIZZLE_A,
GL_TEXTURE_SWIZZLE_RGBA,
GL_TEXTURE_WRAP_S,
GL_TEXTURE_WRAP_T,
GL_TEXTURE_WRAP_R,
//texture parameter values
GL_REPEAT,
GL_CLAMP_TO_EDGE,
GL_CLAMP_TO_BORDER, // not supported, alias to CLAMP_TO_EDGE
GL_MIRRORED_REPEAT,
GL_NEAREST,
GL_LINEAR,
GL_NEAREST_MIPMAP_NEAREST,
GL_NEAREST_MIPMAP_LINEAR,
GL_LINEAR_MIPMAP_NEAREST,
GL_LINEAR_MIPMAP_LINEAR,
//texture/depth/stencil formats
GL_RED,
GL_RG,
GL_RGB,
GL_BGR,
GL_RGBA,
GL_BGRA,
GL_COMPRESSED_RED,
GL_COMPRESSED_RG,
GL_COMPRESSED_RGB,
GL_COMPRESSED_RGBA,
//lots more go here but not important
// None of these are used currently just to help porting
GL_DEPTH_COMPONENT16,
GL_DEPTH_COMPONENT24,
GL_DEPTH_COMPONENT32,
GL_DEPTH_COMPONENT32F, // PGL uses a float depth buffer
GL_DEPTH24_STENCIL8,
GL_DEPTH32F_STENCIL8, // <- we do this
GL_STENCIL_INDEX1,
GL_STENCIL_INDEX4,
GL_STENCIL_INDEX8, // this
GL_STENCIL_INDEX16,
//PixelStore parameters
GL_UNPACK_ALIGNMENT,
GL_PACK_ALIGNMENT,
// Texture units (not used, but they ease porting)
// but I'm not doing 80 of them or bothering with GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS
GL_TEXTURE0,
GL_TEXTURE1,
GL_TEXTURE2,
GL_TEXTURE3,
GL_TEXTURE4,
GL_TEXTURE5,
GL_TEXTURE6,
GL_TEXTURE7,
//implemented glEnable options
GL_CULL_FACE,
GL_DEPTH_TEST,
GL_DEPTH_CLAMP,
GL_LINE_SMOOTH, // TODO correctly
GL_BLEND,
GL_COLOR_LOGIC_OP,
GL_POLYGON_OFFSET_FILL,
GL_SCISSOR_TEST,
GL_STENCIL_TEST,
//provoking vertex
GL_FIRST_VERTEX_CONVENTION,
GL_LAST_VERTEX_CONVENTION,
//point sprite stuff
GL_POINT_SPRITE_COORD_ORIGIN,
GL_UPPER_LEFT,
GL_LOWER_LEFT,
//front face determination/culling
GL_FRONT,
GL_BACK,
GL_FRONT_AND_BACK,
GL_CCW,
GL_CW,
// glLogicOp logic ops
GL_CLEAR,
GL_SET,
GL_COPY,
GL_COPY_INVERTED,
GL_NOOP,
GL_AND,
GL_NAND,
GL_OR,
GL_NOR,
GL_XOR,
GL_EQUIV,
GL_AND_REVERSE,
GL_AND_INVERTED,
GL_OR_REVERSE,
GL_OR_INVERTED,
GL_INVERT,
// glStencilOp
GL_KEEP,
//GL_ZERO, already defined in blend functions aggh
GL_REPLACE,
GL_INCR,
GL_INCR_WRAP,
GL_DECR,
GL_DECR_WRAP,
//GL_INVERT, // already defined in LogicOps
//data types
GL_UNSIGNED_BYTE,
GL_BYTE,
GL_BITMAP,
GL_UNSIGNED_SHORT,
GL_SHORT,
GL_UNSIGNED_INT,
GL_INT,
GL_FLOAT,
//glGetString info
GL_VENDOR,
GL_RENDERER,
GL_VERSION,
GL_SHADING_LANGUAGE_VERSION,
// glGet enums
GL_POLYGON_OFFSET_FACTOR,
GL_POLYGON_OFFSET_UNITS,
GL_POINT_SIZE,
GL_DEPTH_CLEAR_VALUE,
GL_DEPTH_RANGE,
GL_STENCIL_WRITE_MASK,
GL_STENCIL_REF,
GL_STENCIL_VALUE_MASK,
GL_STENCIL_FUNC,
GL_STENCIL_FAIL,
GL_STENCIL_PASS_DEPTH_FAIL,
GL_STENCIL_PASS_DEPTH_PASS,
GL_STENCIL_BACK_WRITE_MASK,
GL_STENCIL_BACK_REF,
GL_STENCIL_BACK_VALUE_MASK,
GL_STENCIL_BACK_FUNC,
GL_STENCIL_BACK_FAIL,
GL_STENCIL_BACK_PASS_DEPTH_FAIL,
GL_STENCIL_BACK_PASS_DEPTH_PASS,
GL_LOGIC_OP_MODE,
GL_BLEND_SRC_RGB,
GL_BLEND_SRC_ALPHA,
GL_BLEND_DST_RGB,
GL_BLEND_DST_ALPHA,
GL_BLEND_EQUATION_RGB,
GL_BLEND_EQUATION_ALPHA,
GL_CULL_FACE_MODE,
GL_FRONT_FACE,
GL_DEPTH_FUNC,
//GL_POINT_SPRITE_COORD_ORIGIN,
GL_PROVOKING_VERTEX,
GL_POLYGON_MODE,
//shader types etc. not used, just here for compatibility add what you
//need so you can use your OpenGL code with PortableGL with minimal changes
GL_COMPUTE_SHADER,
GL_VERTEX_SHADER,
GL_TESS_CONTROL_SHADER,
GL_TESS_EVALUATION_SHADER,
GL_GEOMETRY_SHADER,
GL_FRAGMENT_SHADER,
GL_INFO_LOG_LENGTH,
GL_COMPILE_STATUS,
GL_LINK_STATUS,
// buffer clearing selections are a mask so can't have overlap
// choosing arbitrary bits higher than all other constants in enum
GL_COLOR_BUFFER_BIT = 1 << 10,
GL_DEPTH_BUFFER_BIT = 1 << 11,
GL_STENCIL_BUFFER_BIT = 1 << 12
};
// Boolean values and fixed implementation limits for this software renderer.
#define GL_FALSE 0
#define GL_TRUE 1
#define MAX_VERTICES 500000
#define GL_MAX_VERTEX_ATTRIBS 16
#define GL_MAX_VERTEX_OUTPUT_COMPONENTS 64
#define GL_MAX_DRAW_BUFFERS 8
#define GL_MAX_COLOR_ATTACHMENTS 8
//TODO use prefix like GL_SMOOTH? PGL_SMOOTH?
// Interpolation qualifiers for vertex shader outputs (used in glProgram.interpolation).
enum { SMOOTH, FLAT, NOPERSPECTIVE };
//TODO NOT USED YET
// Mirror of GLSL's built-in gl_PerVertex output block: clip-space position,
// rasterized point size, and user clip distances.
typedef struct PerVertex {
vec4 gl_Position;
float gl_PointSize;
float gl_ClipDistance[6];
} PerVertex;
// Built-in variables shared between PortableGL and user shader functions;
// the software analogue of GLSL's built-ins (gl_Position, gl_FragCoord, etc.).
typedef struct Shader_Builtins
{
//PerVertex gl_PerVertex;
vec4 gl_Position;
GLint gl_InstanceID;
vec2 gl_PointCoord;
GLboolean gl_FrontFacing;
vec4 gl_FragCoord;
vec4 gl_FragColor;
//vec4 gl_FragData[GL_MAX_DRAW_BUFFERS];
float gl_FragDepth;
// Set by the fragment shader to kill the fragment (GLSL's "discard" keyword).
GLboolean discard;
} Shader_Builtins;
// User-supplied shader entry points. A vertex shader reads vertex_attribs and
// writes vs_output plus builtins->gl_Position; a fragment shader reads the
// interpolated fs_input and writes builtins->gl_FragColor (or gl_FragDepth/discard).
typedef void (*vert_func)(float* vs_output, void* vertex_attribs, Shader_Builtins* builtins, void* uniforms);
typedef void (*frag_func)(float* fs_input, Shader_Builtins* builtins, void* uniforms);
// A "program" is a pair of C function pointers plus interpolation metadata;
// there is no compilation step in a software rasterizer.
typedef struct glProgram
{
vert_func vertex_shader;
frag_func fragment_shader;
// Opaque pointer handed to both shader functions (set via pglSetUniform-style calls).
void* uniform;
// Number of floats the vertex shader writes per vertex (<= GL_MAX_VERTEX_OUTPUT_COMPONENTS).
int vs_output_size;
// Per-output-component interpolation mode: SMOOTH, FLAT, or NOPERSPECTIVE.
GLenum interpolation[GL_MAX_VERTEX_OUTPUT_COMPONENTS];
// Need to come up with a better name to mean "I write to glFragDepth or discard
// pixels in this shader so you can't do pre-shader depth testing... not that I currently
// support that anyway at this point but maybe eventually
GLboolean fragdepth_or_discard;
GLboolean deleted;
} glProgram;
// A buffer object: a raw byte array plus bookkeeping (VBO/EBO storage).
typedef struct glBuffer
{
/*
GLenum usage;
GLenum access;
GLint access_flags;
void* map_pointer;
GLsizei map_offset;
GLsizei map_length;
*/
GLsizei size;
// Binding target it was last bound to (e.g. GL_ARRAY_BUFFER).
GLenum type;
u8* data;
GLboolean deleted;
// true if the user uses one of the pgl data extension functions that
// doesn't copy the data.
// If true, PGL does not free it when deleting the buffer
GLboolean user_owned;
} glBuffer;
// State recorded by glVertexAttribPointer/glEnableVertexAttribArray for one
// attribute slot of a VAO.
typedef struct glVertex_Attrib
{
GLint size; // number of components 1-4
GLenum type; // GL_FLOAT, default
GLsizei stride; //
GLsizei offset; //
GLboolean normalized;
// Index of the buffer object this attribute sources from.
unsigned int buf;
GLboolean enabled;
// Instanced-rendering divisor (0 = advance per vertex).
GLuint divisor;
} glVertex_Attrib;
void init_glVertex_Attrib(glVertex_Attrib* v);
//void init_glVertex_Attrib(glVertex_Attrib* v, GLint size, GLenum type, GLsizei stride, GLsizei offset, GLboolean normalized, Buffer* buf);
// A vertex array object: the full set of attribute bindings plus the bound
// element (index) buffer.
typedef struct glVertex_Array
{
glVertex_Attrib vertex_attribs[GL_MAX_VERTEX_ATTRIBS];
//GLuint n_array_bufs;
GLuint element_buffer;
GLboolean deleted;
} glVertex_Array;
void init_glVertex_Array(glVertex_Array* v);
// A texture object: dimensions, sampling state, and the owned (or borrowed)
// pixel data.
typedef struct glTexture
{
unsigned int w;
unsigned int h;
unsigned int d;
int base_level;
// vec4 border_color; // no longer support borders not worth it
GLenum mag_filter;
GLenum min_filter;
GLenum wrap_s;
GLenum wrap_t;
GLenum wrap_r;
// TODO?
//GLenum datatype; // only support GL_UNSIGNED_BYTE so not worth having yet
GLenum format; // GL_RED, GL_RG, GL_RGB/BGR, GL_RGBA/BGRA
GLenum type; // GL_TEXTURE_UNBOUND, GL_TEXTURE_2D etc.
GLboolean deleted;
// TODO same meaning as in glBuffer
GLboolean user_owned;
u8* data;
} glTexture;
// One post-vertex-shader vertex as it moves through clipping and viewport
// transform; vs_out points at this vertex's shader outputs.
typedef struct glVertex
{
vec4 clip_space;
vec4 screen_space;
// Outcode bitmask from clip testing (0 = fully inside the frustum).
int clip_code;
int edge_flag;
float* vs_out;
} glVertex;
// A raw pixel buffer (color, depth, or stencil) with cached dimensions.
typedef struct glFramebuffer
{
u8* buf;
u8* lastrow; //better or worse than + h-1 every pixel draw?
size_t w;
size_t h;
} glFramebuffer;
// Scratch storage for all vertex shader outputs of a draw call, with the
// per-component interpolation modes of the current program.
typedef struct Vertex_Shader_output
{
// Floats per vertex (copied from glProgram.vs_output_size).
int size;
GLenum* interpolation;
// TODO Should this be a vector? or just a pointer?
// All I currently use is the constructor, reserve and free...
// I could remove the rest of the cvector_float functions to save on bloat
// but still easily add back functions as needed...
//
// or like comment in init_glContext says just allocate to the max size and be done
cvector_float output_buf;
} Vertex_Shader_output;
// Rasterizer entry point selected per primitive (fill/line/point variants).
typedef void (*draw_triangle_func)(glVertex* v0, glVertex* v1, glVertex* v2, unsigned int provoke);
// CVector template expansion for glVertex_Array (generated code: if the API
// needs changing, regenerate from the CVector template rather than editing here).
#ifndef CVECTOR_glVertex_Array_H
#define CVECTOR_glVertex_Array_H
#include <stdlib.h>
#ifdef __cplusplus
extern "C" {
#endif
/** Data structure for glVertex_Array vector. */
typedef struct cvector_glVertex_Array
{
glVertex_Array* a; /**< Array. */
size_t size; /**< Current size (amount you use when manipulating array directly). */
size_t capacity; /**< Allocated size of array; always >= size. */
} cvector_glVertex_Array;
extern size_t CVEC_glVertex_Array_SZ;
int cvec_glVertex_Array(cvector_glVertex_Array* vec, size_t size, size_t capacity);
int cvec_init_glVertex_Array(cvector_glVertex_Array* vec, glVertex_Array* vals, size_t num);
cvector_glVertex_Array* cvec_glVertex_Array_heap(size_t size, size_t capacity);
cvector_glVertex_Array* cvec_init_glVertex_Array_heap(glVertex_Array* vals, size_t num);
int cvec_copyc_glVertex_Array(void* dest, void* src);
int cvec_copy_glVertex_Array(cvector_glVertex_Array* dest, cvector_glVertex_Array* src);
int cvec_push_glVertex_Array(cvector_glVertex_Array* vec, glVertex_Array a);
glVertex_Array cvec_pop_glVertex_Array(cvector_glVertex_Array* vec);
int cvec_extend_glVertex_Array(cvector_glVertex_Array* vec, size_t num);
int cvec_insert_glVertex_Array(cvector_glVertex_Array* vec, size_t i, glVertex_Array a);
int cvec_insert_array_glVertex_Array(cvector_glVertex_Array* vec, size_t i, glVertex_Array* a, size_t num);
glVertex_Array cvec_replace_glVertex_Array(cvector_glVertex_Array* vec, size_t i, glVertex_Array a);
void cvec_erase_glVertex_Array(cvector_glVertex_Array* vec, size_t start, size_t end);
int cvec_reserve_glVertex_Array(cvector_glVertex_Array* vec, size_t size);
int cvec_set_cap_glVertex_Array(cvector_glVertex_Array* vec, size_t size);
void cvec_set_val_sz_glVertex_Array(cvector_glVertex_Array* vec, glVertex_Array val);
void cvec_set_val_cap_glVertex_Array(cvector_glVertex_Array* vec, glVertex_Array val);
glVertex_Array* cvec_back_glVertex_Array(cvector_glVertex_Array* vec);
void cvec_clear_glVertex_Array(cvector_glVertex_Array* vec);
void cvec_free_glVertex_Array_heap(void* vec);
void cvec_free_glVertex_Array(void* vec);
#ifdef __cplusplus
}
#endif
/* CVECTOR_glVertex_Array_H */
#endif
#ifdef CVECTOR_glVertex_Array_IMPLEMENTATION
// Slack capacity added beyond the requested size when (re)allocating.
size_t CVEC_glVertex_Array_SZ = 50;
// Growth policy used by push/insert when the vector is full (~2x growth).
#define CVEC_glVertex_Array_ALLOCATOR(x) ((x+1) * 2)
// Custom allocators must be provided as a complete set or not at all.
#if defined(CVEC_MALLOC) && defined(CVEC_FREE) && defined(CVEC_REALLOC)
/* ok */
#elif !defined(CVEC_MALLOC) && !defined(CVEC_FREE) && !defined(CVEC_REALLOC)
/* ok */
#else
#error "Must define all or none of CVEC_MALLOC, CVEC_FREE, and CVEC_REALLOC."
#endif
#ifndef CVEC_MALLOC
#define CVEC_MALLOC(sz) malloc(sz)
#define CVEC_REALLOC(p, sz) realloc(p, sz)
#define CVEC_FREE(p) free(p)
#endif
#ifndef CVEC_MEMMOVE
#include <string.h>
#define CVEC_MEMMOVE(dst, src, sz) memmove(dst, src, sz)
#endif
#ifndef CVEC_ASSERT
#include <assert.h>
#define CVEC_ASSERT(x) assert(x)
#endif
// Allocate and initialize a vector on the heap. A capacity >= size (and
// nonzero if size is 0) is honored; otherwise size + CVEC_glVertex_Array_SZ
// is used. Returns NULL on allocation failure.
cvector_glVertex_Array* cvec_glVertex_Array_heap(size_t size, size_t capacity)
{
cvector_glVertex_Array* vec;
if (!(vec = (cvector_glVertex_Array*)CVEC_MALLOC(sizeof(cvector_glVertex_Array)))) {
CVEC_ASSERT(vec != NULL);
return NULL;
}
vec->size = size;
vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_glVertex_Array_SZ;
if (!(vec->a = (glVertex_Array*)CVEC_MALLOC(vec->capacity*sizeof(glVertex_Array)))) {
CVEC_ASSERT(vec->a != NULL);
CVEC_FREE(vec);
return NULL;
}
return vec;
}
// Heap-allocate a vector initialized with a copy of vals[0..num-1].
cvector_glVertex_Array* cvec_init_glVertex_Array_heap(glVertex_Array* vals, size_t num)
{
cvector_glVertex_Array* vec;
if (!(vec = (cvector_glVertex_Array*)CVEC_MALLOC(sizeof(cvector_glVertex_Array)))) {
CVEC_ASSERT(vec != NULL);
return NULL;
}
vec->capacity = num + CVEC_glVertex_Array_SZ;
vec->size = num;
if (!(vec->a = (glVertex_Array*)CVEC_MALLOC(vec->capacity*sizeof(glVertex_Array)))) {
CVEC_ASSERT(vec->a != NULL);
CVEC_FREE(vec);
return NULL;
}
CVEC_MEMMOVE(vec->a, vals, sizeof(glVertex_Array)*num);
return vec;
}
// Initialize a caller-provided (stack/embedded) vector. Returns 0 on failure.
int cvec_glVertex_Array(cvector_glVertex_Array* vec, size_t size, size_t capacity)
{
vec->size = size;
vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_glVertex_Array_SZ;
if (!(vec->a = (glVertex_Array*)CVEC_MALLOC(vec->capacity*sizeof(glVertex_Array)))) {
CVEC_ASSERT(vec->a != NULL);
vec->size = vec->capacity = 0;
return 0;
}
return 1;
}
// Initialize a caller-provided vector with a copy of vals[0..num-1]. 0 on failure.
int cvec_init_glVertex_Array(cvector_glVertex_Array* vec, glVertex_Array* vals, size_t num)
{
vec->capacity = num + CVEC_glVertex_Array_SZ;
vec->size = num;
if (!(vec->a = (glVertex_Array*)CVEC_MALLOC(vec->capacity*sizeof(glVertex_Array)))) {
CVEC_ASSERT(vec->a != NULL);
vec->size = vec->capacity = 0;
return 0;
}
CVEC_MEMMOVE(vec->a, vals, sizeof(glVertex_Array)*num);
return 1;
}
// Copy-construct: treat dest as uninitialized, then deep-copy src into it.
int cvec_copyc_glVertex_Array(void* dest, void* src)
{
cvector_glVertex_Array* vec1 = (cvector_glVertex_Array*)dest;
cvector_glVertex_Array* vec2 = (cvector_glVertex_Array*)src;
vec1->a = NULL;
vec1->size = 0;
vec1->capacity = 0;
return cvec_copy_glVertex_Array(vec1, vec2);
}
// Copy-assign: deep-copy src into an already-initialized dest, reusing
// dest's buffer via realloc. Returns 0 (dest unchanged) on failure.
int cvec_copy_glVertex_Array(cvector_glVertex_Array* dest, cvector_glVertex_Array* src)
{
glVertex_Array* tmp = NULL;
if (!(tmp = (glVertex_Array*)CVEC_REALLOC(dest->a, src->capacity*sizeof(glVertex_Array)))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
dest->a = tmp;
CVEC_MEMMOVE(dest->a, src->a, src->size*sizeof(glVertex_Array));
dest->size = src->size;
dest->capacity = src->capacity;
return 1;
}
// Append a copy of a, growing via CVEC_glVertex_Array_ALLOCATOR if full.
// Returns 0 on allocation failure (vector unchanged).
int cvec_push_glVertex_Array(cvector_glVertex_Array* vec, glVertex_Array a)
{
glVertex_Array* tmp;
size_t tmp_sz;
if (vec->capacity > vec->size) {
vec->a[vec->size++] = a;
} else {
tmp_sz = CVEC_glVertex_Array_ALLOCATOR(vec->capacity);
if (!(tmp = (glVertex_Array*)CVEC_REALLOC(vec->a, sizeof(glVertex_Array)*tmp_sz))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
vec->a = tmp;
vec->a[vec->size++] = a;
vec->capacity = tmp_sz;
}
return 1;
}
// Remove and return the last element; calling on an empty vector is undefined.
glVertex_Array cvec_pop_glVertex_Array(cvector_glVertex_Array* vec)
{
return vec->a[--vec->size];
}
// Pointer to the last element; calling on an empty vector is undefined.
glVertex_Array* cvec_back_glVertex_Array(cvector_glVertex_Array* vec)
{
return &vec->a[vec->size-1];
}
// Grow size by num (elements uninitialized), reallocating if needed. 0 on failure.
int cvec_extend_glVertex_Array(cvector_glVertex_Array* vec, size_t num)
{
glVertex_Array* tmp;
size_t tmp_sz;
if (vec->capacity < vec->size + num) {
tmp_sz = vec->capacity + num + CVEC_glVertex_Array_SZ;
if (!(tmp = (glVertex_Array*)CVEC_REALLOC(vec->a, sizeof(glVertex_Array)*tmp_sz))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
vec->a = tmp;
vec->capacity = tmp_sz;
}
vec->size += num;
return 1;
}
// Insert a at index i (0 <= i <= size), shifting later elements up. 0 on failure.
int cvec_insert_glVertex_Array(cvector_glVertex_Array* vec, size_t i, glVertex_Array a)
{
glVertex_Array* tmp;
size_t tmp_sz;
if (vec->capacity > vec->size) {
CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(glVertex_Array));
vec->a[i] = a;
} else {
tmp_sz = CVEC_glVertex_Array_ALLOCATOR(vec->capacity);
if (!(tmp = (glVertex_Array*)CVEC_REALLOC(vec->a, sizeof(glVertex_Array)*tmp_sz))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
vec->a = tmp;
CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(glVertex_Array));
vec->a[i] = a;
vec->capacity = tmp_sz;
}
vec->size++;
return 1;
}
// Insert a[0..num-1] starting at index i, shifting later elements up. 0 on failure.
int cvec_insert_array_glVertex_Array(cvector_glVertex_Array* vec, size_t i, glVertex_Array* a, size_t num)
{
glVertex_Array* tmp;
size_t tmp_sz;
if (vec->capacity < vec->size + num) {
tmp_sz = vec->capacity + num + CVEC_glVertex_Array_SZ;
if (!(tmp = (glVertex_Array*)CVEC_REALLOC(vec->a, sizeof(glVertex_Array)*tmp_sz))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
vec->a = tmp;
vec->capacity = tmp_sz;
}
CVEC_MEMMOVE(&vec->a[i+num], &vec->a[i], (vec->size-i)*sizeof(glVertex_Array));
CVEC_MEMMOVE(&vec->a[i], a, num*sizeof(glVertex_Array));
vec->size += num;
return 1;
}
// Overwrite element i with a and return the previous value.
glVertex_Array cvec_replace_glVertex_Array(cvector_glVertex_Array* vec, size_t i, glVertex_Array a)
{
glVertex_Array tmp = vec->a[i];
vec->a[i] = a;
return tmp;
}
// Remove elements in the inclusive range [start, end].
void cvec_erase_glVertex_Array(cvector_glVertex_Array* vec, size_t start, size_t end)
{
size_t d = end - start + 1;
CVEC_MEMMOVE(&vec->a[start], &vec->a[end+1], (vec->size-1-end)*sizeof(glVertex_Array));
vec->size -= d;
}
// Ensure capacity for at least size elements (plus slack); never shrinks. 0 on failure.
int cvec_reserve_glVertex_Array(cvector_glVertex_Array* vec, size_t size)
{
glVertex_Array* tmp;
if (vec->capacity < size) {
if (!(tmp = (glVertex_Array*)CVEC_REALLOC(vec->a, sizeof(glVertex_Array)*(size+CVEC_glVertex_Array_SZ)))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
vec->a = tmp;
vec->capacity = size + CVEC_glVertex_Array_SZ;
}
return 1;
}
// Set capacity exactly, truncating size if needed (can shrink). 0 on failure.
int cvec_set_cap_glVertex_Array(cvector_glVertex_Array* vec, size_t size)
{
glVertex_Array* tmp;
if (size < vec->size) {
vec->size = size;
}
if (!(tmp = (glVertex_Array*)CVEC_REALLOC(vec->a, sizeof(glVertex_Array)*size))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
vec->a = tmp;
vec->capacity = size;
return 1;
}
// Assign val to every element in [0, size).
void cvec_set_val_sz_glVertex_Array(cvector_glVertex_Array* vec, glVertex_Array val)
{
size_t i;
for (i=0; i<vec->size; i++) {
vec->a[i] = val;
}
}
// Assign val to every allocated slot in [0, capacity).
void cvec_set_val_cap_glVertex_Array(cvector_glVertex_Array* vec, glVertex_Array val)
{
size_t i;
for (i=0; i<vec->capacity; i++) {
vec->a[i] = val;
}
}
// Logical clear: size goes to 0 but capacity (and allocation) is kept.
void cvec_clear_glVertex_Array(cvector_glVertex_Array* vec) { vec->size = 0; }
// Destroy a vector created with one of the _heap constructors (frees both
// the element array and the vector struct itself). NULL is a no-op.
void cvec_free_glVertex_Array_heap(void* vec)
{
cvector_glVertex_Array* tmp = (cvector_glVertex_Array*)vec;
if (!tmp) return;
CVEC_FREE(tmp->a);
CVEC_FREE(tmp);
}
void cvec_free_glVertex_Array(void* vec)
{
cvector_glVertex_Array* tmp = (cvector_glVertex_Array*)vec;
CVEC_FREE(tmp->a);
tmp->size = 0;
tmp->capacity = 0;
}
#endif
// CVector template expansion for glBuffer (generated code: regenerate from
// the CVector template rather than editing by hand).
#ifndef CVECTOR_glBuffer_H
#define CVECTOR_glBuffer_H
#include <stdlib.h>
#ifdef __cplusplus
extern "C" {
#endif
/** Data structure for glBuffer vector. */
typedef struct cvector_glBuffer
{
glBuffer* a; /**< Array. */
size_t size; /**< Current size (amount you use when manipulating array directly). */
size_t capacity; /**< Allocated size of array; always >= size. */
} cvector_glBuffer;
extern size_t CVEC_glBuffer_SZ;
int cvec_glBuffer(cvector_glBuffer* vec, size_t size, size_t capacity);
int cvec_init_glBuffer(cvector_glBuffer* vec, glBuffer* vals, size_t num);
cvector_glBuffer* cvec_glBuffer_heap(size_t size, size_t capacity);
cvector_glBuffer* cvec_init_glBuffer_heap(glBuffer* vals, size_t num);
int cvec_copyc_glBuffer(void* dest, void* src);
int cvec_copy_glBuffer(cvector_glBuffer* dest, cvector_glBuffer* src);
int cvec_push_glBuffer(cvector_glBuffer* vec, glBuffer a);
glBuffer cvec_pop_glBuffer(cvector_glBuffer* vec);
int cvec_extend_glBuffer(cvector_glBuffer* vec, size_t num);
int cvec_insert_glBuffer(cvector_glBuffer* vec, size_t i, glBuffer a);
int cvec_insert_array_glBuffer(cvector_glBuffer* vec, size_t i, glBuffer* a, size_t num);
glBuffer cvec_replace_glBuffer(cvector_glBuffer* vec, size_t i, glBuffer a);
void cvec_erase_glBuffer(cvector_glBuffer* vec, size_t start, size_t end);
int cvec_reserve_glBuffer(cvector_glBuffer* vec, size_t size);
int cvec_set_cap_glBuffer(cvector_glBuffer* vec, size_t size);
void cvec_set_val_sz_glBuffer(cvector_glBuffer* vec, glBuffer val);
void cvec_set_val_cap_glBuffer(cvector_glBuffer* vec, glBuffer val);
glBuffer* cvec_back_glBuffer(cvector_glBuffer* vec);
void cvec_clear_glBuffer(cvector_glBuffer* vec);
void cvec_free_glBuffer_heap(void* vec);
void cvec_free_glBuffer(void* vec);
#ifdef __cplusplus
}
#endif
/* CVECTOR_glBuffer_H */
#endif
#ifdef CVECTOR_glBuffer_IMPLEMENTATION
// Slack capacity added beyond the requested size when (re)allocating.
size_t CVEC_glBuffer_SZ = 50;
// Growth policy used by push/insert when the vector is full (~2x growth).
#define CVEC_glBuffer_ALLOCATOR(x) ((x+1) * 2)
// Custom allocators must be provided as a complete set or not at all.
#if defined(CVEC_MALLOC) && defined(CVEC_FREE) && defined(CVEC_REALLOC)
/* ok */
#elif !defined(CVEC_MALLOC) && !defined(CVEC_FREE) && !defined(CVEC_REALLOC)
/* ok */
#else
#error "Must define all or none of CVEC_MALLOC, CVEC_FREE, and CVEC_REALLOC."
#endif
#ifndef CVEC_MALLOC
#define CVEC_MALLOC(sz) malloc(sz)
#define CVEC_REALLOC(p, sz) realloc(p, sz)
#define CVEC_FREE(p) free(p)
#endif
#ifndef CVEC_MEMMOVE
#include <string.h>
#define CVEC_MEMMOVE(dst, src, sz) memmove(dst, src, sz)
#endif
#ifndef CVEC_ASSERT
#include <assert.h>
#define CVEC_ASSERT(x) assert(x)
#endif
// Allocate and initialize a vector on the heap. A capacity >= size (and
// nonzero if size is 0) is honored; otherwise size + CVEC_glBuffer_SZ is
// used. Returns NULL on allocation failure.
cvector_glBuffer* cvec_glBuffer_heap(size_t size, size_t capacity)
{
cvector_glBuffer* vec;
if (!(vec = (cvector_glBuffer*)CVEC_MALLOC(sizeof(cvector_glBuffer)))) {
CVEC_ASSERT(vec != NULL);
return NULL;
}
vec->size = size;
vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_glBuffer_SZ;
if (!(vec->a = (glBuffer*)CVEC_MALLOC(vec->capacity*sizeof(glBuffer)))) {
CVEC_ASSERT(vec->a != NULL);
CVEC_FREE(vec);
return NULL;
}
return vec;
}
// Heap-allocate a vector initialized with a copy of vals[0..num-1].
cvector_glBuffer* cvec_init_glBuffer_heap(glBuffer* vals, size_t num)
{
cvector_glBuffer* vec;
if (!(vec = (cvector_glBuffer*)CVEC_MALLOC(sizeof(cvector_glBuffer)))) {
CVEC_ASSERT(vec != NULL);
return NULL;
}
vec->capacity = num + CVEC_glBuffer_SZ;
vec->size = num;
if (!(vec->a = (glBuffer*)CVEC_MALLOC(vec->capacity*sizeof(glBuffer)))) {
CVEC_ASSERT(vec->a != NULL);
CVEC_FREE(vec);
return NULL;
}
CVEC_MEMMOVE(vec->a, vals, sizeof(glBuffer)*num);
return vec;
}
// Initialize a caller-provided (stack/embedded) vector. Returns 0 on failure.
int cvec_glBuffer(cvector_glBuffer* vec, size_t size, size_t capacity)
{
vec->size = size;
vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_glBuffer_SZ;
if (!(vec->a = (glBuffer*)CVEC_MALLOC(vec->capacity*sizeof(glBuffer)))) {
CVEC_ASSERT(vec->a != NULL);
vec->size = vec->capacity = 0;
return 0;
}
return 1;
}
// Initialize a caller-provided vector with a copy of vals[0..num-1]. 0 on failure.
int cvec_init_glBuffer(cvector_glBuffer* vec, glBuffer* vals, size_t num)
{
vec->capacity = num + CVEC_glBuffer_SZ;
vec->size = num;
if (!(vec->a = (glBuffer*)CVEC_MALLOC(vec->capacity*sizeof(glBuffer)))) {
CVEC_ASSERT(vec->a != NULL);
vec->size = vec->capacity = 0;
return 0;
}
CVEC_MEMMOVE(vec->a, vals, sizeof(glBuffer)*num);
return 1;
}
// Copy-construct: treat dest as uninitialized, then deep-copy src into it.
int cvec_copyc_glBuffer(void* dest, void* src)
{
cvector_glBuffer* vec1 = (cvector_glBuffer*)dest;
cvector_glBuffer* vec2 = (cvector_glBuffer*)src;
vec1->a = NULL;
vec1->size = 0;
vec1->capacity = 0;
return cvec_copy_glBuffer(vec1, vec2);
}
// Copy-assign: deep-copy src into an already-initialized dest, reusing
// dest's buffer via realloc. Returns 0 (dest unchanged) on failure.
int cvec_copy_glBuffer(cvector_glBuffer* dest, cvector_glBuffer* src)
{
glBuffer* tmp = NULL;
if (!(tmp = (glBuffer*)CVEC_REALLOC(dest->a, src->capacity*sizeof(glBuffer)))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
dest->a = tmp;
CVEC_MEMMOVE(dest->a, src->a, src->size*sizeof(glBuffer));
dest->size = src->size;
dest->capacity = src->capacity;
return 1;
}
// Append a copy of a, growing via CVEC_glBuffer_ALLOCATOR if full.
// Returns 0 on allocation failure (vector unchanged).
int cvec_push_glBuffer(cvector_glBuffer* vec, glBuffer a)
{
glBuffer* tmp;
size_t tmp_sz;
if (vec->capacity > vec->size) {
vec->a[vec->size++] = a;
} else {
tmp_sz = CVEC_glBuffer_ALLOCATOR(vec->capacity);
if (!(tmp = (glBuffer*)CVEC_REALLOC(vec->a, sizeof(glBuffer)*tmp_sz))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
vec->a = tmp;
vec->a[vec->size++] = a;
vec->capacity = tmp_sz;
}
return 1;
}
// Remove and return the last element; calling on an empty vector is undefined.
glBuffer cvec_pop_glBuffer(cvector_glBuffer* vec)
{
return vec->a[--vec->size];
}
// Pointer to the last element; calling on an empty vector is undefined.
glBuffer* cvec_back_glBuffer(cvector_glBuffer* vec)
{
return &vec->a[vec->size-1];
}
// Grow size by num (elements uninitialized), reallocating if needed. 0 on failure.
int cvec_extend_glBuffer(cvector_glBuffer* vec, size_t num)
{
glBuffer* tmp;
size_t tmp_sz;
if (vec->capacity < vec->size + num) {
tmp_sz = vec->capacity + num + CVEC_glBuffer_SZ;
if (!(tmp = (glBuffer*)CVEC_REALLOC(vec->a, sizeof(glBuffer)*tmp_sz))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
vec->a = tmp;
vec->capacity = tmp_sz;
}
vec->size += num;
return 1;
}
// Insert a at index i (0 <= i <= size), shifting later elements up. 0 on failure.
int cvec_insert_glBuffer(cvector_glBuffer* vec, size_t i, glBuffer a)
{
glBuffer* tmp;
size_t tmp_sz;
if (vec->capacity > vec->size) {
CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(glBuffer));
vec->a[i] = a;
} else {
tmp_sz = CVEC_glBuffer_ALLOCATOR(vec->capacity);
if (!(tmp = (glBuffer*)CVEC_REALLOC(vec->a, sizeof(glBuffer)*tmp_sz))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
vec->a = tmp;
CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(glBuffer));
vec->a[i] = a;
vec->capacity = tmp_sz;
}
vec->size++;
return 1;
}
// Insert a[0..num-1] starting at index i, shifting later elements up. 0 on failure.
int cvec_insert_array_glBuffer(cvector_glBuffer* vec, size_t i, glBuffer* a, size_t num)
{
glBuffer* tmp;
size_t tmp_sz;
if (vec->capacity < vec->size + num) {
tmp_sz = vec->capacity + num + CVEC_glBuffer_SZ;
if (!(tmp = (glBuffer*)CVEC_REALLOC(vec->a, sizeof(glBuffer)*tmp_sz))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
vec->a = tmp;
vec->capacity = tmp_sz;
}
CVEC_MEMMOVE(&vec->a[i+num], &vec->a[i], (vec->size-i)*sizeof(glBuffer));
CVEC_MEMMOVE(&vec->a[i], a, num*sizeof(glBuffer));
vec->size += num;
return 1;
}
// Overwrite element i with a and return the previous value.
glBuffer cvec_replace_glBuffer(cvector_glBuffer* vec, size_t i, glBuffer a)
{
glBuffer tmp = vec->a[i];
vec->a[i] = a;
return tmp;
}
// Remove elements in the inclusive range [start, end].
void cvec_erase_glBuffer(cvector_glBuffer* vec, size_t start, size_t end)
{
size_t d = end - start + 1;
CVEC_MEMMOVE(&vec->a[start], &vec->a[end+1], (vec->size-1-end)*sizeof(glBuffer));
vec->size -= d;
}
// Ensure capacity for at least size elements (plus slack); never shrinks. 0 on failure.
int cvec_reserve_glBuffer(cvector_glBuffer* vec, size_t size)
{
glBuffer* tmp;
if (vec->capacity < size) {
if (!(tmp = (glBuffer*)CVEC_REALLOC(vec->a, sizeof(glBuffer)*(size+CVEC_glBuffer_SZ)))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
vec->a = tmp;
vec->capacity = size + CVEC_glBuffer_SZ;
}
return 1;
}
// Set capacity exactly, truncating size if needed (can shrink). 0 on failure.
int cvec_set_cap_glBuffer(cvector_glBuffer* vec, size_t size)
{
glBuffer* tmp;
if (size < vec->size) {
vec->size = size;
}
if (!(tmp = (glBuffer*)CVEC_REALLOC(vec->a, sizeof(glBuffer)*size))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
vec->a = tmp;
vec->capacity = size;
return 1;
}
// Assign val to every element in [0, size).
void cvec_set_val_sz_glBuffer(cvector_glBuffer* vec, glBuffer val)
{
size_t i;
for (i=0; i<vec->size; i++) {
vec->a[i] = val;
}
}
// Assign val to every allocated slot in [0, capacity).
void cvec_set_val_cap_glBuffer(cvector_glBuffer* vec, glBuffer val)
{
size_t i;
for (i=0; i<vec->capacity; i++) {
vec->a[i] = val;
}
}
// Logical clear: size goes to 0 but capacity (and allocation) is kept.
void cvec_clear_glBuffer(cvector_glBuffer* vec) { vec->size = 0; }
// Destroy a vector created with one of the _heap constructors (frees both
// the element array and the vector struct itself). NULL is a no-op.
void cvec_free_glBuffer_heap(void* vec)
{
cvector_glBuffer* tmp = (cvector_glBuffer*)vec;
if (!tmp) return;
CVEC_FREE(tmp->a);
CVEC_FREE(tmp);
}
void cvec_free_glBuffer(void* vec)
{
cvector_glBuffer* tmp = (cvector_glBuffer*)vec;
CVEC_FREE(tmp->a);
tmp->size = 0;
tmp->capacity = 0;
}
#endif
// CVector template expansion for glTexture (generated code: regenerate from
// the CVector template rather than editing by hand).
#ifndef CVECTOR_glTexture_H
#define CVECTOR_glTexture_H
#include <stdlib.h>
#ifdef __cplusplus
extern "C" {
#endif
/** Data structure for glTexture vector. */
typedef struct cvector_glTexture
{
glTexture* a; /**< Array. */
size_t size; /**< Current size (amount you use when manipulating array directly). */
size_t capacity; /**< Allocated size of array; always >= size. */
} cvector_glTexture;
extern size_t CVEC_glTexture_SZ;
int cvec_glTexture(cvector_glTexture* vec, size_t size, size_t capacity);
int cvec_init_glTexture(cvector_glTexture* vec, glTexture* vals, size_t num);
cvector_glTexture* cvec_glTexture_heap(size_t size, size_t capacity);
cvector_glTexture* cvec_init_glTexture_heap(glTexture* vals, size_t num);
int cvec_copyc_glTexture(void* dest, void* src);
int cvec_copy_glTexture(cvector_glTexture* dest, cvector_glTexture* src);
int cvec_push_glTexture(cvector_glTexture* vec, glTexture a);
glTexture cvec_pop_glTexture(cvector_glTexture* vec);
int cvec_extend_glTexture(cvector_glTexture* vec, size_t num);
int cvec_insert_glTexture(cvector_glTexture* vec, size_t i, glTexture a);
int cvec_insert_array_glTexture(cvector_glTexture* vec, size_t i, glTexture* a, size_t num);
glTexture cvec_replace_glTexture(cvector_glTexture* vec, size_t i, glTexture a);
void cvec_erase_glTexture(cvector_glTexture* vec, size_t start, size_t end);
int cvec_reserve_glTexture(cvector_glTexture* vec, size_t size);
int cvec_set_cap_glTexture(cvector_glTexture* vec, size_t size);
void cvec_set_val_sz_glTexture(cvector_glTexture* vec, glTexture val);
void cvec_set_val_cap_glTexture(cvector_glTexture* vec, glTexture val);
glTexture* cvec_back_glTexture(cvector_glTexture* vec);
void cvec_clear_glTexture(cvector_glTexture* vec);
void cvec_free_glTexture_heap(void* vec);
void cvec_free_glTexture(void* vec);
#ifdef __cplusplus
}
#endif
/* CVECTOR_glTexture_H */
#endif
#ifdef CVECTOR_glTexture_IMPLEMENTATION
// Slack capacity added beyond the requested size when (re)allocating.
size_t CVEC_glTexture_SZ = 50;
// Growth policy used by push/insert when the vector is full (~2x growth).
#define CVEC_glTexture_ALLOCATOR(x) ((x+1) * 2)
// Custom allocators must be provided as a complete set or not at all.
#if defined(CVEC_MALLOC) && defined(CVEC_FREE) && defined(CVEC_REALLOC)
/* ok */
#elif !defined(CVEC_MALLOC) && !defined(CVEC_FREE) && !defined(CVEC_REALLOC)
/* ok */
#else
#error "Must define all or none of CVEC_MALLOC, CVEC_FREE, and CVEC_REALLOC."
#endif
#ifndef CVEC_MALLOC
#define CVEC_MALLOC(sz) malloc(sz)
#define CVEC_REALLOC(p, sz) realloc(p, sz)
#define CVEC_FREE(p) free(p)
#endif
#ifndef CVEC_MEMMOVE
#include <string.h>
#define CVEC_MEMMOVE(dst, src, sz) memmove(dst, src, sz)
#endif
#ifndef CVEC_ASSERT
#include <assert.h>
#define CVEC_ASSERT(x) assert(x)
#endif
// Allocate and initialize a vector on the heap. A capacity >= size (and
// nonzero if size is 0) is honored; otherwise size + CVEC_glTexture_SZ is
// used. Returns NULL on allocation failure.
cvector_glTexture* cvec_glTexture_heap(size_t size, size_t capacity)
{
cvector_glTexture* vec;
if (!(vec = (cvector_glTexture*)CVEC_MALLOC(sizeof(cvector_glTexture)))) {
CVEC_ASSERT(vec != NULL);
return NULL;
}
vec->size = size;
vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_glTexture_SZ;
if (!(vec->a = (glTexture*)CVEC_MALLOC(vec->capacity*sizeof(glTexture)))) {
CVEC_ASSERT(vec->a != NULL);
CVEC_FREE(vec);
return NULL;
}
return vec;
}
// Heap-allocate a vector initialized with a copy of vals[0..num-1].
cvector_glTexture* cvec_init_glTexture_heap(glTexture* vals, size_t num)
{
cvector_glTexture* vec;
if (!(vec = (cvector_glTexture*)CVEC_MALLOC(sizeof(cvector_glTexture)))) {
CVEC_ASSERT(vec != NULL);
return NULL;
}
vec->capacity = num + CVEC_glTexture_SZ;
vec->size = num;
if (!(vec->a = (glTexture*)CVEC_MALLOC(vec->capacity*sizeof(glTexture)))) {
CVEC_ASSERT(vec->a != NULL);
CVEC_FREE(vec);
return NULL;
}
CVEC_MEMMOVE(vec->a, vals, sizeof(glTexture)*num);
return vec;
}
// Initialize a caller-provided (stack/embedded) vector. Returns 0 on failure.
int cvec_glTexture(cvector_glTexture* vec, size_t size, size_t capacity)
{
vec->size = size;
vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_glTexture_SZ;
if (!(vec->a = (glTexture*)CVEC_MALLOC(vec->capacity*sizeof(glTexture)))) {
CVEC_ASSERT(vec->a != NULL);
vec->size = vec->capacity = 0;
return 0;
}
return 1;
}
// Initialize a caller-provided vector with a copy of vals[0..num-1]. 0 on failure.
int cvec_init_glTexture(cvector_glTexture* vec, glTexture* vals, size_t num)
{
vec->capacity = num + CVEC_glTexture_SZ;
vec->size = num;
if (!(vec->a = (glTexture*)CVEC_MALLOC(vec->capacity*sizeof(glTexture)))) {
CVEC_ASSERT(vec->a != NULL);
vec->size = vec->capacity = 0;
return 0;
}
CVEC_MEMMOVE(vec->a, vals, sizeof(glTexture)*num);
return 1;
}
// Copy-construct: treat dest as uninitialized, then deep-copy src into it.
int cvec_copyc_glTexture(void* dest, void* src)
{
cvector_glTexture* vec1 = (cvector_glTexture*)dest;
cvector_glTexture* vec2 = (cvector_glTexture*)src;
vec1->a = NULL;
vec1->size = 0;
vec1->capacity = 0;
return cvec_copy_glTexture(vec1, vec2);
}
// Copy-assign: deep-copy src into an already-initialized dest, reusing
// dest's buffer via realloc. Returns 0 (dest unchanged) on failure.
int cvec_copy_glTexture(cvector_glTexture* dest, cvector_glTexture* src)
{
glTexture* tmp = NULL;
if (!(tmp = (glTexture*)CVEC_REALLOC(dest->a, src->capacity*sizeof(glTexture)))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
dest->a = tmp;
CVEC_MEMMOVE(dest->a, src->a, src->size*sizeof(glTexture));
dest->size = src->size;
dest->capacity = src->capacity;
return 1;
}
// Append a copy of a, growing via CVEC_glTexture_ALLOCATOR if full.
// Returns 0 on allocation failure (vector unchanged).
int cvec_push_glTexture(cvector_glTexture* vec, glTexture a)
{
glTexture* tmp;
size_t tmp_sz;
if (vec->capacity > vec->size) {
vec->a[vec->size++] = a;
} else {
tmp_sz = CVEC_glTexture_ALLOCATOR(vec->capacity);
if (!(tmp = (glTexture*)CVEC_REALLOC(vec->a, sizeof(glTexture)*tmp_sz))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
vec->a = tmp;
vec->a[vec->size++] = a;
vec->capacity = tmp_sz;
}
return 1;
}
// Remove and return the last element; calling on an empty vector is undefined.
glTexture cvec_pop_glTexture(cvector_glTexture* vec)
{
return vec->a[--vec->size];
}
// Pointer to the last element; calling on an empty vector is undefined.
glTexture* cvec_back_glTexture(cvector_glTexture* vec)
{
return &vec->a[vec->size-1];
}
// Grow size by num (elements uninitialized), reallocating if needed. 0 on failure.
int cvec_extend_glTexture(cvector_glTexture* vec, size_t num)
{
glTexture* tmp;
size_t tmp_sz;
if (vec->capacity < vec->size + num) {
tmp_sz = vec->capacity + num + CVEC_glTexture_SZ;
if (!(tmp = (glTexture*)CVEC_REALLOC(vec->a, sizeof(glTexture)*tmp_sz))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
vec->a = tmp;
vec->capacity = tmp_sz;
}
vec->size += num;
return 1;
}
// Insert a at index i (0 <= i <= size), shifting later elements up. 0 on failure.
int cvec_insert_glTexture(cvector_glTexture* vec, size_t i, glTexture a)
{
glTexture* tmp;
size_t tmp_sz;
if (vec->capacity > vec->size) {
CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(glTexture));
vec->a[i] = a;
} else {
tmp_sz = CVEC_glTexture_ALLOCATOR(vec->capacity);
if (!(tmp = (glTexture*)CVEC_REALLOC(vec->a, sizeof(glTexture)*tmp_sz))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
vec->a = tmp;
CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(glTexture));
vec->a[i] = a;
vec->capacity = tmp_sz;
}
vec->size++;
return 1;
}
// Insert a[0..num-1] starting at index i, shifting later elements up. 0 on failure.
int cvec_insert_array_glTexture(cvector_glTexture* vec, size_t i, glTexture* a, size_t num)
{
glTexture* tmp;
size_t tmp_sz;
if (vec->capacity < vec->size + num) {
tmp_sz = vec->capacity + num + CVEC_glTexture_SZ;
if (!(tmp = (glTexture*)CVEC_REALLOC(vec->a, sizeof(glTexture)*tmp_sz))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
vec->a = tmp;
vec->capacity = tmp_sz;
}
CVEC_MEMMOVE(&vec->a[i+num], &vec->a[i], (vec->size-i)*sizeof(glTexture));
CVEC_MEMMOVE(&vec->a[i], a, num*sizeof(glTexture));
vec->size += num;
return 1;
}
// Overwrite element i with a and return the previous value.
glTexture cvec_replace_glTexture(cvector_glTexture* vec, size_t i, glTexture a)
{
glTexture tmp = vec->a[i];
vec->a[i] = a;
return tmp;
}
// Remove elements in the inclusive range [start, end].
void cvec_erase_glTexture(cvector_glTexture* vec, size_t start, size_t end)
{
size_t d = end - start + 1;
CVEC_MEMMOVE(&vec->a[start], &vec->a[end+1], (vec->size-1-end)*sizeof(glTexture));
vec->size -= d;
}
// Ensure capacity for at least size elements (plus slack); never shrinks. 0 on failure.
int cvec_reserve_glTexture(cvector_glTexture* vec, size_t size)
{
glTexture* tmp;
if (vec->capacity < size) {
if (!(tmp = (glTexture*)CVEC_REALLOC(vec->a, sizeof(glTexture)*(size+CVEC_glTexture_SZ)))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
vec->a = tmp;
vec->capacity = size + CVEC_glTexture_SZ;
}
return 1;
}
// Set capacity exactly, truncating size if needed (can shrink). 0 on failure.
int cvec_set_cap_glTexture(cvector_glTexture* vec, size_t size)
{
glTexture* tmp;
if (size < vec->size) {
vec->size = size;
}
if (!(tmp = (glTexture*)CVEC_REALLOC(vec->a, sizeof(glTexture)*size))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
vec->a = tmp;
vec->capacity = size;
return 1;
}
// Assign val to every element in [0, size).
void cvec_set_val_sz_glTexture(cvector_glTexture* vec, glTexture val)
{
size_t i;
for (i=0; i<vec->size; i++) {
vec->a[i] = val;
}
}
// Assign val to every allocated slot in [0, capacity).
void cvec_set_val_cap_glTexture(cvector_glTexture* vec, glTexture val)
{
size_t i;
for (i=0; i<vec->capacity; i++) {
vec->a[i] = val;
}
}
// Logical clear: size goes to 0 but capacity (and allocation) is kept.
void cvec_clear_glTexture(cvector_glTexture* vec) { vec->size = 0; }
// Destroy a vector created with one of the _heap constructors (frees both
// the element array and the vector struct itself). NULL is a no-op.
void cvec_free_glTexture_heap(void* vec)
{
cvector_glTexture* tmp = (cvector_glTexture*)vec;
if (!tmp) return;
CVEC_FREE(tmp->a);
CVEC_FREE(tmp);
}
void cvec_free_glTexture(void* vec)
{
cvector_glTexture* tmp = (cvector_glTexture*)vec;
CVEC_FREE(tmp->a);
tmp->size = 0;
tmp->capacity = 0;
}
#endif
// CVector template expansion for glProgram (generated code: regenerate from
// the CVector template rather than editing by hand).
#ifndef CVECTOR_glProgram_H
#define CVECTOR_glProgram_H
#include <stdlib.h>
#ifdef __cplusplus
extern "C" {
#endif
/** Data structure for glProgram vector. */
typedef struct cvector_glProgram
{
glProgram* a; /**< Array. */
size_t size; /**< Current size (amount you use when manipulating array directly). */
size_t capacity; /**< Allocated size of array; always >= size. */
} cvector_glProgram;
extern size_t CVEC_glProgram_SZ;
int cvec_glProgram(cvector_glProgram* vec, size_t size, size_t capacity);
int cvec_init_glProgram(cvector_glProgram* vec, glProgram* vals, size_t num);
cvector_glProgram* cvec_glProgram_heap(size_t size, size_t capacity);
cvector_glProgram* cvec_init_glProgram_heap(glProgram* vals, size_t num);
int cvec_copyc_glProgram(void* dest, void* src);
int cvec_copy_glProgram(cvector_glProgram* dest, cvector_glProgram* src);
int cvec_push_glProgram(cvector_glProgram* vec, glProgram a);
glProgram cvec_pop_glProgram(cvector_glProgram* vec);
int cvec_extend_glProgram(cvector_glProgram* vec, size_t num);
int cvec_insert_glProgram(cvector_glProgram* vec, size_t i, glProgram a);
int cvec_insert_array_glProgram(cvector_glProgram* vec, size_t i, glProgram* a, size_t num);
glProgram cvec_replace_glProgram(cvector_glProgram* vec, size_t i, glProgram a);
void cvec_erase_glProgram(cvector_glProgram* vec, size_t start, size_t end);
int cvec_reserve_glProgram(cvector_glProgram* vec, size_t size);
int cvec_set_cap_glProgram(cvector_glProgram* vec, size_t size);
void cvec_set_val_sz_glProgram(cvector_glProgram* vec, glProgram val);
void cvec_set_val_cap_glProgram(cvector_glProgram* vec, glProgram val);
glProgram* cvec_back_glProgram(cvector_glProgram* vec);
void cvec_clear_glProgram(cvector_glProgram* vec);
void cvec_free_glProgram_heap(void* vec);
void cvec_free_glProgram(void* vec);
#ifdef __cplusplus
}
#endif
/* CVECTOR_glProgram_H */
#endif
#ifdef CVECTOR_glProgram_IMPLEMENTATION
/* Extra slack added on top of requested sizes by the constructors,
** extend/insert_array and reserve. */
size_t CVEC_glProgram_SZ = 50;
/* Growth policy applied when push/insert run out of capacity. */
#define CVEC_glProgram_ALLOCATOR(x) ((x+1) * 2)
/* The allocator macros must be overridden all-or-none. */
#if defined(CVEC_MALLOC) && defined(CVEC_FREE) && defined(CVEC_REALLOC)
/* ok */
#elif !defined(CVEC_MALLOC) && !defined(CVEC_FREE) && !defined(CVEC_REALLOC)
/* ok */
#else
#error "Must define all or none of CVEC_MALLOC, CVEC_FREE, and CVEC_REALLOC."
#endif
#ifndef CVEC_MALLOC
#define CVEC_MALLOC(sz) malloc(sz)
#define CVEC_REALLOC(p, sz) realloc(p, sz)
#define CVEC_FREE(p) free(p)
#endif
#ifndef CVEC_MEMMOVE
#include <string.h>
#define CVEC_MEMMOVE(dst, src, sz) memmove(dst, src, sz)
#endif
#ifndef CVEC_ASSERT
#include <assert.h>
#define CVEC_ASSERT(x) assert(x)
#endif
/* Allocate a cvector_glProgram and its array on the heap.  The requested
** capacity is honored when it is > size (or equal to a nonzero size);
** otherwise capacity falls back to size + CVEC_glProgram_SZ.  Elements
** are left uninitialized.  Returns NULL on allocation failure (asserts
** first in debug builds). */
cvector_glProgram* cvec_glProgram_heap(size_t size, size_t capacity)
{
cvector_glProgram* vec;
if (!(vec = (cvector_glProgram*)CVEC_MALLOC(sizeof(cvector_glProgram)))) {
CVEC_ASSERT(vec != NULL);
return NULL;
}
vec->size = size;
/* use caller's capacity only if it can actually hold size elements */
vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_glProgram_SZ;
if (!(vec->a = (glProgram*)CVEC_MALLOC(vec->capacity*sizeof(glProgram)))) {
CVEC_ASSERT(vec->a != NULL);
CVEC_FREE(vec);
return NULL;
}
return vec;
}
/* Heap-allocate a vector initialized with num elements copied from vals.
** Returns NULL on allocation failure. */
cvector_glProgram* cvec_init_glProgram_heap(glProgram* vals, size_t num)
{
cvector_glProgram* vec;
if (!(vec = (cvector_glProgram*)CVEC_MALLOC(sizeof(cvector_glProgram)))) {
CVEC_ASSERT(vec != NULL);
return NULL;
}
vec->capacity = num + CVEC_glProgram_SZ;
vec->size = num;
if (!(vec->a = (glProgram*)CVEC_MALLOC(vec->capacity*sizeof(glProgram)))) {
CVEC_ASSERT(vec->a != NULL);
CVEC_FREE(vec);
return NULL;
}
CVEC_MEMMOVE(vec->a, vals, sizeof(glProgram)*num);
return vec;
}
/* Initialize a caller-owned struct (same capacity rule as the heap
** variant).  On failure zeroes size/capacity and returns 0. */
int cvec_glProgram(cvector_glProgram* vec, size_t size, size_t capacity)
{
vec->size = size;
vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_glProgram_SZ;
if (!(vec->a = (glProgram*)CVEC_MALLOC(vec->capacity*sizeof(glProgram)))) {
CVEC_ASSERT(vec->a != NULL);
vec->size = vec->capacity = 0;
return 0;
}
return 1;
}
/* Initialize a caller-owned struct with num elements copied from vals.
** On failure zeroes size/capacity and returns 0. */
int cvec_init_glProgram(cvector_glProgram* vec, glProgram* vals, size_t num)
{
vec->capacity = num + CVEC_glProgram_SZ;
vec->size = num;
if (!(vec->a = (glProgram*)CVEC_MALLOC(vec->capacity*sizeof(glProgram)))) {
CVEC_ASSERT(vec->a != NULL);
vec->size = vec->capacity = 0;
return 0;
}
CVEC_MEMMOVE(vec->a, vals, sizeof(glProgram)*num);
return 1;
}
/* Copy-constructor-style helper: treats dest as uninitialized, zeroes
** its fields, then delegates to cvec_copy_glProgram.  Returns 0 on
** allocation failure, 1 on success. */
int cvec_copyc_glProgram(void* dest, void* src)
{
	cvector_glProgram* d = (cvector_glProgram*)dest;
	d->a = NULL;
	d->size = 0;
	d->capacity = 0;
	return cvec_copy_glProgram(d, (cvector_glProgram*)src);
}
/* Assignment-style copy: grows dest->a to src's capacity and copies
** src->size elements.  dest must already be a valid vector (dest->a is
** handed to CVEC_REALLOC).  Returns 0 and leaves dest's fields unchanged
** on allocation failure.
** NOTE(review): if src->capacity is 0, realloc(p, 0) may legally return
** NULL and this reports failure — presumably capacity is never 0 here;
** confirm with callers. */
int cvec_copy_glProgram(cvector_glProgram* dest, cvector_glProgram* src)
{
glProgram* tmp = NULL;
if (!(tmp = (glProgram*)CVEC_REALLOC(dest->a, src->capacity*sizeof(glProgram)))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
dest->a = tmp;
CVEC_MEMMOVE(dest->a, src->a, src->size*sizeof(glProgram));
dest->size = src->size;
dest->capacity = src->capacity;
return 1;
}
/* Append a to the end, growing the array via CVEC_glProgram_ALLOCATOR
** when it is full.  Returns 0 on allocation failure (vector unchanged),
** 1 on success. */
int cvec_push_glProgram(cvector_glProgram* vec, glProgram a)
{
	if (vec->capacity <= vec->size) {
		size_t new_cap = CVEC_glProgram_ALLOCATOR(vec->capacity);
		glProgram* p = (glProgram*)CVEC_REALLOC(vec->a, sizeof(glProgram)*new_cap);
		if (!p) {
			CVEC_ASSERT(p != NULL);
			return 0;
		}
		vec->a = p;
		vec->capacity = new_cap;
	}
	vec->a[vec->size++] = a;
	return 1;
}
/* Remove and return the last element.  No emptiness check: popping an
** empty vector underflows size (caller's responsibility). */
glProgram cvec_pop_glProgram(cvector_glProgram* vec)
{
return vec->a[--vec->size];
}
/* Pointer to the last element; invalid when the vector is empty. */
glProgram* cvec_back_glProgram(cvector_glProgram* vec)
{
return &vec->a[vec->size-1];
}
/* Grow size by num (new slots uninitialized), enlarging capacity by
** num + CVEC_glProgram_SZ when needed.  Returns 0 on allocation
** failure (vector unchanged), 1 on success. */
int cvec_extend_glProgram(cvector_glProgram* vec, size_t num)
{
	size_t needed = vec->size + num;
	if (needed > vec->capacity) {
		size_t new_cap = vec->capacity + num + CVEC_glProgram_SZ;
		glProgram* p = (glProgram*)CVEC_REALLOC(vec->a, sizeof(glProgram)*new_cap);
		if (!p) {
			CVEC_ASSERT(p != NULL);
			return 0;
		}
		vec->a = p;
		vec->capacity = new_cap;
	}
	vec->size = needed;
	return 1;
}
/* Insert a before index i, shifting a[i..size-1] up by one.  Assumes
** i <= size (no bounds check).  Grows via CVEC_glProgram_ALLOCATOR when
** full; returns 0 on allocation failure (vector unchanged). */
int cvec_insert_glProgram(cvector_glProgram* vec, size_t i, glProgram a)
{
glProgram* tmp;
size_t tmp_sz;
if (vec->capacity > vec->size) {
CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(glProgram));
vec->a[i] = a;
} else {
tmp_sz = CVEC_glProgram_ALLOCATOR(vec->capacity);
if (!(tmp = (glProgram*)CVEC_REALLOC(vec->a, sizeof(glProgram)*tmp_sz))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
vec->a = tmp;
/* shift only after the realloc succeeded */
CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(glProgram));
vec->a[i] = a;
vec->capacity = tmp_sz;
}
vec->size++;
return 1;
}
/* Insert num elements from array a before index i, shifting the tail up
** by num.  Assumes i <= size (no bounds check).  Grows by
** num + CVEC_glProgram_SZ when needed; returns 0 on allocation failure
** (vector unchanged). */
int cvec_insert_array_glProgram(cvector_glProgram* vec, size_t i, glProgram* a, size_t num)
{
glProgram* tmp;
size_t tmp_sz;
if (vec->capacity < vec->size + num) {
tmp_sz = vec->capacity + num + CVEC_glProgram_SZ;
if (!(tmp = (glProgram*)CVEC_REALLOC(vec->a, sizeof(glProgram)*tmp_sz))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
vec->a = tmp;
vec->capacity = tmp_sz;
}
CVEC_MEMMOVE(&vec->a[i+num], &vec->a[i], (vec->size-i)*sizeof(glProgram));
CVEC_MEMMOVE(&vec->a[i], a, num*sizeof(glProgram));
vec->size += num;
return 1;
}
/* Overwrite element i with a and hand back the value it replaced.
** No bounds checking on i. */
glProgram cvec_replace_glProgram(cvector_glProgram* vec, size_t i, glProgram a)
{
	glProgram old_val = vec->a[i];
	vec->a[i] = a;
	return old_val;
}
/* Remove the inclusive range [start, end], shifting the tail down.
** Assumes start <= end < size (no bounds check). */
void cvec_erase_glProgram(cvector_glProgram* vec, size_t start, size_t end)
{
size_t d = end - start + 1; /* number of elements removed */
CVEC_MEMMOVE(&vec->a[start], &vec->a[end+1], (vec->size-1-end)*sizeof(glProgram));
vec->size -= d;
}
/* Ensure capacity for at least size elements (allocates
** size + CVEC_glProgram_SZ when growth is required; never shrinks).
** Returns 0 on allocation failure, 1 on success. */
int cvec_reserve_glProgram(cvector_glProgram* vec, size_t size)
{
	glProgram* p;
	size_t new_cap;
	if (vec->capacity >= size) {
		return 1;
	}
	new_cap = size + CVEC_glProgram_SZ;
	if (!(p = (glProgram*)CVEC_REALLOC(vec->a, sizeof(glProgram)*new_cap))) {
		CVEC_ASSERT(p != NULL);
		return 0;
	}
	vec->a = p;
	vec->capacity = new_cap;
	return 1;
}
/* Set capacity to exactly size, truncating the logical size first if
** necessary.  Returns 0 on allocation failure (capacity unchanged).
** NOTE(review): with size == 0, realloc(p, 0) may return NULL, which is
** reported as failure even though the old block may already be released
** — avoid calling with 0; confirm intended semantics. */
int cvec_set_cap_glProgram(cvector_glProgram* vec, size_t size)
{
glProgram* tmp;
if (size < vec->size) {
vec->size = size;
}
if (!(tmp = (glProgram*)CVEC_REALLOC(vec->a, sizeof(glProgram)*size))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
vec->a = tmp;
vec->capacity = size;
return 1;
}
/* Set the first size elements (the in-use portion) to val. */
void cvec_set_val_sz_glProgram(cvector_glProgram* vec, glProgram val)
{
	glProgram* p;
	for (p = vec->a; p < vec->a + vec->size; ++p) {
		*p = val;
	}
}
/* Set every allocated slot (0..capacity-1) to val. */
void cvec_set_val_cap_glProgram(cvector_glProgram* vec, glProgram val)
{
	glProgram* p;
	for (p = vec->a; p < vec->a + vec->capacity; ++p) {
		*p = val;
	}
}
/* Logically empty the vector; capacity and the backing array are kept. */
void cvec_clear_glProgram(cvector_glProgram* vec) { vec->size = 0; }
/* Destroy a heap-allocated vector (array first, then the struct).
** NULL-safe. */
void cvec_free_glProgram_heap(void* vec)
{
cvector_glProgram* tmp = (cvector_glProgram*)vec;
if (!tmp) return;
CVEC_FREE(tmp->a);
CVEC_FREE(tmp);
}
void cvec_free_glProgram(void* vec)
{
cvector_glProgram* tmp = (cvector_glProgram*)vec;
CVEC_FREE(tmp->a);
tmp->size = 0;
tmp->capacity = 0;
}
#endif
#ifndef CVECTOR_glVertex_H
#define CVECTOR_glVertex_H
#include <stdlib.h>
#ifdef __cplusplus
extern "C" {
#endif
/** Data structure for glVertex vector. */
typedef struct cvector_glVertex
{
glVertex* a; /**< Array. */
size_t size; /**< Current size (amount you use when manipulating array directly). */
size_t capacity; /**< Allocated size of array; always >= size. */
} cvector_glVertex;
/* Default slack capacity added by the constructors/reserve; defined in
** the implementation section below. */
extern size_t CVEC_glVertex_SZ;
/* Constructors: cvec_* initialize a caller-provided struct; the *_heap
** variants also allocate the struct.  All return 0/NULL on OOM. */
int cvec_glVertex(cvector_glVertex* vec, size_t size, size_t capacity);
int cvec_init_glVertex(cvector_glVertex* vec, glVertex* vals, size_t num);
cvector_glVertex* cvec_glVertex_heap(size_t size, size_t capacity);
cvector_glVertex* cvec_init_glVertex_heap(glVertex* vals, size_t num);
/* Element/whole-vector operations; int-returning functions report
** 0 on allocation failure, 1 on success. */
int cvec_copyc_glVertex(void* dest, void* src);
int cvec_copy_glVertex(cvector_glVertex* dest, cvector_glVertex* src);
int cvec_push_glVertex(cvector_glVertex* vec, glVertex a);
glVertex cvec_pop_glVertex(cvector_glVertex* vec);
int cvec_extend_glVertex(cvector_glVertex* vec, size_t num);
int cvec_insert_glVertex(cvector_glVertex* vec, size_t i, glVertex a);
int cvec_insert_array_glVertex(cvector_glVertex* vec, size_t i, glVertex* a, size_t num);
glVertex cvec_replace_glVertex(cvector_glVertex* vec, size_t i, glVertex a);
void cvec_erase_glVertex(cvector_glVertex* vec, size_t start, size_t end);
int cvec_reserve_glVertex(cvector_glVertex* vec, size_t size);
int cvec_set_cap_glVertex(cvector_glVertex* vec, size_t size);
void cvec_set_val_sz_glVertex(cvector_glVertex* vec, glVertex val);
void cvec_set_val_cap_glVertex(cvector_glVertex* vec, glVertex val);
glVertex* cvec_back_glVertex(cvector_glVertex* vec);
void cvec_clear_glVertex(cvector_glVertex* vec);
/* Destructors: *_heap pairs with the *_heap constructors; the plain
** variant only frees the array of a caller-owned struct. */
void cvec_free_glVertex_heap(void* vec);
void cvec_free_glVertex(void* vec);
#ifdef __cplusplus
}
#endif
/* CVECTOR_glVertex_H */
#endif
#ifdef CVECTOR_glVertex_IMPLEMENTATION
/* Extra slack added on top of requested sizes by the constructors,
** extend/insert_array and reserve. */
size_t CVEC_glVertex_SZ = 50;
/* Growth policy applied when push/insert run out of capacity. */
#define CVEC_glVertex_ALLOCATOR(x) ((x+1) * 2)
/* The allocator macros must be overridden all-or-none. */
#if defined(CVEC_MALLOC) && defined(CVEC_FREE) && defined(CVEC_REALLOC)
/* ok */
#elif !defined(CVEC_MALLOC) && !defined(CVEC_FREE) && !defined(CVEC_REALLOC)
/* ok */
#else
#error "Must define all or none of CVEC_MALLOC, CVEC_FREE, and CVEC_REALLOC."
#endif
#ifndef CVEC_MALLOC
#define CVEC_MALLOC(sz) malloc(sz)
#define CVEC_REALLOC(p, sz) realloc(p, sz)
#define CVEC_FREE(p) free(p)
#endif
#ifndef CVEC_MEMMOVE
#include <string.h>
#define CVEC_MEMMOVE(dst, src, sz) memmove(dst, src, sz)
#endif
#ifndef CVEC_ASSERT
#include <assert.h>
#define CVEC_ASSERT(x) assert(x)
#endif
/* Allocate a cvector_glVertex and its array on the heap.  The requested
** capacity is honored when it is > size (or equal to a nonzero size);
** otherwise capacity falls back to size + CVEC_glVertex_SZ.  Elements
** are left uninitialized.  Returns NULL on allocation failure (asserts
** first in debug builds). */
cvector_glVertex* cvec_glVertex_heap(size_t size, size_t capacity)
{
cvector_glVertex* vec;
if (!(vec = (cvector_glVertex*)CVEC_MALLOC(sizeof(cvector_glVertex)))) {
CVEC_ASSERT(vec != NULL);
return NULL;
}
vec->size = size;
/* use caller's capacity only if it can actually hold size elements */
vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_glVertex_SZ;
if (!(vec->a = (glVertex*)CVEC_MALLOC(vec->capacity*sizeof(glVertex)))) {
CVEC_ASSERT(vec->a != NULL);
CVEC_FREE(vec);
return NULL;
}
return vec;
}
/* Heap-allocate a vector initialized with num elements copied from vals.
** Returns NULL on allocation failure. */
cvector_glVertex* cvec_init_glVertex_heap(glVertex* vals, size_t num)
{
cvector_glVertex* vec;
if (!(vec = (cvector_glVertex*)CVEC_MALLOC(sizeof(cvector_glVertex)))) {
CVEC_ASSERT(vec != NULL);
return NULL;
}
vec->capacity = num + CVEC_glVertex_SZ;
vec->size = num;
if (!(vec->a = (glVertex*)CVEC_MALLOC(vec->capacity*sizeof(glVertex)))) {
CVEC_ASSERT(vec->a != NULL);
CVEC_FREE(vec);
return NULL;
}
CVEC_MEMMOVE(vec->a, vals, sizeof(glVertex)*num);
return vec;
}
/* Initialize a caller-owned struct (same capacity rule as the heap
** variant).  On failure zeroes size/capacity and returns 0. */
int cvec_glVertex(cvector_glVertex* vec, size_t size, size_t capacity)
{
vec->size = size;
vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_glVertex_SZ;
if (!(vec->a = (glVertex*)CVEC_MALLOC(vec->capacity*sizeof(glVertex)))) {
CVEC_ASSERT(vec->a != NULL);
vec->size = vec->capacity = 0;
return 0;
}
return 1;
}
/* Initialize a caller-owned struct with num elements copied from vals.
** On failure zeroes size/capacity and returns 0. */
int cvec_init_glVertex(cvector_glVertex* vec, glVertex* vals, size_t num)
{
vec->capacity = num + CVEC_glVertex_SZ;
vec->size = num;
if (!(vec->a = (glVertex*)CVEC_MALLOC(vec->capacity*sizeof(glVertex)))) {
CVEC_ASSERT(vec->a != NULL);
vec->size = vec->capacity = 0;
return 0;
}
CVEC_MEMMOVE(vec->a, vals, sizeof(glVertex)*num);
return 1;
}
/* Copy-constructor-style helper: treats dest as uninitialized, zeroes
** its fields, then delegates to cvec_copy_glVertex.  Returns 0 on
** allocation failure, 1 on success. */
int cvec_copyc_glVertex(void* dest, void* src)
{
	cvector_glVertex* d = (cvector_glVertex*)dest;
	d->a = NULL;
	d->size = 0;
	d->capacity = 0;
	return cvec_copy_glVertex(d, (cvector_glVertex*)src);
}
/* Assignment-style copy: grows dest->a to src's capacity and copies
** src->size elements.  dest must already be a valid vector (dest->a is
** handed to CVEC_REALLOC).  Returns 0 and leaves dest's fields unchanged
** on allocation failure.
** NOTE(review): if src->capacity is 0, realloc(p, 0) may legally return
** NULL and this reports failure — presumably capacity is never 0 here;
** confirm with callers. */
int cvec_copy_glVertex(cvector_glVertex* dest, cvector_glVertex* src)
{
glVertex* tmp = NULL;
if (!(tmp = (glVertex*)CVEC_REALLOC(dest->a, src->capacity*sizeof(glVertex)))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
dest->a = tmp;
CVEC_MEMMOVE(dest->a, src->a, src->size*sizeof(glVertex));
dest->size = src->size;
dest->capacity = src->capacity;
return 1;
}
/* Append a to the end, growing the array via CVEC_glVertex_ALLOCATOR
** when it is full.  Returns 0 on allocation failure (vector unchanged),
** 1 on success. */
int cvec_push_glVertex(cvector_glVertex* vec, glVertex a)
{
	if (vec->capacity <= vec->size) {
		size_t new_cap = CVEC_glVertex_ALLOCATOR(vec->capacity);
		glVertex* p = (glVertex*)CVEC_REALLOC(vec->a, sizeof(glVertex)*new_cap);
		if (!p) {
			CVEC_ASSERT(p != NULL);
			return 0;
		}
		vec->a = p;
		vec->capacity = new_cap;
	}
	vec->a[vec->size++] = a;
	return 1;
}
/* Remove and return the last element.  No emptiness check: popping an
** empty vector underflows size (caller's responsibility). */
glVertex cvec_pop_glVertex(cvector_glVertex* vec)
{
return vec->a[--vec->size];
}
/* Pointer to the last element; invalid when the vector is empty. */
glVertex* cvec_back_glVertex(cvector_glVertex* vec)
{
return &vec->a[vec->size-1];
}
/* Grow size by num (new slots uninitialized), enlarging capacity by
** num + CVEC_glVertex_SZ when needed.  Returns 0 on allocation
** failure (vector unchanged), 1 on success. */
int cvec_extend_glVertex(cvector_glVertex* vec, size_t num)
{
	size_t needed = vec->size + num;
	if (needed > vec->capacity) {
		size_t new_cap = vec->capacity + num + CVEC_glVertex_SZ;
		glVertex* p = (glVertex*)CVEC_REALLOC(vec->a, sizeof(glVertex)*new_cap);
		if (!p) {
			CVEC_ASSERT(p != NULL);
			return 0;
		}
		vec->a = p;
		vec->capacity = new_cap;
	}
	vec->size = needed;
	return 1;
}
/* Insert a before index i, shifting a[i..size-1] up by one.  Assumes
** i <= size (no bounds check).  Grows via CVEC_glVertex_ALLOCATOR when
** full; returns 0 on allocation failure (vector unchanged). */
int cvec_insert_glVertex(cvector_glVertex* vec, size_t i, glVertex a)
{
glVertex* tmp;
size_t tmp_sz;
if (vec->capacity > vec->size) {
CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(glVertex));
vec->a[i] = a;
} else {
tmp_sz = CVEC_glVertex_ALLOCATOR(vec->capacity);
if (!(tmp = (glVertex*)CVEC_REALLOC(vec->a, sizeof(glVertex)*tmp_sz))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
vec->a = tmp;
/* shift only after the realloc succeeded */
CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(glVertex));
vec->a[i] = a;
vec->capacity = tmp_sz;
}
vec->size++;
return 1;
}
/* Insert num elements from array a before index i, shifting the tail up
** by num.  Assumes i <= size (no bounds check).  Grows by
** num + CVEC_glVertex_SZ when needed; returns 0 on allocation failure
** (vector unchanged). */
int cvec_insert_array_glVertex(cvector_glVertex* vec, size_t i, glVertex* a, size_t num)
{
glVertex* tmp;
size_t tmp_sz;
if (vec->capacity < vec->size + num) {
tmp_sz = vec->capacity + num + CVEC_glVertex_SZ;
if (!(tmp = (glVertex*)CVEC_REALLOC(vec->a, sizeof(glVertex)*tmp_sz))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
vec->a = tmp;
vec->capacity = tmp_sz;
}
CVEC_MEMMOVE(&vec->a[i+num], &vec->a[i], (vec->size-i)*sizeof(glVertex));
CVEC_MEMMOVE(&vec->a[i], a, num*sizeof(glVertex));
vec->size += num;
return 1;
}
/* Overwrite element i with a and hand back the value it replaced.
** No bounds checking on i. */
glVertex cvec_replace_glVertex(cvector_glVertex* vec, size_t i, glVertex a)
{
	glVertex old_val = vec->a[i];
	vec->a[i] = a;
	return old_val;
}
/* Remove the inclusive range [start, end], shifting the tail down.
** Assumes start <= end < size (no bounds check). */
void cvec_erase_glVertex(cvector_glVertex* vec, size_t start, size_t end)
{
size_t d = end - start + 1; /* number of elements removed */
CVEC_MEMMOVE(&vec->a[start], &vec->a[end+1], (vec->size-1-end)*sizeof(glVertex));
vec->size -= d;
}
/* Ensure capacity for at least size elements (allocates
** size + CVEC_glVertex_SZ when growth is required; never shrinks).
** Returns 0 on allocation failure, 1 on success. */
int cvec_reserve_glVertex(cvector_glVertex* vec, size_t size)
{
	glVertex* p;
	size_t new_cap;
	if (vec->capacity >= size) {
		return 1;
	}
	new_cap = size + CVEC_glVertex_SZ;
	if (!(p = (glVertex*)CVEC_REALLOC(vec->a, sizeof(glVertex)*new_cap))) {
		CVEC_ASSERT(p != NULL);
		return 0;
	}
	vec->a = p;
	vec->capacity = new_cap;
	return 1;
}
/* Set capacity to exactly size, truncating the logical size first if
** necessary.  Returns 0 on allocation failure (capacity unchanged).
** NOTE(review): with size == 0, realloc(p, 0) may return NULL, which is
** reported as failure even though the old block may already be released
** — avoid calling with 0; confirm intended semantics. */
int cvec_set_cap_glVertex(cvector_glVertex* vec, size_t size)
{
glVertex* tmp;
if (size < vec->size) {
vec->size = size;
}
if (!(tmp = (glVertex*)CVEC_REALLOC(vec->a, sizeof(glVertex)*size))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
vec->a = tmp;
vec->capacity = size;
return 1;
}
/* Set the first size elements (the in-use portion) to val. */
void cvec_set_val_sz_glVertex(cvector_glVertex* vec, glVertex val)
{
	glVertex* p;
	for (p = vec->a; p < vec->a + vec->size; ++p) {
		*p = val;
	}
}
/* Set every allocated slot (0..capacity-1) to val. */
void cvec_set_val_cap_glVertex(cvector_glVertex* vec, glVertex val)
{
	glVertex* p;
	for (p = vec->a; p < vec->a + vec->capacity; ++p) {
		*p = val;
	}
}
/* Logically empty the vector; capacity and the backing array are kept. */
void cvec_clear_glVertex(cvector_glVertex* vec) { vec->size = 0; }
/* Destroy a heap-allocated vector (array first, then the struct).
** NULL-safe. */
void cvec_free_glVertex_heap(void* vec)
{
cvector_glVertex* tmp = (cvector_glVertex*)vec;
if (!tmp) return;
CVEC_FREE(tmp->a);
CVEC_FREE(tmp);
}
void cvec_free_glVertex(void* vec)
{
cvector_glVertex* tmp = (cvector_glVertex*)vec;
CVEC_FREE(tmp->a);
tmp->size = 0;
tmp->capacity = 0;
}
#endif
/* All state for one software-rasterized GL context.  Fields are grouped
** roughly by role; see the setter functions declared below for how each
** is written. */
typedef struct glContext
{
mat4 vp_mat; /* NOTE(review): presumably the viewport transform matrix — confirm */
/* viewport rectangle bounds */
int x_min, y_min;
size_t x_max, y_max;
/* object tables for the handles handed out by glGen* / pglCreateProgram */
cvector_glVertex_Array vertex_arrays;
cvector_glBuffer buffers;
cvector_glTexture textures;
cvector_glProgram programs;
/* currently-bound object handles */
GLuint cur_vertex_array;
GLuint bound_buffers[GL_NUM_BUFFER_TYPES-GL_ARRAY_BUFFER];
GLuint bound_textures[GL_NUM_TEXTURE_TYPES-GL_TEXTURE_UNBOUND-1];
GLuint cur_texture2D;
GLuint cur_program;
GLenum error; /* sticky error code returned by glGetError() */
void* uniform; /* opaque user uniform block set via pglSetUniform() */
/* per-draw shader plumbing */
vec4 vertex_attribs_vs[GL_MAX_VERTEX_ATTRIBS];
Shader_Builtins builtins;
Vertex_Shader_output vs_output;
float fs_input[GL_MAX_VERTEX_OUTPUT_COMPONENTS];
/* glEnable/glDisable capability flags */
GLboolean depth_test;
GLboolean line_smooth;
GLboolean cull_face;
GLboolean fragdepth_or_discard;
GLboolean depth_clamp;
GLboolean depth_mask;
GLboolean blend;
GLboolean logic_ops;
GLboolean poly_offset;
GLboolean scissor_test;
// stencil test requires a lot of state, especially for
// something that I think will rarely be used... is it even worth having?
GLboolean stencil_test;
GLuint stencil_writemask;
GLuint stencil_writemask_back;
GLint stencil_ref;
GLint stencil_ref_back;
GLuint stencil_valuemask;
GLuint stencil_valuemask_back;
GLenum stencil_func;
GLenum stencil_func_back;
GLenum stencil_sfail;
GLenum stencil_dpfail;
GLenum stencil_dppass;
GLenum stencil_sfail_back;
GLenum stencil_dpfail_back;
GLenum stencil_dppass_back;
/* blend / cull / polygon / depth mode enums */
GLenum logic_func;
GLenum blend_sfactor;
GLenum blend_dfactor;
GLenum blend_equation;
GLenum cull_mode;
GLenum front_face;
GLenum poly_mode_front;
GLenum poly_mode_back;
GLenum depth_func;
GLenum point_spr_origin;
GLenum provoking_vert;
// I really need to decide whether to use GLtypes or plain C types
GLfloat poly_factor;
GLfloat poly_units;
GLint scissor_lx;
GLint scissor_ly;
GLsizei scissor_ux;
GLsizei scissor_uy;
GLint unpack_alignment;
GLint pack_alignment;
GLint clear_stencil;
Color clear_color;
vec4 blend_color;
GLfloat point_size;
GLfloat clear_depth;
GLfloat depth_range_near;
GLfloat depth_range_far;
/* per-face triangle rasterizer entry points */
draw_triangle_func draw_triangle_front;
draw_triangle_func draw_triangle_back;
/* framebuffers: depth, color, stencil */
glFramebuffer zbuf;
glFramebuffer back_buffer;
glFramebuffer stencil_buf;
int user_alloced_backbuf;
/* pixel format description of the back buffer */
int bitdepth;
u32 Rmask;
u32 Gmask;
u32 Bmask;
u32 Amask;
int Rshift;
int Gshift;
int Bshift;
int Ashift;
/* scratch vertex list used while drawing */
cvector_glVertex glverts;
} glContext;
/*************************************
 * GLSL(ish) functions
 *************************************/
float clampf_01(float f);
float clampf(float f, float min, float max);
int clampi(int i, int min, int max);
//shader texture functions
vec4 texture1D(GLuint tex, float x);
vec4 texture2D(GLuint tex, float x, float y);
vec4 texture3D(GLuint tex, float x, float y, float z);
vec4 texture2DArray(GLuint tex, float x, float y, int z);
vec4 texture_rect(GLuint tex, float x, float y);
vec4 texture_cubemap(GLuint texture, float x, float y, float z);
// TODO leave these non gl* functions here? prefix with pgl?
/* Context lifecycle: create/destroy a software context and select the
** current one used by all gl* calls below. */
int init_glContext(glContext* c, u32** back_buffer, int w, int h, int bitdepth, u32 Rmask, u32 Gmask, u32 Bmask, u32 Amask);
void free_glContext(glContext* context);
void set_glContext(glContext* context);
void* pglResizeFramebuffer(size_t w, size_t h);
/* Standard OpenGL-style API implemented in software. */
void glViewport(int x, int y, GLsizei width, GLsizei height);
GLubyte* glGetString(GLenum name);
GLenum glGetError();
void glGetBooleanv(GLenum pname, GLboolean* params);
void glGetDoublev(GLenum pname, GLdouble* params);
void glGetFloatv(GLenum pname, GLfloat* params);
void glGetIntegerv(GLenum pname, GLint* params);
void glGetInteger64v(GLenum pname, GLint64* params);
GLboolean glIsEnabled(GLenum cap);
void glClearColor(GLclampf red, GLclampf green, GLclampf blue, GLclampf alpha);
void glClearDepth(GLclampf depth);
void glDepthFunc(GLenum func);
void glDepthRange(GLclampf nearVal, GLclampf farVal);
void glDepthMask(GLboolean flag);
void glBlendFunc(GLenum sfactor, GLenum dfactor);
void glBlendEquation(GLenum mode);
void glBlendColor(GLclampf red, GLclampf green, GLclampf blue, GLclampf alpha);
void glClear(GLbitfield mask);
void glProvokingVertex(GLenum provokeMode);
void glEnable(GLenum cap);
void glDisable(GLenum cap);
void glCullFace(GLenum mode);
void glFrontFace(GLenum mode);
void glPolygonMode(GLenum face, GLenum mode);
void glPointSize(GLfloat size);
void glPointParameteri(GLenum pname, GLint param);
void glLineWidth(GLfloat width);
void glLogicOp(GLenum opcode);
void glPolygonOffset(GLfloat factor, GLfloat units);
void glScissor(GLint x, GLint y, GLsizei width, GLsizei height);
void glStencilFunc(GLenum func, GLint ref, GLuint mask);
void glStencilFuncSeparate(GLenum face, GLenum func, GLint ref, GLuint mask);
void glStencilOp(GLenum sfail, GLenum dpfail, GLenum dppass);
void glStencilOpSeparate(GLenum face, GLenum sfail, GLenum dpfail, GLenum dppass);
void glClearStencil(GLint s);
void glStencilMask(GLuint mask);
void glStencilMaskSeparate(GLenum face, GLuint mask);
//textures
void glGenTextures(GLsizei n, GLuint* textures);
void glDeleteTextures(GLsizei n, GLuint* textures);
void glBindTexture(GLenum target, GLuint texture);
void glActiveTexture(GLenum texture);
void glTexParameteri(GLenum target, GLenum pname, GLint param);
void glTexParameterfv(GLenum target, GLenum pname, const GLfloat* params);
void glPixelStorei(GLenum pname, GLint param);
void glTexImage1D(GLenum target, GLint level, GLint internalFormat, GLsizei width, GLint border, GLenum format, GLenum type, const GLvoid* data);
void glTexImage2D(GLenum target, GLint level, GLint internalFormat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const GLvoid* data);
void glTexImage3D(GLenum target, GLint level, GLint internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const GLvoid* data);
void glTexSubImage1D(GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLenum type, const GLvoid* data);
void glTexSubImage2D(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid* data);
void glTexSubImage3D(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const GLvoid* data);
/* vertex arrays, buffers and draw calls */
void glGenVertexArrays(GLsizei n, GLuint* arrays);
void glDeleteVertexArrays(GLsizei n, const GLuint* arrays);
void glBindVertexArray(GLuint array);
void glGenBuffers(GLsizei n, GLuint* buffers);
void glDeleteBuffers(GLsizei n, const GLuint* buffers);
void glBindBuffer(GLenum target, GLuint buffer);
void glBufferData(GLenum target, GLsizei size, const GLvoid* data, GLenum usage);
void glBufferSubData(GLenum target, GLsizei offset, GLsizei size, const GLvoid* data);
void glVertexAttribPointer(GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, GLsizei offset);
void glVertexAttribDivisor(GLuint index, GLuint divisor);
void glEnableVertexAttribArray(GLuint index);
void glDisableVertexAttribArray(GLuint index);
void glDrawArrays(GLenum mode, GLint first, GLsizei count);
void glDrawElements(GLenum mode, GLsizei count, GLenum type, GLsizei offset);
void glDrawArraysInstanced(GLenum mode, GLint first, GLsizei count, GLsizei primcount);
void glDrawArraysInstancedBaseInstance(GLenum mode, GLint first, GLsizei count, GLsizei primcount, GLuint baseinstance);
void glDrawElementsInstanced(GLenum mode, GLsizei count, GLenum type, GLsizei offset, GLsizei primcount);
void glDrawElementsInstancedBaseInstance(GLenum mode, GLsizei count, GLenum type, GLsizei offset, GLsizei primcount, GLuint baseinstance);
//shaders
GLuint pglCreateProgram(vert_func vertex_shader, frag_func fragment_shader, GLsizei n, GLenum* interpolation, GLboolean fragdepth_or_discard);
void glDeleteProgram(GLuint program);
void glUseProgram(GLuint program);
void pglSetUniform(void* uniform);
// Stubs to let real OpenGL libs compile with minimal modifications/ifdefs
// add what you need
/* NOTE(review): glGenerateMipmap/glGetDoublev/glGetInteger64v below
** duplicate earlier declarations — harmless in C but worth deduplicating
** in the generator. */
void glGenerateMipmap(GLenum target);
void glGetDoublev(GLenum pname, GLdouble* params);
void glGetInteger64v(GLenum pname, GLint64* params);
// Framebuffers/Renderbuffers
void glGenFramebuffers(GLsizei n, GLuint* ids);
void glBindFramebuffer(GLenum target, GLuint framebuffer);
void glDeleteFramebuffers(GLsizei n, GLuint* framebuffers);
void glFramebufferTexture(GLenum target, GLenum attachment, GLuint texture, GLint level);
void glFramebufferTexture1D(GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level);
void glFramebufferTexture2D(GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level);
void glFramebufferTexture3D(GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLint layer);
GLboolean glIsFramebuffer(GLuint framebuffer);
void glGenRenderbuffers(GLsizei n, GLuint* renderbuffers);
void glBindRenderbuffer(GLenum target, GLuint renderbuffer);
void glDeleteRenderbuffers(GLsizei n, const GLuint* renderbuffers);
void glRenderbufferStorage(GLenum target, GLenum internalformat, GLsizei width, GLsizei height);
GLboolean glIsRenderbuffer(GLuint renderbuffer);
void glFramebufferRenderbuffer(GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer);
GLenum glCheckFramebufferStatus(GLenum target);
void glGetProgramiv(GLuint program, GLenum pname, GLint* params);
void glGetProgramInfoLog(GLuint program, GLsizei maxLength, GLsizei* length, GLchar* infoLog);
void glAttachShader(GLuint program, GLuint shader);
void glCompileShader(GLuint shader);
void glGetShaderInfoLog(GLuint shader, GLsizei maxLength, GLsizei* length, GLchar* infoLog);
// use pglCreateProgram()
GLuint glCreateProgram();
void glLinkProgram(GLuint program);
void glShaderSource(GLuint shader, GLsizei count, const GLchar** string, const GLint* length);
void glGetShaderiv(GLuint shader, GLenum pname, GLint* params);
GLuint glCreateShader(GLenum shaderType);
void glDeleteShader(GLuint shader);
void glDetachShader(GLuint program, GLuint shader);
GLint glGetUniformLocation(GLuint program, const GLchar* name);
GLint glGetAttribLocation(GLuint program, const GLchar* name);
void* glMapBuffer(GLenum target, GLenum access);
void* glMapNamedBuffer(GLuint buffer, GLenum access);
GLboolean glUnmapBuffer(GLenum target);
GLboolean glUnmapNamedBuffer(GLuint buffer);
void glUniform1f(GLint location, GLfloat v0);
void glUniform2f(GLint location, GLfloat v0, GLfloat v1);
void glUniform3f(GLint location, GLfloat v0, GLfloat v1, GLfloat v2);
void glUniform4f(GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3);
void glUniform1i(GLint location, GLint v0);
void glUniform2i(GLint location, GLint v0, GLint v1);
void glUniform3i(GLint location, GLint v0, GLint v1, GLint v2);
void glUniform4i(GLint location, GLint v0, GLint v1, GLint v2, GLint v3);
void glUniform1ui(GLuint location, GLuint v0);
void glUniform2ui(GLuint location, GLuint v0, GLuint v1);
void glUniform3ui(GLuint location, GLuint v0, GLuint v1, GLuint v2);
void glUniform4ui(GLuint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3);
void glUniform1fv(GLint location, GLsizei count, const GLfloat* value);
void glUniform2fv(GLint location, GLsizei count, const GLfloat* value);
void glUniform3fv(GLint location, GLsizei count, const GLfloat* value);
void glUniform4fv(GLint location, GLsizei count, const GLfloat* value);
void glUniform1iv(GLint location, GLsizei count, const GLint* value);
void glUniform2iv(GLint location, GLsizei count, const GLint* value);
void glUniform3iv(GLint location, GLsizei count, const GLint* value);
void glUniform4iv(GLint location, GLsizei count, const GLint* value);
void glUniform1uiv(GLint location, GLsizei count, const GLuint* value);
void glUniform2uiv(GLint location, GLsizei count, const GLuint* value);
void glUniform3uiv(GLint location, GLsizei count, const GLuint* value);
void glUniform4uiv(GLint location, GLsizei count, const GLuint* value);
void glUniformMatrix2fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value);
void glUniformMatrix3fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value);
void glUniformMatrix4fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value);
void glUniformMatrix2x3fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value);
void glUniformMatrix3x2fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value);
void glUniformMatrix2x4fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value);
void glUniformMatrix4x2fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value);
void glUniformMatrix3x4fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value);
void glUniformMatrix4x3fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value);
void pglClearScreen();
//This isn't possible in regular OpenGL, changing the interpolation of vs output of
//an existing shader. You'd have to switch between 2 almost identical shaders.
void pglSetInterp(GLsizei n, GLenum* interpolation);
//TODO
//pglDrawRect(x, y, w, h)
//pglDrawPoint(x, y)
void pglDrawFrame();
// TODO should these be called pglMapped* since that's what they do? I don't think so, since it's too different from actual spec for mapped buffers
void pglBufferData(GLenum target, GLsizei size, const GLvoid* data, GLenum usage);
void pglTexImage1D(GLenum target, GLint level, GLint internalFormat, GLsizei width, GLint border, GLenum format, GLenum type, const GLvoid* data);
void pglTexImage2D(GLenum target, GLint level, GLint internalFormat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const GLvoid* data);
void pglTexImage3D(GLenum target, GLint level, GLint internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const GLvoid* data);
// I could make these return the data?
void pglGetBufferData(GLuint buffer, GLvoid** data);
void pglGetTextureData(GLuint texture, GLvoid** data);
/* direct framebuffer drawing helpers */
void put_pixel(Color color, int x, int y);
//Should I have it take a glFramebuffer as paramater?
void put_line(Color the_color, float x1, float y1, float x2, float y2);
void put_triangle(Color c1, Color c2, Color c3, vec2 p1, vec2 p2, vec2 p3);
#ifdef __cplusplus
}
#endif
// end GL_H
#endif
#ifdef PORTABLEGL_IMPLEMENTATION
extern inline float rsw_randf();
extern inline float rsw_randf_range(float min, float max);
extern inline vec2 make_vec2(float x, float y);
extern inline vec3 make_vec3(float x, float y, float z);
extern inline vec4 make_vec4(float x, float y, float z, float w);
extern inline ivec2 make_ivec2(int x, int y);
extern inline ivec3 make_ivec3(int x, int y, int z);
extern inline ivec4 make_ivec4(int x, int y, int z, int w);
extern inline vec2 negate_vec2(vec2 v);
extern inline vec3 negate_vec3(vec3 v);
extern inline vec4 negate_vec4(vec4 v);
extern inline void fprint_vec2(FILE* f, vec2 v, const char* append);
extern inline void fprint_vec3(FILE* f, vec3 v, const char* append);
extern inline void fprint_vec4(FILE* f, vec4 v, const char* append);
extern inline void print_vec2(vec2 v, const char* append);
extern inline void print_vec3(vec3 v, const char* append);
extern inline void print_vec4(vec4 v, const char* append);
extern inline int fread_vec2(FILE* f, vec2* v);
extern inline int fread_vec3(FILE* f, vec3* v);
extern inline int fread_vec4(FILE* f, vec4* v);
extern inline void fprint_dvec2(FILE* f, dvec2 v, const char* append);
extern inline void fprint_dvec3(FILE* f, dvec3 v, const char* append);
extern inline void fprint_dvec4(FILE* f, dvec4 v, const char* append);
extern inline int fread_dvec2(FILE* f, dvec2* v);
extern inline int fread_dvec3(FILE* f, dvec3* v);
extern inline int fread_dvec4(FILE* f, dvec4* v);
extern inline void fprint_ivec2(FILE* f, ivec2 v, const char* append);
extern inline void fprint_ivec3(FILE* f, ivec3 v, const char* append);
extern inline void fprint_ivec4(FILE* f, ivec4 v, const char* append);
extern inline int fread_ivec2(FILE* f, ivec2* v);
extern inline int fread_ivec3(FILE* f, ivec3* v);
extern inline int fread_ivec4(FILE* f, ivec4* v);
extern inline void fprint_uvec2(FILE* f, uvec2 v, const char* append);
extern inline void fprint_uvec3(FILE* f, uvec3 v, const char* append);
extern inline void fprint_uvec4(FILE* f, uvec4 v, const char* append);
extern inline int fread_uvec2(FILE* f, uvec2* v);
extern inline int fread_uvec3(FILE* f, uvec3* v);
extern inline int fread_uvec4(FILE* f, uvec4* v);
extern inline float length_vec2(vec2 a);
extern inline float length_vec3(vec3 a);
extern inline vec2 norm_vec2(vec2 a);
extern inline vec3 norm_vec3(vec3 a);
extern inline void normalize_vec2(vec2* a);
extern inline void normalize_vec3(vec3* a);
extern inline vec2 add_vec2s(vec2 a, vec2 b);
extern inline vec3 add_vec3s(vec3 a, vec3 b);
extern inline vec4 add_vec4s(vec4 a, vec4 b);
extern inline vec2 sub_vec2s(vec2 a, vec2 b);
extern inline vec3 sub_vec3s(vec3 a, vec3 b);
extern inline vec4 sub_vec4s(vec4 a, vec4 b);
extern inline vec2 mult_vec2s(vec2 a, vec2 b);
extern inline vec3 mult_vec3s(vec3 a, vec3 b);
extern inline vec4 mult_vec4s(vec4 a, vec4 b);
extern inline vec2 div_vec2s(vec2 a, vec2 b);
extern inline vec3 div_vec3s(vec3 a, vec3 b);
extern inline vec4 div_vec4s(vec4 a, vec4 b);
extern inline float dot_vec2s(vec2 a, vec2 b);
extern inline float dot_vec3s(vec3 a, vec3 b);
extern inline float dot_vec4s(vec4 a, vec4 b);
extern inline vec2 scale_vec2(vec2 a, float s);
extern inline vec3 scale_vec3(vec3 a, float s);
extern inline vec4 scale_vec4(vec4 a, float s);
extern inline int equal_vec2s(vec2 a, vec2 b);
extern inline int equal_vec3s(vec3 a, vec3 b);
extern inline int equal_vec4s(vec4 a, vec4 b);
extern inline int equal_epsilon_vec2s(vec2 a, vec2 b, float epsilon);
extern inline int equal_epsilon_vec3s(vec3 a, vec3 b, float epsilon);
extern inline int equal_epsilon_vec4s(vec4 a, vec4 b, float epsilon);
extern inline vec2 vec4_to_vec2(vec4 a);
extern inline vec3 vec4_to_vec3(vec4 a);
extern inline vec2 vec4_to_vec2h(vec4 a);
extern inline vec3 vec4_to_vec3h(vec4 a);
extern inline vec3 cross_product(const vec3 u, const vec3 v);
extern inline float angle_between_vec3(const vec3 u, const vec3 v);
extern inline vec2 x_mat2(mat2 m);
extern inline vec2 y_mat2(mat2 m);
extern inline vec2 c1_mat2(mat2 m);
extern inline vec2 c2_mat2(mat2 m);
extern inline void setc1_mat2(mat2 m, vec2 v);
extern inline void setc2_mat2(mat2 m, vec2 v);
extern inline void setx_mat2(mat2 m, vec2 v);
extern inline void sety_mat2(mat2 m, vec2 v);
extern inline vec3 x_mat3(mat3 m);
extern inline vec3 y_mat3(mat3 m);
extern inline vec3 z_mat3(mat3 m);
extern inline vec3 c1_mat3(mat3 m);
extern inline vec3 c2_mat3(mat3 m);
extern inline vec3 c3_mat3(mat3 m);
extern inline void setc1_mat3(mat3 m, vec3 v);
extern inline void setc2_mat3(mat3 m, vec3 v);
extern inline void setc3_mat3(mat3 m, vec3 v);
extern inline void setx_mat3(mat3 m, vec3 v);
extern inline void sety_mat3(mat3 m, vec3 v);
extern inline void setz_mat3(mat3 m, vec3 v);
extern inline vec4 c1_mat4(mat4 m);
extern inline vec4 c2_mat4(mat4 m);
extern inline vec4 c3_mat4(mat4 m);
extern inline vec4 c4_mat4(mat4 m);
extern inline vec4 x_mat4(mat4 m);
extern inline vec4 y_mat4(mat4 m);
extern inline vec4 z_mat4(mat4 m);
extern inline vec4 w_mat4(mat4 m);
extern inline void setc1_mat4v3(mat4 m, vec3 v);
extern inline void setc2_mat4v3(mat4 m, vec3 v);
extern inline void setc3_mat4v3(mat4 m, vec3 v);
extern inline void setc4_mat4v3(mat4 m, vec3 v);
extern inline void setc1_mat4v4(mat4 m, vec4 v);
extern inline void setc2_mat4v4(mat4 m, vec4 v);
extern inline void setc3_mat4v4(mat4 m, vec4 v);
extern inline void setc4_mat4v4(mat4 m, vec4 v);
extern inline void setx_mat4v3(mat4 m, vec3 v);
extern inline void sety_mat4v3(mat4 m, vec3 v);
extern inline void setz_mat4v3(mat4 m, vec3 v);
extern inline void setw_mat4v3(mat4 m, vec3 v);
extern inline void setx_mat4v4(mat4 m, vec4 v);
extern inline void sety_mat4v4(mat4 m, vec4 v);
extern inline void setz_mat4v4(mat4 m, vec4 v);
extern inline void setw_mat4v4(mat4 m, vec4 v);
extern inline void fprint_mat2(FILE* f, mat2 m, const char* append);
extern inline void fprint_mat3(FILE* f, mat3 m, const char* append);
extern inline void fprint_mat4(FILE* f, mat4 m, const char* append);
extern inline void print_mat2(mat2 m, const char* append);
extern inline void print_mat3(mat3 m, const char* append);
extern inline void print_mat4(mat4 m, const char* append);
extern inline vec2 mult_mat2_vec2(mat2 m, vec2 v);
extern inline vec3 mult_mat3_vec3(mat3 m, vec3 v);
extern inline vec4 mult_mat4_vec4(mat4 m, vec4 v);
extern inline void scale_mat3(mat3 m, float x, float y, float z);
extern inline void scale_mat4(mat4 m, float x, float y, float z);
extern inline void translation_mat4(mat4 m, float x, float y, float z);
extern inline void extract_rotation_mat4(mat3 dst, mat4 src, int normalize);
extern inline Color make_Color(u8 red, u8 green, u8 blue, u8 alpha);
extern inline Color vec4_to_Color(vec4 v);
extern inline void print_Color(Color c, const char* append);
extern inline vec4 Color_to_vec4(Color c);
extern inline Line make_Line(float x1, float y1, float x2, float y2);
extern inline float line_func(Line* line, float x, float y);
extern inline float line_findy(Line* line, float x);
extern inline float line_findx(Line* line, float y);
void load_rotation_mat3(mat3 mat, vec3 v, float angle)
{
	// Fill mat with a 3x3 rotation of 'angle' radians about the axis v,
	// using the standard axis-angle (Rodrigues) expansion.
	// v does not need to be pre-normalized; it is normalized here.
	float s, c;
	float xx, yy, zz, xy, yz, zx, xs, ys, zs, one_c;

	// sinf/cosf avoid the float->double->float promotion of sin/cos,
	// consistent with the tanf usage elsewhere in this file.
	s = sinf(angle);
	c = cosf(angle);

	// Rotation matrix is normalized
	normalize_vec3(&v);

	xx = v.x * v.x;
	yy = v.y * v.y;
	zz = v.z * v.z;
	xy = v.x * v.y;
	yz = v.y * v.z;
	zx = v.z * v.x;
	xs = v.x * s;
	ys = v.y * s;
	zs = v.z * s;
	one_c = 1.0f - c;

#ifndef ROW_MAJOR
	mat[0] = (one_c * xx) + c;
	mat[3] = (one_c * xy) - zs;
	mat[6] = (one_c * zx) + ys;

	mat[1] = (one_c * xy) + zs;
	mat[4] = (one_c * yy) + c;
	mat[7] = (one_c * yz) - xs;

	mat[2] = (one_c * zx) - ys;
	mat[5] = (one_c * yz) + xs;
	mat[8] = (one_c * zz) + c;
#else
	mat[0] = (one_c * xx) + c;
	mat[1] = (one_c * xy) - zs;
	mat[2] = (one_c * zx) + ys;

	mat[3] = (one_c * xy) + zs;
	mat[4] = (one_c * yy) + c;
	mat[5] = (one_c * yz) - xs;

	mat[6] = (one_c * zx) - ys;
	mat[7] = (one_c * yz) + xs;
	mat[8] = (one_c * zz) + c;
#endif
}
/*
* mat4
*/
//TODO use restrict?
void mult_mat4_mat4(mat4 c, mat4 a, mat4 b)
{
	// c = a * b (4x4).  c is written directly, so it must not alias a or b.
	// The inner 4-term sum is written out explicitly (not accumulated) so the
	// floating point evaluation order matches the fully unrolled version.
	int row, col;
#ifndef ROW_MAJOR
	// column-major: element (row, col) lives at index col*4 + row
	for (col = 0; col < 4; ++col) {
		for (row = 0; row < 4; ++row) {
			c[col*4 + row] = a[row]*b[col*4] + a[4 + row]*b[col*4 + 1] + a[8 + row]*b[col*4 + 2] + a[12 + row]*b[col*4 + 3];
		}
	}
#else
	// row-major: element (row, col) lives at index row*4 + col
	for (row = 0; row < 4; ++row) {
		for (col = 0; col < 4; ++col) {
			c[row*4 + col] = a[row*4]*b[col] + a[row*4 + 1]*b[4 + col] + a[row*4 + 2]*b[8 + col] + a[row*4 + 3]*b[12 + col];
		}
	}
#endif
}
void load_rotation_mat4(mat4 mat, vec3 v, float angle)
{
	// Fill mat with a 4x4 rotation of 'angle' radians about the axis v
	// (axis-angle/Rodrigues form, translation part zero, w row/col = identity).
	// v does not need to be pre-normalized; it is normalized here.
	float s, c;
	float xx, yy, zz, xy, yz, zx, xs, ys, zs, one_c;

	// sinf/cosf avoid the float->double->float promotion of sin/cos,
	// consistent with the tanf usage elsewhere in this file.
	s = sinf(angle);
	c = cosf(angle);

	// Rotation matrix is normalized
	normalize_vec3(&v);

	xx = v.x * v.x;
	yy = v.y * v.y;
	zz = v.z * v.z;
	xy = v.x * v.y;
	yz = v.y * v.z;
	zx = v.z * v.x;
	xs = v.x * s;
	ys = v.y * s;
	zs = v.z * s;
	one_c = 1.0f - c;

#ifndef ROW_MAJOR
	mat[ 0] = (one_c * xx) + c;
	mat[ 4] = (one_c * xy) - zs;
	mat[ 8] = (one_c * zx) + ys;
	mat[12] = 0.0f;

	mat[ 1] = (one_c * xy) + zs;
	mat[ 5] = (one_c * yy) + c;
	mat[ 9] = (one_c * yz) - xs;
	mat[13] = 0.0f;

	mat[ 2] = (one_c * zx) - ys;
	mat[ 6] = (one_c * yz) + xs;
	mat[10] = (one_c * zz) + c;
	mat[14] = 0.0f;

	mat[ 3] = 0.0f;
	mat[ 7] = 0.0f;
	mat[11] = 0.0f;
	mat[15] = 1.0f;
#else
	mat[0] = (one_c * xx) + c;
	mat[1] = (one_c * xy) - zs;
	mat[2] = (one_c * zx) + ys;
	mat[3] = 0.0f;

	mat[4] = (one_c * xy) + zs;
	mat[5] = (one_c * yy) + c;
	mat[6] = (one_c * yz) - xs;
	mat[7] = 0.0f;

	mat[8] = (one_c * zx) - ys;
	mat[9] = (one_c * yz) + xs;
	mat[10] = (one_c * zz) + c;
	mat[11] = 0.0f;

	mat[12] = 0.0f;
	mat[13] = 0.0f;
	mat[14] = 0.0f;
	mat[15] = 1.0f;
#endif
}
/* TODO
static float det_ij(const mat4 m, const int i, const int j)
{
float ret, mat[3][3];
int x = 0, y = 0;
for (int ii=0; ii<4; ++ii) {
y = 0;
if (ii == i) continue;
for (int jj=0; jj<4; ++jj) {
if (jj == j) continue;
mat[x][y] = m[ii*4+jj];
y++;
}
x++;
}
ret = mat[0][0]*(mat[1][1]*mat[2][2]-mat[2][1]*mat[1][2]);
ret -= mat[0][1]*(mat[1][0]*mat[2][2]-mat[2][0]*mat[1][2]);
ret += mat[0][2]*(mat[1][0]*mat[2][1]-mat[2][0]*mat[1][1]);
return ret;
}
void invert_mat4(mat4 mInverse, const mat4& m)
{
int i, j;
float det, detij;
mat4 inverse_mat;
// calculate 4x4 determinant
det = 0.0f;
for (i = 0; i < 4; i++) {
det += (i & 0x1) ? (-m.matrix[i] * det_ij(m, 0, i)) : (m.matrix[i] * det_ij(m, 0, i));
}
det = 1.0f / det;
// calculate inverse
for (i = 0; i < 4; i++) {
for (j = 0; j < 4; j++) {
detij = det_ij(m, j, i);
inverse_mat[(i*4)+j] = ((i+j) & 0x1) ? (-detij * det) : (detij *det);
}
}
}
*/
////////////////////////////////////////////////////////////////////////////////////////////
//assumes converting from canonical view volume [-1,1]^3
//works just like glViewport, x and y are lower left corner. opengl should be 1.
void make_viewport_matrix(mat4 mat, int x, int y, unsigned int width, unsigned int height, int opengl)
{
	// Build the viewport transform mapping the canonical view volume [-1,1]^3
	// to window coordinates, like glViewport (x, y = lower left corner).
	//
	// The two modes differ only in how the window bounds l/b/r/t are derived;
	// the 16-entry matrix fill was previously duplicated verbatim in both
	// branches, so it is now shared below.
	float w, h, l, t, b, r;

	if (opengl) {
		//See glspec page 104, integer grid is lower left pixel corners
		w = width, h = height;
		l = x, b = y;
		//range is [0, w) x [0 , h)
		//TODO pick best epsilon?
		r = l + w - 0.01; //epsilon larger than float precision
		t = b + h - 0.01;
	} else {
		//old way with pixel centers at integer coordinates
		//see pages 133/4 and 144 of FoCG
		//necessary for fast integer only bresenham line drawing
		w = width, h = height;
		l = x - 0.5f;
		b = y - 0.5f;
		r = l + w;
		t = b + h;
	}

#ifndef ROW_MAJOR
	mat[ 0] = (r - l) / 2;
	mat[ 4] = 0;
	mat[ 8] = 0;
	mat[12] = (l + r) / 2;

	mat[ 1] = 0;
	// mat[5] used to be negated to flip y until glFramebuffer and draw_pixel
	// were changed to accomplish the same thing
	mat[ 5] = (t - b) / 2;
	mat[ 9] = 0;
	mat[13] = (b + t) / 2;

	mat[ 2] = 0;
	mat[ 6] = 0;
	mat[10] = 1;
	mat[14] = 0;

	mat[ 3] = 0;
	mat[ 7] = 0;
	mat[11] = 0;
	mat[15] = 1;
#else
	mat[0] = (r - l) / 2;
	mat[1] = 0;
	mat[2] = 0;
	mat[3] = (l + r) / 2;

	mat[4] = 0;
	// mat[5] used to be negated to flip y (framebuffer memory increases top to
	// bottom, opposite of the canonical square) until glFramebuffer and
	// draw_pixel were changed to accomplish the same thing
	mat[5] = (t - b) / 2;
	mat[6] = 0;
	mat[7] = (b + t) / 2;

	mat[8] = 0;
	mat[9] = 0;
	mat[10] = 1;
	mat[11] = 0;

	mat[12] = 0;
	mat[13] = 0;
	mat[14] = 0;
	mat[15] = 1;
#endif
}
//I can't really think of any reason to ever use this matrix alone.
//You'd always do ortho * pers and really if you're doing perspective projection
//just use make_perspective_matrix (or less likely make perspective_proj_matrix)
//
//This function is really just for completeness sake based off of FoCG 3rd edition pg 152
//changed slightly. z_near and z_far are always positive and z_near < z_far
//
//Inconsistently, to generate an ortho matrix to multiply with that will get the equivalent
//of the other 2 functions you'd use -z_near and -z_far and near > far.
void make_pers_matrix(mat4 mat, float z_near, float z_far)
{
	// Perspective-only matrix (FoCG 3rd edition pg 152, slightly changed);
	// normally combined with an orthographic matrix rather than used alone.
	// Zero the whole matrix first, then set the handful of non-zero entries.
	int i;
	for (i = 0; i < 16; ++i) {
		mat[i] = 0;
	}

#ifndef ROW_MAJOR
	mat[ 0] = z_near;
	mat[ 5] = z_near;
	mat[10] = z_near + z_far;
	mat[14] = (z_far * z_near);
	mat[11] = -1;
#else
	mat[0] = z_near;
	mat[5] = z_near;
	mat[10] = z_near + z_far;
	mat[11] = (z_far * z_near);
	mat[14] = -1;
#endif
}
// Create a projection matrix
// Similar to the old gluPerspective... fov is in radians btw...
void make_perspective_matrix(mat4 mat, float fov, float aspect, float n, float f)
{
	// gluPerspective-style helper: fov is the vertical field of view in
	// radians.  Derive the symmetric frustum bounds at the near plane and
	// delegate to make_perspective_proj_matrix.
	float top = n * tanf(fov * 0.5f);
	float bottom = -top;
	float left = bottom * aspect;
	float right = -left;
	make_perspective_proj_matrix(mat, left, right, bottom, top, n, f);
}
// Build a glFrustum-style perspective projection matrix for the frustum with
// bounds (l,r,b,t) at the near plane and near/far distances n and f.
void make_perspective_proj_matrix(mat4 mat, float l, float r, float b, float t, float n, float f)
{
#ifndef ROW_MAJOR
// x column/row: scale + shear for off-center frusta
mat[ 0] = (2.0f * n) / (r - l);
mat[ 4] = 0.0f;
mat[ 8] = (r + l) / (r - l);
mat[12] = 0.0f;
// y
mat[ 1] = 0.0f;
mat[ 5] = (2.0f * n) / (t - b);
mat[ 9] = (t + b) / (t - b);
mat[13] = 0.0f;
// z: maps depth into clip space
mat[ 2] = 0.0f;
mat[ 6] = 0.0f;
mat[10] = -((f + n) / (f - n));
mat[14] = -((2.0f * (f*n))/(f - n));
// w row: -z goes into w for the perspective divide
mat[ 3] = 0.0f;
mat[ 7] = 0.0f;
mat[11] = -1.0f;
mat[15] = 0.0f;
#else
mat[0] = (2.0f * n) / (r - l);
mat[1] = 0.0f;
mat[2] = (r + l) / (r - l);
mat[3] = 0.0f;
mat[4] = 0.0f;
mat[5] = (2.0f * n) / (t - b);
mat[6] = (t + b) / (t - b);
mat[7] = 0.0f;
mat[8] = 0.0f;
mat[9] = 0.0f;
mat[10] = -((f + n) / (f - n));
mat[11] = -((2.0f * (f*n))/(f - n));
mat[12] = 0.0f;
mat[13] = 0.0f;
mat[14] = -1.0f;
mat[15] = 0.0f;
#endif
}
//n and f really are near and far not min and max so if you want the standard looking down the -z axis
// then n > f otherwise n < f
// Build an orthographic projection matrix mapping the box (l,r,b,t,n,f) to the
// canonical view volume.  n and f are near and far (not min and max): for the
// standard view down -z use n > f; otherwise n < f.  See the note at the
// bottom about the sign of the z scale.
void make_orthographic_matrix(mat4 mat, float l, float r, float b, float t, float n, float f)
{
#ifndef ROW_MAJOR
mat[ 0] = 2.0f / (r - l);
mat[ 4] = 0;
mat[ 8] = 0;
mat[12] = -((r + l)/(r - l));
mat[ 1] = 0;
mat[ 5] = 2.0f / (t - b);
mat[ 9] = 0;
mat[13] = -((t + b)/(t - b));
mat[ 2] = 0;
mat[ 6] = 0;
mat[10] = 2.0f / (f - n); //removed - in front of 2 . . . book doesn't have it but superbible did
mat[14] = -((n + f)/(f - n));
mat[ 3] = 0;
mat[ 7] = 0;
mat[11] = 0;
mat[15] = 1;
#else
mat[0] = 2.0f / (r - l);
mat[1] = 0;
mat[2] = 0;
mat[3] = -((r + l)/(r - l));
mat[4] = 0;
mat[5] = 2.0f / (t - b);
mat[6] = 0;
mat[7] = -((t + b)/(t - b));
mat[8] = 0;
mat[9] = 0;
mat[10] = 2.0f / (f - n); //removed - in front of 2 . . . book doesn't have it but superbible did
mat[11] = -((n + f)/(f - n));
mat[12] = 0;
mat[13] = 0;
mat[14] = 0;
mat[15] = 1;
#endif
//now I know why the superbible had the -
//OpenGL uses a left handed canonical view volume [-1,1]^3 when passed the identity matrix
//ie in Normalized Device Coordinates. The math/matrix presented in Fundamentals of Computer
//Graphics assumes a right handed version of the same volume. The negative isn't necessary
//if you set n and f correctly as near and far not low and high
}
//per https://www.opengl.org/sdk/docs/man2/xhtml/gluLookAt.xml
//and glm.g-truc.net (glm/gtc/matrix_transform.inl)
void lookAt(mat4 mat, vec3 eye, vec3 center, vec3 up)
{
	// Build a view matrix looking from eye toward center with the given up
	// hint, following the gluLookAt docs and glm (gtc/matrix_transform.inl).
	// Basis: forward toward the target, side = forward x up, and a
	// re-orthogonalized up vector.
	vec3 fwd = norm_vec3(sub_vec3s(center, eye));
	vec3 side = norm_vec3(cross_product(fwd, up));
	vec3 upv = cross_product(side, fwd);

	SET_IDENTITY_MAT4(mat);
	setx_mat4v3(mat, side);
	sety_mat4v3(mat, upv);
	setz_mat4v3(mat, negate_vec3(fwd));
	// translation: -eye expressed in the new basis (+f row because z = -fwd)
	setc4_mat4v3(mat, make_vec3(-dot_vec3s(side, eye), -dot_vec3s(upv, eye), dot_vec3s(fwd, eye)));
}
#define CVECTOR_glVertex_Array_IMPLEMENTATION
#ifndef CVECTOR_glVertex_Array_H
#define CVECTOR_glVertex_Array_H
#include <stdlib.h>
#ifdef __cplusplus
extern "C" {
#endif
/** Data structure for glVertex_Array vector. */
typedef struct cvector_glVertex_Array
{
glVertex_Array* a; /**< Array. */
size_t size; /**< Current size (amount you use when manipulating array directly). */
size_t capacity; /**< Allocated size of array; always >= size. */
} cvector_glVertex_Array;
extern size_t CVEC_glVertex_Array_SZ;
int cvec_glVertex_Array(cvector_glVertex_Array* vec, size_t size, size_t capacity);
int cvec_init_glVertex_Array(cvector_glVertex_Array* vec, glVertex_Array* vals, size_t num);
cvector_glVertex_Array* cvec_glVertex_Array_heap(size_t size, size_t capacity);
cvector_glVertex_Array* cvec_init_glVertex_Array_heap(glVertex_Array* vals, size_t num);
int cvec_copyc_glVertex_Array(void* dest, void* src);
int cvec_copy_glVertex_Array(cvector_glVertex_Array* dest, cvector_glVertex_Array* src);
int cvec_push_glVertex_Array(cvector_glVertex_Array* vec, glVertex_Array a);
glVertex_Array cvec_pop_glVertex_Array(cvector_glVertex_Array* vec);
int cvec_extend_glVertex_Array(cvector_glVertex_Array* vec, size_t num);
int cvec_insert_glVertex_Array(cvector_glVertex_Array* vec, size_t i, glVertex_Array a);
int cvec_insert_array_glVertex_Array(cvector_glVertex_Array* vec, size_t i, glVertex_Array* a, size_t num);
glVertex_Array cvec_replace_glVertex_Array(cvector_glVertex_Array* vec, size_t i, glVertex_Array a);
void cvec_erase_glVertex_Array(cvector_glVertex_Array* vec, size_t start, size_t end);
int cvec_reserve_glVertex_Array(cvector_glVertex_Array* vec, size_t size);
int cvec_set_cap_glVertex_Array(cvector_glVertex_Array* vec, size_t size);
void cvec_set_val_sz_glVertex_Array(cvector_glVertex_Array* vec, glVertex_Array val);
void cvec_set_val_cap_glVertex_Array(cvector_glVertex_Array* vec, glVertex_Array val);
glVertex_Array* cvec_back_glVertex_Array(cvector_glVertex_Array* vec);
void cvec_clear_glVertex_Array(cvector_glVertex_Array* vec);
void cvec_free_glVertex_Array_heap(void* vec);
void cvec_free_glVertex_Array(void* vec);
#ifdef __cplusplus
}
#endif
/* CVECTOR_glVertex_Array_H */
#endif
#ifdef CVECTOR_glVertex_Array_IMPLEMENTATION
size_t CVEC_glVertex_Array_SZ = 50;
#define CVEC_glVertex_Array_ALLOCATOR(x) ((x+1) * 2)
#if defined(CVEC_MALLOC) && defined(CVEC_FREE) && defined(CVEC_REALLOC)
/* ok */
#elif !defined(CVEC_MALLOC) && !defined(CVEC_FREE) && !defined(CVEC_REALLOC)
/* ok */
#else
#error "Must define all or none of CVEC_MALLOC, CVEC_FREE, and CVEC_REALLOC."
#endif
#ifndef CVEC_MALLOC
#define CVEC_MALLOC(sz) malloc(sz)
#define CVEC_REALLOC(p, sz) realloc(p, sz)
#define CVEC_FREE(p) free(p)
#endif
#ifndef CVEC_MEMMOVE
#include <string.h>
#define CVEC_MEMMOVE(dst, src, sz) memmove(dst, src, sz)
#endif
#ifndef CVEC_ASSERT
#include <assert.h>
#define CVEC_ASSERT(x) assert(x)
#endif
// Allocate a cvector_glVertex_Array on the heap with the given starting size.
// The requested capacity is used if it is > size (or equal to a nonzero size);
// otherwise capacity defaults to size + CVEC_glVertex_Array_SZ.
// Returns NULL on allocation failure.  Elements are uninitialized.
cvector_glVertex_Array* cvec_glVertex_Array_heap(size_t size, size_t capacity)
{
cvector_glVertex_Array* vec;
if (!(vec = (cvector_glVertex_Array*)CVEC_MALLOC(sizeof(cvector_glVertex_Array)))) {
CVEC_ASSERT(vec != NULL);
return NULL;
}
vec->size = size;
vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_glVertex_Array_SZ;
if (!(vec->a = (glVertex_Array*)CVEC_MALLOC(vec->capacity*sizeof(glVertex_Array)))) {
CVEC_ASSERT(vec->a != NULL);
CVEC_FREE(vec);
return NULL;
}
return vec;
}
// Heap-allocate a vector and initialize it with a copy of vals[0..num-1].
// Capacity is num plus the default slack.  Returns NULL on allocation failure.
cvector_glVertex_Array* cvec_init_glVertex_Array_heap(glVertex_Array* vals, size_t num)
{
cvector_glVertex_Array* vec;
if (!(vec = (cvector_glVertex_Array*)CVEC_MALLOC(sizeof(cvector_glVertex_Array)))) {
CVEC_ASSERT(vec != NULL);
return NULL;
}
vec->capacity = num + CVEC_glVertex_Array_SZ;
vec->size = num;
if (!(vec->a = (glVertex_Array*)CVEC_MALLOC(vec->capacity*sizeof(glVertex_Array)))) {
CVEC_ASSERT(vec->a != NULL);
CVEC_FREE(vec);
return NULL;
}
CVEC_MEMMOVE(vec->a, vals, sizeof(glVertex_Array)*num);
return vec;
}
// Initialize a caller-provided vector struct (same size/capacity policy as
// cvec_glVertex_Array_heap).  Returns 0 on allocation failure (vec zeroed),
// 1 on success.  Elements are uninitialized.
int cvec_glVertex_Array(cvector_glVertex_Array* vec, size_t size, size_t capacity)
{
vec->size = size;
vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_glVertex_Array_SZ;
if (!(vec->a = (glVertex_Array*)CVEC_MALLOC(vec->capacity*sizeof(glVertex_Array)))) {
CVEC_ASSERT(vec->a != NULL);
vec->size = vec->capacity = 0;
return 0;
}
return 1;
}
// Initialize a caller-provided vector struct with a copy of vals[0..num-1].
// Returns 0 on allocation failure (vec zeroed), 1 on success.
int cvec_init_glVertex_Array(cvector_glVertex_Array* vec, glVertex_Array* vals, size_t num)
{
vec->capacity = num + CVEC_glVertex_Array_SZ;
vec->size = num;
if (!(vec->a = (glVertex_Array*)CVEC_MALLOC(vec->capacity*sizeof(glVertex_Array)))) {
CVEC_ASSERT(vec->a != NULL);
vec->size = vec->capacity = 0;
return 0;
}
CVEC_MEMMOVE(vec->a, vals, sizeof(glVertex_Array)*num);
return 1;
}
// void* wrapper around cvec_copy_glVertex_Array (for generic/callback use).
// dest is treated as uninitialized: its fields are reset before copying so the
// realloc inside cvec_copy starts from a NULL buffer.
int cvec_copyc_glVertex_Array(void* dest, void* src)
{
cvector_glVertex_Array* vec1 = (cvector_glVertex_Array*)dest;
cvector_glVertex_Array* vec2 = (cvector_glVertex_Array*)src;
vec1->a = NULL;
vec1->size = 0;
vec1->capacity = 0;
return cvec_copy_glVertex_Array(vec1, vec2);
}
// Copy src into dest, reallocating dest->a to src's capacity.  dest must be
// either initialized or zeroed (realloc(NULL, n) acts like malloc).
// Returns 0 on allocation failure (dest unchanged), 1 on success.
int cvec_copy_glVertex_Array(cvector_glVertex_Array* dest, cvector_glVertex_Array* src)
{
glVertex_Array* tmp = NULL;
if (!(tmp = (glVertex_Array*)CVEC_REALLOC(dest->a, src->capacity*sizeof(glVertex_Array)))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
dest->a = tmp;
CVEC_MEMMOVE(dest->a, src->a, src->size*sizeof(glVertex_Array));
dest->size = src->size;
dest->capacity = src->capacity;
return 1;
}
// Append a to the end of the vector, growing capacity geometrically
// (CVEC_glVertex_Array_ALLOCATOR: (cap+1)*2) when full.
// Returns 0 on allocation failure (vector unchanged), 1 on success.
int cvec_push_glVertex_Array(cvector_glVertex_Array* vec, glVertex_Array a)
{
glVertex_Array* tmp;
size_t tmp_sz;
if (vec->capacity > vec->size) {
vec->a[vec->size++] = a;
} else {
tmp_sz = CVEC_glVertex_Array_ALLOCATOR(vec->capacity);
if (!(tmp = (glVertex_Array*)CVEC_REALLOC(vec->a, sizeof(glVertex_Array)*tmp_sz))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
vec->a = tmp;
vec->a[vec->size++] = a;
vec->capacity = tmp_sz;
}
return 1;
}
// Remove and return the last element.  The vector must be non-empty.
glVertex_Array cvec_pop_glVertex_Array(cvector_glVertex_Array* vec)
{
	vec->size--;
	return vec->a[vec->size];
}
// Pointer to the last element.  The vector must be non-empty.
glVertex_Array* cvec_back_glVertex_Array(cvector_glVertex_Array* vec)
{
	return vec->a + (vec->size - 1);
}
// Grow size by num, reallocating (capacity + num + slack) if needed.
// The new elements are uninitialized.  Returns 0 on allocation failure
// (vector unchanged), 1 on success.
int cvec_extend_glVertex_Array(cvector_glVertex_Array* vec, size_t num)
{
glVertex_Array* tmp;
size_t tmp_sz;
if (vec->capacity < vec->size + num) {
tmp_sz = vec->capacity + num + CVEC_glVertex_Array_SZ;
if (!(tmp = (glVertex_Array*)CVEC_REALLOC(vec->a, sizeof(glVertex_Array)*tmp_sz))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
vec->a = tmp;
vec->capacity = tmp_sz;
}
vec->size += num;
return 1;
}
// Insert a at index i (0 <= i <= size), shifting elements [i, size) up by one.
// Grows capacity like cvec_push when full.  Returns 0 on allocation failure
// (vector unchanged), 1 on success.
int cvec_insert_glVertex_Array(cvector_glVertex_Array* vec, size_t i, glVertex_Array a)
{
glVertex_Array* tmp;
size_t tmp_sz;
if (vec->capacity > vec->size) {
CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(glVertex_Array));
vec->a[i] = a;
} else {
tmp_sz = CVEC_glVertex_Array_ALLOCATOR(vec->capacity);
if (!(tmp = (glVertex_Array*)CVEC_REALLOC(vec->a, sizeof(glVertex_Array)*tmp_sz))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
vec->a = tmp;
CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(glVertex_Array));
vec->a[i] = a;
vec->capacity = tmp_sz;
}
vec->size++;
return 1;
}
// Insert a copy of a[0..num-1] starting at index i, shifting the tail up by
// num.  Returns 0 on allocation failure (vector unchanged), 1 on success.
int cvec_insert_array_glVertex_Array(cvector_glVertex_Array* vec, size_t i, glVertex_Array* a, size_t num)
{
glVertex_Array* tmp;
size_t tmp_sz;
if (vec->capacity < vec->size + num) {
tmp_sz = vec->capacity + num + CVEC_glVertex_Array_SZ;
if (!(tmp = (glVertex_Array*)CVEC_REALLOC(vec->a, sizeof(glVertex_Array)*tmp_sz))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
vec->a = tmp;
vec->capacity = tmp_sz;
}
CVEC_MEMMOVE(&vec->a[i+num], &vec->a[i], (vec->size-i)*sizeof(glVertex_Array));
CVEC_MEMMOVE(&vec->a[i], a, num*sizeof(glVertex_Array));
vec->size += num;
return 1;
}
// Overwrite slot i with a and return the value that was there.
glVertex_Array cvec_replace_glVertex_Array(cvector_glVertex_Array* vec, size_t i, glVertex_Array a)
{
	glVertex_Array old_val = vec->a[i];
	vec->a[i] = a;
	return old_val;
}
// Remove elements [start, end] (inclusive), shifting the tail down.
// Capacity is not reduced.
void cvec_erase_glVertex_Array(cvector_glVertex_Array* vec, size_t start, size_t end)
{
size_t d = end - start + 1;
CVEC_MEMMOVE(&vec->a[start], &vec->a[end+1], (vec->size-1-end)*sizeof(glVertex_Array));
vec->size -= d;
}
// Ensure capacity for at least size elements (allocates size + slack).
// No-op if already large enough.  Returns 0 on allocation failure, 1 on success.
int cvec_reserve_glVertex_Array(cvector_glVertex_Array* vec, size_t size)
{
glVertex_Array* tmp;
if (vec->capacity < size) {
if (!(tmp = (glVertex_Array*)CVEC_REALLOC(vec->a, sizeof(glVertex_Array)*(size+CVEC_glVertex_Array_SZ)))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
vec->a = tmp;
vec->capacity = size + CVEC_glVertex_Array_SZ;
}
return 1;
}
// Set capacity to exactly size, truncating the vector's size if necessary.
// NOTE(review): realloc with size 0 is implementation-defined and may return
// NULL here — callers should avoid size == 0.
// Returns 0 on allocation failure, 1 on success.
int cvec_set_cap_glVertex_Array(cvector_glVertex_Array* vec, size_t size)
{
glVertex_Array* tmp;
if (size < vec->size) {
vec->size = size;
}
if (!(tmp = (glVertex_Array*)CVEC_REALLOC(vec->a, sizeof(glVertex_Array)*size))) {
CVEC_ASSERT(tmp != NULL);
return 0;
}
vec->a = tmp;
vec->capacity = size;
return 1;
}
// Assign val to the first size elements (the in-use portion only).
void cvec_set_val_sz_glVertex_Array(cvector_glVertex_Array* vec, glVertex_Array val)
{
	size_t idx;
	for (idx = 0; idx < vec->size; ++idx)
		vec->a[idx] = val;
}
// Assign val to every allocated slot, including the ones beyond size.
void cvec_set_val_cap_glVertex_Array(cvector_glVertex_Array* vec, glVertex_Array val)
{
	size_t idx;
	for (idx = 0; idx < vec->capacity; ++idx)
		vec->a[idx] = val;
}
// Logically empty the vector; capacity and allocation are untouched.
void cvec_clear_glVertex_Array(cvector_glVertex_Array* vec) { vec->size = 0; }
// Free a heap-allocated vector (from cvec_glVertex_Array_heap / _init_heap):
// releases both the element array and the struct itself.  NULL-safe.
void cvec_free_glVertex_Array_heap(void* vec)
{
cvector_glVertex_Array* tmp = (cvector_glVertex_Array*)vec;
if (!tmp) return;
CVEC_FREE(tmp->a);
CVEC_FREE(tmp);
}
// Free the element array of a caller-owned vector struct and reset it.
// The array pointer is NULLed so the struct is safe to reuse (realloc(NULL, n)
// behaves like malloc) and a second free is harmless; previously it was left
// dangling, so any later push/realloc on the "freed" vector was a
// use-after-free.
void cvec_free_glVertex_Array(void* vec)
{
	cvector_glVertex_Array* tmp = (cvector_glVertex_Array*)vec;
	CVEC_FREE(tmp->a);
	tmp->a = NULL;
	tmp->size = 0;
	tmp->capacity = 0;
}
#endif
#define CVECTOR_glBuffer_IMPLEMENTATION
#ifndef CVECTOR_glBuffer_H
#define CVECTOR_glBuffer_H
#include <stdlib.h>
#ifdef __cplusplus
extern "C" {
#endif
/** Data structure for glBuffer vector. */
typedef struct cvector_glBuffer
{
glBuffer* a; /**< Array. */
size_t size; /**< Current size (amount you use when manipulating array directly). */
size_t capacity; /**< Allocated size of array; always >= size. */
} cvector_glBuffer;
extern size_t CVEC_glBuffer_SZ;
int cvec_glBuffer(cvector_glBuffer* vec, size_t size, size_t capacity);
int cvec_init_glBuffer(cvector_glBuffer* vec, glBuffer* vals, size_t num);
cvector_glBuffer* cvec_glBuffer_heap(size_t size, size_t capacity);
cvector_glBuffer* cvec_init_glBuffer_heap(glBuffer* vals, size_t num);
int cvec_copyc_glBuffer(void* dest, void* src);
int cvec_copy_glBuffer(cvector_glBuffer* dest, cvector_glBuffer* src);
int cvec_push_glBuffer(cvector_glBuffer* vec, glBuffer a);
glBuffer cvec_pop_glBuffer(cvector_glBuffer* vec);
int cvec_extend_glBuffer(cvector_glBuffer* vec, size_t num);
int cvec_insert_glBuffer(cvector_glBuffer* vec, size_t i, glBuffer a);
int cvec_insert_array_glBuffer(cvector_glBuffer* vec, size_t i, glBuffer* a, size_t num);
glBuffer cvec_replace_glBuffer(cvector_glBuffer* vec, size_t i, glBuffer a);
void cvec_erase_glBuffer(cvector_glBuffer* vec, size_t start, size_t end);
int cvec_reserve_glBuffer(cvector_glBuffer* vec, size_t size);
int cvec_set_cap_glBuffer(cvector_glBuffer* vec, size_t size);
void cvec_set_val_sz_glBuffer(cvector_glBuffer* vec, glBuffer val);
void cvec_set_val_cap_glBuffer(cvector_glBuffer* vec, glBuffer val);
glBuffer* cvec_back_glBuffer(cvector_glBuffer* vec);
void cvec_clear_glBuffer(cvector_glBuffer* vec);
void cvec_free_glBuffer_heap(void* vec);
void cvec_free_glBuffer(void* vec);
#ifdef __cplusplus
}
#endif
/* CVECTOR_glBuffer_H */
#endif
#ifdef CVECTOR_glBuffer_IMPLEMENTATION
size_t CVEC_glBuffer_SZ = 50;
#define CVEC_glBuffer_ALLOCATOR(x) ((x+1) * 2)
#if defined(CVEC_MALLOC) && defined(CVEC_FREE) && defined(CVEC_REALLOC)
/* ok */
#elif !defined(CVEC_MALLOC) && !defined(CVEC_FREE) && !defined(CVEC_REALLOC)
/* ok */
#else
#error "Must define all or none of CVEC_MALLOC, CVEC_FREE, and CVEC_REALLOC."
#endif
#ifndef CVEC_MALLOC
#define CVEC_MALLOC(sz) malloc(sz)
#define CVEC_REALLOC(p, sz) realloc(p, sz)
#define CVEC_FREE(p) free(p)
#endif
#ifndef CVEC_MEMMOVE
#include <string.h>
#define CVEC_MEMMOVE(dst, src, sz) memmove(dst, src, sz)
#endif
#ifndef CVEC_ASSERT
#include <assert.h>
#define CVEC_ASSERT(x) assert(x)
#endif
// Allocate a cvector_glBuffer on the heap with the given starting size.
// The requested capacity is used if it is > size (or equal to a nonzero size);
// otherwise capacity defaults to size + CVEC_glBuffer_SZ.
// Returns NULL on allocation failure.  Elements are uninitialized.
cvector_glBuffer* cvec_glBuffer_heap(size_t size, size_t capacity)
{
cvector_glBuffer* vec;
if (!(vec = (cvector_glBuffer*)CVEC_MALLOC(sizeof(cvector_glBuffer)))) {
CVEC_ASSERT(vec != NULL);
return NULL;
}
vec->size = size;
vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_glBuffer_SZ;
if (!(vec->a = (glBuffer*)CVEC_MALLOC(vec->capacity*sizeof(glBuffer)))) {
CVEC_ASSERT(vec->a != NULL);
CVEC_FREE(vec);
return NULL;
}
return vec;
}
// Heap-allocate a vector and initialize it with a copy of vals[0..num-1].
// Capacity is num plus the default slack.  Returns NULL on allocation failure.
cvector_glBuffer* cvec_init_glBuffer_heap(glBuffer* vals, size_t num)
{
cvector_glBuffer* vec;
if (!(vec = (cvector_glBuffer*)CVEC_MALLOC(sizeof(cvector_glBuffer)))) {
CVEC_ASSERT(vec != NULL);
return NULL;
}
vec->capacity = num + CVEC_glBuffer_SZ;
vec->size = num;
if (!(vec->a = (glBuffer*)CVEC_MALLOC(vec->capacity*sizeof(glBuffer)))) {
CVEC_ASSERT(vec->a != NULL);
CVEC_FREE(vec);
return NULL;
}
CVEC_MEMMOVE(vec->a, vals, sizeof(glBuffer)*num);
return vec;
}
// Initialize a caller-provided vector struct (same size/capacity policy as
// cvec_glBuffer_heap).  Returns 0 on allocation failure (vec zeroed),
// 1 on success.  Elements are uninitialized.
int cvec_glBuffer(cvector_glBuffer* vec, size_t size, size_t capacity)
{
vec->size = size;
vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_glBuffer_SZ;
if (!(vec->a = (glBuffer*)CVEC_MALLOC(vec->capacity*sizeof(glBuffer)))) {
CVEC_ASSERT(vec->a != NULL);
vec->size = vec->capacity = 0;
return 0;
}
return 1;
}
/* Initialize an already-allocated vector with num elements copied from vals.
 * On allocation failure both counters are zeroed and 0 is returned. */
int cvec_init_glBuffer(cvector_glBuffer* vec, glBuffer* vals, size_t num)
{
	vec->size = num;
	vec->capacity = num + CVEC_glBuffer_SZ;
	vec->a = (glBuffer*)CVEC_MALLOC(vec->capacity*sizeof(glBuffer));
	if (!vec->a) {
		CVEC_ASSERT(vec->a != NULL);
		vec->size = vec->capacity = 0;
		return 0;
	}
	CVEC_MEMMOVE(vec->a, vals, sizeof(glBuffer)*num);
	return 1;
}
/* Generic (void*) copy-construct wrapper: resets dest to an empty state,
 * then delegates the deep copy to cvec_copy_glBuffer. */
int cvec_copyc_glBuffer(void* dest, void* src)
{
	cvector_glBuffer* to = (cvector_glBuffer*)dest;
	to->a = NULL;
	to->size = 0;
	to->capacity = 0;
	return cvec_copy_glBuffer(to, (cvector_glBuffer*)src);
}
/* Deep-copy src into dest, growing dest's array to src->capacity.
 * Returns 0 on allocation failure, leaving dest untouched. */
int cvec_copy_glBuffer(cvector_glBuffer* dest, cvector_glBuffer* src)
{
	glBuffer* fresh = (glBuffer*)CVEC_REALLOC(dest->a, src->capacity*sizeof(glBuffer));
	if (!fresh) {
		CVEC_ASSERT(fresh != NULL);
		return 0;
	}
	dest->a = fresh;
	CVEC_MEMMOVE(dest->a, src->a, src->size*sizeof(glBuffer));
	dest->size = src->size;
	dest->capacity = src->capacity;
	return 1;
}
/* Append a to the end of the vector, growing the array via
 * CVEC_glBuffer_ALLOCATOR when it is full.
 * Returns 0 on allocation failure (vector unchanged). */
int cvec_push_glBuffer(cvector_glBuffer* vec, glBuffer a)
{
	glBuffer* grown;
	size_t new_cap;
	if (vec->capacity <= vec->size) {
		/* full: grow before storing */
		new_cap = CVEC_glBuffer_ALLOCATOR(vec->capacity);
		grown = (glBuffer*)CVEC_REALLOC(vec->a, sizeof(glBuffer)*new_cap);
		if (!grown) {
			CVEC_ASSERT(grown != NULL);
			return 0;
		}
		vec->a = grown;
		vec->capacity = new_cap;
	}
	vec->a[vec->size++] = a;
	return 1;
}
/* Remove and return the last element.
 * No empty check: caller must ensure size > 0 or size_t underflows. */
glBuffer cvec_pop_glBuffer(cvector_glBuffer* vec)
{
	vec->size--;
	return vec->a[vec->size];
}
/* Pointer to the last element; caller must ensure size > 0. */
glBuffer* cvec_back_glBuffer(cvector_glBuffer* vec)
{
	return vec->a + (vec->size - 1);
}
/* Grow size by num slots; the new elements are left uninitialized.
 * Returns 0 on allocation failure (vector unchanged). */
int cvec_extend_glBuffer(cvector_glBuffer* vec, size_t num)
{
	glBuffer* grown;
	size_t new_cap;
	if (vec->capacity < vec->size + num) {
		new_cap = vec->capacity + num + CVEC_glBuffer_SZ;
		grown = (glBuffer*)CVEC_REALLOC(vec->a, sizeof(glBuffer)*new_cap);
		if (!grown) {
			CVEC_ASSERT(grown != NULL);
			return 0;
		}
		vec->a = grown;
		vec->capacity = new_cap;
	}
	vec->size += num;
	return 1;
}
/* Insert a at index i, shifting elements [i, size) up by one.
 * Grows the array first when full; the original duplicated the shift in
 * both branches, here it is hoisted out after the (optional) grow step.
 * Returns 0 on allocation failure (vector unchanged). */
int cvec_insert_glBuffer(cvector_glBuffer* vec, size_t i, glBuffer a)
{
	glBuffer* grown;
	size_t new_cap;
	if (vec->capacity <= vec->size) {
		new_cap = CVEC_glBuffer_ALLOCATOR(vec->capacity);
		grown = (glBuffer*)CVEC_REALLOC(vec->a, sizeof(glBuffer)*new_cap);
		if (!grown) {
			CVEC_ASSERT(grown != NULL);
			return 0;
		}
		vec->a = grown;
		vec->capacity = new_cap;
	}
	CVEC_MEMMOVE(vec->a + i + 1, vec->a + i, (vec->size - i)*sizeof(glBuffer));
	vec->a[i] = a;
	vec->size++;
	return 1;
}
/* Insert num elements from a at index i, shifting the tail up by num.
 * Returns 0 on allocation failure (vector unchanged). */
int cvec_insert_array_glBuffer(cvector_glBuffer* vec, size_t i, glBuffer* a, size_t num)
{
	glBuffer* grown;
	size_t new_cap;
	if (vec->capacity < vec->size + num) {
		new_cap = vec->capacity + num + CVEC_glBuffer_SZ;
		grown = (glBuffer*)CVEC_REALLOC(vec->a, sizeof(glBuffer)*new_cap);
		if (!grown) {
			CVEC_ASSERT(grown != NULL);
			return 0;
		}
		vec->a = grown;
		vec->capacity = new_cap;
	}
	CVEC_MEMMOVE(vec->a + i + num, vec->a + i, (vec->size - i)*sizeof(glBuffer));
	CVEC_MEMMOVE(vec->a + i, a, num*sizeof(glBuffer));
	vec->size += num;
	return 1;
}
/* Overwrite element i with a and return the value it previously held. */
glBuffer cvec_replace_glBuffer(cvector_glBuffer* vec, size_t i, glBuffer a)
{
	glBuffer old = vec->a[i];
	vec->a[i] = a;
	return old;
}
/* Remove elements in the inclusive range [start, end], shifting the
 * remaining tail down. */
void cvec_erase_glBuffer(cvector_glBuffer* vec, size_t start, size_t end)
{
	size_t tail = vec->size - 1 - end;
	CVEC_MEMMOVE(vec->a + start, vec->a + end + 1, tail*sizeof(glBuffer));
	vec->size -= end - start + 1;
}
/* Ensure the array can hold at least size elements (allocating
 * size+CVEC_glBuffer_SZ when growth is needed).
 * Returns 0 on allocation failure (vector unchanged). */
int cvec_reserve_glBuffer(cvector_glBuffer* vec, size_t size)
{
	glBuffer* grown;
	if (vec->capacity >= size) {
		return 1;
	}
	grown = (glBuffer*)CVEC_REALLOC(vec->a, sizeof(glBuffer)*(size+CVEC_glBuffer_SZ));
	if (!grown) {
		CVEC_ASSERT(grown != NULL);
		return 0;
	}
	vec->a = grown;
	vec->capacity = size + CVEC_glBuffer_SZ;
	return 1;
}
/* Set the capacity to exactly size, truncating vec->size if necessary.
 * Returns 1 on success, 0 on allocation failure (vector unchanged).
 *
 * Fix: the original passed size==0 straight to realloc. realloc(p, 0) is
 * implementation-defined and may free p while returning NULL, after which
 * the old code returned 0 with vec->a dangling (a later CVEC_FREE would
 * double-free). Handle the empty case explicitly instead. */
int cvec_set_cap_glBuffer(cvector_glBuffer* vec, size_t size)
{
	glBuffer* tmp;
	if (size < vec->size) {
		vec->size = size;
	}
	if (!size) {
		/* explicit release leaves the vector in a valid empty state */
		CVEC_FREE(vec->a);
		vec->a = NULL;
		vec->capacity = 0;
		return 1;
	}
	if (!(tmp = (glBuffer*)CVEC_REALLOC(vec->a, sizeof(glBuffer)*size))) {
		CVEC_ASSERT(tmp != NULL);
		return 0;
	}
	vec->a = tmp;
	vec->capacity = size;
	return 1;
}
/* Assign val to the first size elements. */
void cvec_set_val_sz_glBuffer(cvector_glBuffer* vec, glBuffer val)
{
	size_t n = vec->size;
	while (n) {
		vec->a[--n] = val;
	}
}
/* Assign val to every allocated slot (the full capacity, not just size). */
void cvec_set_val_cap_glBuffer(cvector_glBuffer* vec, glBuffer val)
{
	size_t n = vec->capacity;
	while (n) {
		vec->a[--n] = val;
	}
}
/* Logical clear: resets size to 0 but keeps the allocation. */
void cvec_clear_glBuffer(cvector_glBuffer* vec)
{
	vec->size = 0;
}
/* Free a heap-allocated vector (array first, then the struct). NULL-safe. */
void cvec_free_glBuffer_heap(void* vec)
{
	cvector_glBuffer* v = (cvector_glBuffer*)vec;
	if (v) {
		CVEC_FREE(v->a);
		CVEC_FREE(v);
	}
}
/* Free the element array of a non-heap vector and reset it to a valid,
 * empty, reusable state.
 *
 * Fix: the original freed tmp->a but left the pointer dangling, so a second
 * call (or any later CVEC_FREE on the same vector) would double-free.
 * NULL the pointer after freeing; free(NULL)/realloc(NULL,..) are safe, so
 * the vector can be pushed to or re-initialized afterwards. */
void cvec_free_glBuffer(void* vec)
{
	cvector_glBuffer* tmp = (cvector_glBuffer*)vec;
	CVEC_FREE(tmp->a);
	tmp->a = NULL;
	tmp->size = 0;
	tmp->capacity = 0;
}
#endif
#define CVECTOR_glTexture_IMPLEMENTATION
#ifndef CVECTOR_glTexture_H
#define CVECTOR_glTexture_H
#include <stdlib.h>
#ifdef __cplusplus
extern "C" {
#endif
/** Data structure for glTexture vector. */
typedef struct cvector_glTexture
{
	glTexture* a;            /**< Array. */
	size_t size;       /**< Current size (amount you use when manipulating array directly). */
	size_t capacity;   /**< Allocated size of array; always >= size. */
} cvector_glTexture;
/* Default extra slots reserved beyond a requested size (defined in the implementation section). */
extern size_t CVEC_glTexture_SZ;
/* construction / initialization */
int cvec_glTexture(cvector_glTexture* vec, size_t size, size_t capacity);
int cvec_init_glTexture(cvector_glTexture* vec, glTexture* vals, size_t num);
cvector_glTexture* cvec_glTexture_heap(size_t size, size_t capacity);
cvector_glTexture* cvec_init_glTexture_heap(glTexture* vals, size_t num);
/* copying */
int cvec_copyc_glTexture(void* dest, void* src);
int cvec_copy_glTexture(cvector_glTexture* dest, cvector_glTexture* src);
/* element insertion / removal / mutation */
int cvec_push_glTexture(cvector_glTexture* vec, glTexture a);
glTexture cvec_pop_glTexture(cvector_glTexture* vec);
int cvec_extend_glTexture(cvector_glTexture* vec, size_t num);
int cvec_insert_glTexture(cvector_glTexture* vec, size_t i, glTexture a);
int cvec_insert_array_glTexture(cvector_glTexture* vec, size_t i, glTexture* a, size_t num);
glTexture cvec_replace_glTexture(cvector_glTexture* vec, size_t i, glTexture a);
void cvec_erase_glTexture(cvector_glTexture* vec, size_t start, size_t end);
/* capacity management */
int cvec_reserve_glTexture(cvector_glTexture* vec, size_t size);
int cvec_set_cap_glTexture(cvector_glTexture* vec, size_t size);
/* bulk assignment and access */
void cvec_set_val_sz_glTexture(cvector_glTexture* vec, glTexture val);
void cvec_set_val_cap_glTexture(cvector_glTexture* vec, glTexture val);
glTexture* cvec_back_glTexture(cvector_glTexture* vec);
/* destruction */
void cvec_clear_glTexture(cvector_glTexture* vec);
void cvec_free_glTexture_heap(void* vec);
void cvec_free_glTexture(void* vec);
#ifdef __cplusplus
}
#endif
/* CVECTOR_glTexture_H */
#endif
#ifdef CVECTOR_glTexture_IMPLEMENTATION
/* Generated clone of the glBuffer vector code, specialized for glTexture. */
/* Default number of extra slots reserved beyond a requested size. */
size_t CVEC_glTexture_SZ = 50;
/* Growth policy used when a push/insert finds the array full. */
#define CVEC_glTexture_ALLOCATOR(x) ((x+1) * 2)
/* The allocator hooks must be overridden all together or not at all. */
#if defined(CVEC_MALLOC) && defined(CVEC_FREE) && defined(CVEC_REALLOC)
/* ok */
#elif !defined(CVEC_MALLOC) && !defined(CVEC_FREE) && !defined(CVEC_REALLOC)
/* ok */
#else
#error "Must define all or none of CVEC_MALLOC, CVEC_FREE, and CVEC_REALLOC."
#endif
#ifndef CVEC_MALLOC
#define CVEC_MALLOC(sz) malloc(sz)
#define CVEC_REALLOC(p, sz) realloc(p, sz)
#define CVEC_FREE(p) free(p)
#endif
#ifndef CVEC_MEMMOVE
#include <string.h>
#define CVEC_MEMMOVE(dst, src, sz) memmove(dst, src, sz)
#endif
#ifndef CVEC_ASSERT
#include <assert.h>
#define CVEC_ASSERT(x) assert(x)
#endif
/* Heap-allocate a vector of the given size; capacity is honored only when it
 * is > size (or equals a nonzero size). Returns NULL on allocation failure. */
cvector_glTexture* cvec_glTexture_heap(size_t size, size_t capacity)
{
	cvector_glTexture* vec;
	if (!(vec = (cvector_glTexture*)CVEC_MALLOC(sizeof(cvector_glTexture)))) {
		CVEC_ASSERT(vec != NULL);
		return NULL;
	}
	vec->size = size;
	vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_glTexture_SZ;
	if (!(vec->a = (glTexture*)CVEC_MALLOC(vec->capacity*sizeof(glTexture)))) {
		CVEC_ASSERT(vec->a != NULL);
		CVEC_FREE(vec);
		return NULL;
	}
	return vec;
}
/* Heap-allocate a vector and copy num elements from vals into it. */
cvector_glTexture* cvec_init_glTexture_heap(glTexture* vals, size_t num)
{
	cvector_glTexture* vec;
	if (!(vec = (cvector_glTexture*)CVEC_MALLOC(sizeof(cvector_glTexture)))) {
		CVEC_ASSERT(vec != NULL);
		return NULL;
	}
	vec->capacity = num + CVEC_glTexture_SZ;
	vec->size = num;
	if (!(vec->a = (glTexture*)CVEC_MALLOC(vec->capacity*sizeof(glTexture)))) {
		CVEC_ASSERT(vec->a != NULL);
		CVEC_FREE(vec);
		return NULL;
	}
	CVEC_MEMMOVE(vec->a, vals, sizeof(glTexture)*num);
	return vec;
}
/* Initialize an already-allocated vector; zeroes the counters and returns 0
 * on allocation failure. */
int cvec_glTexture(cvector_glTexture* vec, size_t size, size_t capacity)
{
	vec->size = size;
	vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_glTexture_SZ;
	if (!(vec->a = (glTexture*)CVEC_MALLOC(vec->capacity*sizeof(glTexture)))) {
		CVEC_ASSERT(vec->a != NULL);
		vec->size = vec->capacity = 0;
		return 0;
	}
	return 1;
}
/* Initialize an already-allocated vector with num elements copied from vals. */
int cvec_init_glTexture(cvector_glTexture* vec, glTexture* vals, size_t num)
{
	vec->capacity = num + CVEC_glTexture_SZ;
	vec->size = num;
	if (!(vec->a = (glTexture*)CVEC_MALLOC(vec->capacity*sizeof(glTexture)))) {
		CVEC_ASSERT(vec->a != NULL);
		vec->size = vec->capacity = 0;
		return 0;
	}
	CVEC_MEMMOVE(vec->a, vals, sizeof(glTexture)*num);
	return 1;
}
/* Generic (void*) copy wrapper: resets dest then delegates to cvec_copy_glTexture. */
int cvec_copyc_glTexture(void* dest, void* src)
{
	cvector_glTexture* vec1 = (cvector_glTexture*)dest;
	cvector_glTexture* vec2 = (cvector_glTexture*)src;
	vec1->a = NULL;
	vec1->size = 0;
	vec1->capacity = 0;
	return cvec_copy_glTexture(vec1, vec2);
}
/* Deep-copy src into dest, growing dest's array to src->capacity.
 * Returns 0 on allocation failure (dest unchanged). */
int cvec_copy_glTexture(cvector_glTexture* dest, cvector_glTexture* src)
{
	glTexture* tmp = NULL;
	if (!(tmp = (glTexture*)CVEC_REALLOC(dest->a, src->capacity*sizeof(glTexture)))) {
		CVEC_ASSERT(tmp != NULL);
		return 0;
	}
	dest->a = tmp;
	CVEC_MEMMOVE(dest->a, src->a, src->size*sizeof(glTexture));
	dest->size = src->size;
	dest->capacity = src->capacity;
	return 1;
}
/* Append a, growing via CVEC_glTexture_ALLOCATOR when full; 0 on failure. */
int cvec_push_glTexture(cvector_glTexture* vec, glTexture a)
{
	glTexture* tmp;
	size_t tmp_sz;
	if (vec->capacity > vec->size) {
		vec->a[vec->size++] = a;
	} else {
		tmp_sz = CVEC_glTexture_ALLOCATOR(vec->capacity);
		if (!(tmp = (glTexture*)CVEC_REALLOC(vec->a, sizeof(glTexture)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->a[vec->size++] = a;
		vec->capacity = tmp_sz;
	}
	return 1;
}
/* Remove and return the last element; no empty check, caller ensures size > 0. */
glTexture cvec_pop_glTexture(cvector_glTexture* vec)
{
	return vec->a[--vec->size];
}
/* Pointer to the last element; caller ensures size > 0. */
glTexture* cvec_back_glTexture(cvector_glTexture* vec)
{
	return &vec->a[vec->size-1];
}
/* Grow size by num slots; the new elements are left uninitialized. */
int cvec_extend_glTexture(cvector_glTexture* vec, size_t num)
{
	glTexture* tmp;
	size_t tmp_sz;
	if (vec->capacity < vec->size + num) {
		tmp_sz = vec->capacity + num + CVEC_glTexture_SZ;
		if (!(tmp = (glTexture*)CVEC_REALLOC(vec->a, sizeof(glTexture)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->capacity = tmp_sz;
	}
	vec->size += num;
	return 1;
}
/* Insert a at index i, shifting elements [i, size) up by one. */
int cvec_insert_glTexture(cvector_glTexture* vec, size_t i, glTexture a)
{
	glTexture* tmp;
	size_t tmp_sz;
	if (vec->capacity > vec->size) {
		CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(glTexture));
		vec->a[i] = a;
	} else {
		tmp_sz = CVEC_glTexture_ALLOCATOR(vec->capacity);
		if (!(tmp = (glTexture*)CVEC_REALLOC(vec->a, sizeof(glTexture)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(glTexture));
		vec->a[i] = a;
		vec->capacity = tmp_sz;
	}
	vec->size++;
	return 1;
}
/* Insert num elements from a at index i, shifting the tail up by num. */
int cvec_insert_array_glTexture(cvector_glTexture* vec, size_t i, glTexture* a, size_t num)
{
	glTexture* tmp;
	size_t tmp_sz;
	if (vec->capacity < vec->size + num) {
		tmp_sz = vec->capacity + num + CVEC_glTexture_SZ;
		if (!(tmp = (glTexture*)CVEC_REALLOC(vec->a, sizeof(glTexture)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->capacity = tmp_sz;
	}
	CVEC_MEMMOVE(&vec->a[i+num], &vec->a[i], (vec->size-i)*sizeof(glTexture));
	CVEC_MEMMOVE(&vec->a[i], a, num*sizeof(glTexture));
	vec->size += num;
	return 1;
}
/* Overwrite element i with a and return the previous value. */
glTexture cvec_replace_glTexture(cvector_glTexture* vec, size_t i, glTexture a)
{
	glTexture tmp = vec->a[i];
	vec->a[i] = a;
	return tmp;
}
/* Remove elements in the inclusive range [start, end], shifting the tail down. */
void cvec_erase_glTexture(cvector_glTexture* vec, size_t start, size_t end)
{
	size_t d = end - start + 1;
	CVEC_MEMMOVE(&vec->a[start], &vec->a[end+1], (vec->size-1-end)*sizeof(glTexture));
	vec->size -= d;
}
/* Ensure capacity for at least size elements (allocates size+CVEC_glTexture_SZ
 * when growing). */
int cvec_reserve_glTexture(cvector_glTexture* vec, size_t size)
{
	glTexture* tmp;
	if (vec->capacity < size) {
		if (!(tmp = (glTexture*)CVEC_REALLOC(vec->a, sizeof(glTexture)*(size+CVEC_glTexture_SZ)))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->capacity = size + CVEC_glTexture_SZ;
	}
	return 1;
}
/* Set capacity exactly, truncating size if necessary.
 * NOTE(review): size==0 passes 0 to realloc, which is implementation-defined
 * and may free the array while returning NULL — confirm callers never pass 0. */
int cvec_set_cap_glTexture(cvector_glTexture* vec, size_t size)
{
	glTexture* tmp;
	if (size < vec->size) {
		vec->size = size;
	}
	if (!(tmp = (glTexture*)CVEC_REALLOC(vec->a, sizeof(glTexture)*size))) {
		CVEC_ASSERT(tmp != NULL);
		return 0;
	}
	vec->a = tmp;
	vec->capacity = size;
	return 1;
}
/* Set the first size elements to val. */
void cvec_set_val_sz_glTexture(cvector_glTexture* vec, glTexture val)
{
	size_t i;
	for (i=0; i<vec->size; i++) {
		vec->a[i] = val;
	}
}
/* Set all capacity elements to val. */
void cvec_set_val_cap_glTexture(cvector_glTexture* vec, glTexture val)
{
	size_t i;
	for (i=0; i<vec->capacity; i++) {
		vec->a[i] = val;
	}
}
/* Logical clear: keeps the allocation. */
void cvec_clear_glTexture(cvector_glTexture* vec) { vec->size = 0; }
/* Free a heap-allocated vector and its array; NULL-safe. */
void cvec_free_glTexture_heap(void* vec)
{
	cvector_glTexture* tmp = (cvector_glTexture*)vec;
	if (!tmp) return;
	CVEC_FREE(tmp->a);
	CVEC_FREE(tmp);
}
/* Free the array of a non-heap vector and zero its counters.
 * NOTE(review): tmp->a is left dangling — a second call would double-free;
 * confirm callers re-initialize before reuse. */
void cvec_free_glTexture(void* vec)
{
	cvector_glTexture* tmp = (cvector_glTexture*)vec;
	CVEC_FREE(tmp->a);
	tmp->size = 0;
	tmp->capacity = 0;
}
#endif
#define CVECTOR_glProgram_IMPLEMENTATION
#ifndef CVECTOR_glProgram_H
#define CVECTOR_glProgram_H
#include <stdlib.h>
#ifdef __cplusplus
extern "C" {
#endif
/** Data structure for glProgram vector. */
typedef struct cvector_glProgram
{
	glProgram* a;            /**< Array. */
	size_t size;       /**< Current size (amount you use when manipulating array directly). */
	size_t capacity;   /**< Allocated size of array; always >= size. */
} cvector_glProgram;
/* Default extra slots reserved beyond a requested size (defined in the implementation section). */
extern size_t CVEC_glProgram_SZ;
/* construction / initialization */
int cvec_glProgram(cvector_glProgram* vec, size_t size, size_t capacity);
int cvec_init_glProgram(cvector_glProgram* vec, glProgram* vals, size_t num);
cvector_glProgram* cvec_glProgram_heap(size_t size, size_t capacity);
cvector_glProgram* cvec_init_glProgram_heap(glProgram* vals, size_t num);
/* copying */
int cvec_copyc_glProgram(void* dest, void* src);
int cvec_copy_glProgram(cvector_glProgram* dest, cvector_glProgram* src);
/* element insertion / removal / mutation */
int cvec_push_glProgram(cvector_glProgram* vec, glProgram a);
glProgram cvec_pop_glProgram(cvector_glProgram* vec);
int cvec_extend_glProgram(cvector_glProgram* vec, size_t num);
int cvec_insert_glProgram(cvector_glProgram* vec, size_t i, glProgram a);
int cvec_insert_array_glProgram(cvector_glProgram* vec, size_t i, glProgram* a, size_t num);
glProgram cvec_replace_glProgram(cvector_glProgram* vec, size_t i, glProgram a);
void cvec_erase_glProgram(cvector_glProgram* vec, size_t start, size_t end);
/* capacity management */
int cvec_reserve_glProgram(cvector_glProgram* vec, size_t size);
int cvec_set_cap_glProgram(cvector_glProgram* vec, size_t size);
/* bulk assignment and access */
void cvec_set_val_sz_glProgram(cvector_glProgram* vec, glProgram val);
void cvec_set_val_cap_glProgram(cvector_glProgram* vec, glProgram val);
glProgram* cvec_back_glProgram(cvector_glProgram* vec);
/* destruction */
void cvec_clear_glProgram(cvector_glProgram* vec);
void cvec_free_glProgram_heap(void* vec);
void cvec_free_glProgram(void* vec);
#ifdef __cplusplus
}
#endif
/* CVECTOR_glProgram_H */
#endif
#ifdef CVECTOR_glProgram_IMPLEMENTATION
/* Generated clone of the glBuffer vector code, specialized for glProgram. */
/* Default number of extra slots reserved beyond a requested size. */
size_t CVEC_glProgram_SZ = 50;
/* Growth policy used when a push/insert finds the array full. */
#define CVEC_glProgram_ALLOCATOR(x) ((x+1) * 2)
/* The allocator hooks must be overridden all together or not at all. */
#if defined(CVEC_MALLOC) && defined(CVEC_FREE) && defined(CVEC_REALLOC)
/* ok */
#elif !defined(CVEC_MALLOC) && !defined(CVEC_FREE) && !defined(CVEC_REALLOC)
/* ok */
#else
#error "Must define all or none of CVEC_MALLOC, CVEC_FREE, and CVEC_REALLOC."
#endif
#ifndef CVEC_MALLOC
#define CVEC_MALLOC(sz) malloc(sz)
#define CVEC_REALLOC(p, sz) realloc(p, sz)
#define CVEC_FREE(p) free(p)
#endif
#ifndef CVEC_MEMMOVE
#include <string.h>
#define CVEC_MEMMOVE(dst, src, sz) memmove(dst, src, sz)
#endif
#ifndef CVEC_ASSERT
#include <assert.h>
#define CVEC_ASSERT(x) assert(x)
#endif
/* Heap-allocate a vector of the given size; capacity is honored only when it
 * is > size (or equals a nonzero size). Returns NULL on allocation failure. */
cvector_glProgram* cvec_glProgram_heap(size_t size, size_t capacity)
{
	cvector_glProgram* vec;
	if (!(vec = (cvector_glProgram*)CVEC_MALLOC(sizeof(cvector_glProgram)))) {
		CVEC_ASSERT(vec != NULL);
		return NULL;
	}
	vec->size = size;
	vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_glProgram_SZ;
	if (!(vec->a = (glProgram*)CVEC_MALLOC(vec->capacity*sizeof(glProgram)))) {
		CVEC_ASSERT(vec->a != NULL);
		CVEC_FREE(vec);
		return NULL;
	}
	return vec;
}
/* Heap-allocate a vector and copy num elements from vals into it. */
cvector_glProgram* cvec_init_glProgram_heap(glProgram* vals, size_t num)
{
	cvector_glProgram* vec;
	if (!(vec = (cvector_glProgram*)CVEC_MALLOC(sizeof(cvector_glProgram)))) {
		CVEC_ASSERT(vec != NULL);
		return NULL;
	}
	vec->capacity = num + CVEC_glProgram_SZ;
	vec->size = num;
	if (!(vec->a = (glProgram*)CVEC_MALLOC(vec->capacity*sizeof(glProgram)))) {
		CVEC_ASSERT(vec->a != NULL);
		CVEC_FREE(vec);
		return NULL;
	}
	CVEC_MEMMOVE(vec->a, vals, sizeof(glProgram)*num);
	return vec;
}
/* Initialize an already-allocated vector; zeroes the counters and returns 0
 * on allocation failure. */
int cvec_glProgram(cvector_glProgram* vec, size_t size, size_t capacity)
{
	vec->size = size;
	vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_glProgram_SZ;
	if (!(vec->a = (glProgram*)CVEC_MALLOC(vec->capacity*sizeof(glProgram)))) {
		CVEC_ASSERT(vec->a != NULL);
		vec->size = vec->capacity = 0;
		return 0;
	}
	return 1;
}
/* Initialize an already-allocated vector with num elements copied from vals. */
int cvec_init_glProgram(cvector_glProgram* vec, glProgram* vals, size_t num)
{
	vec->capacity = num + CVEC_glProgram_SZ;
	vec->size = num;
	if (!(vec->a = (glProgram*)CVEC_MALLOC(vec->capacity*sizeof(glProgram)))) {
		CVEC_ASSERT(vec->a != NULL);
		vec->size = vec->capacity = 0;
		return 0;
	}
	CVEC_MEMMOVE(vec->a, vals, sizeof(glProgram)*num);
	return 1;
}
/* Generic (void*) copy wrapper: resets dest then delegates to cvec_copy_glProgram. */
int cvec_copyc_glProgram(void* dest, void* src)
{
	cvector_glProgram* vec1 = (cvector_glProgram*)dest;
	cvector_glProgram* vec2 = (cvector_glProgram*)src;
	vec1->a = NULL;
	vec1->size = 0;
	vec1->capacity = 0;
	return cvec_copy_glProgram(vec1, vec2);
}
/* Deep-copy src into dest, growing dest's array to src->capacity.
 * Returns 0 on allocation failure (dest unchanged). */
int cvec_copy_glProgram(cvector_glProgram* dest, cvector_glProgram* src)
{
	glProgram* tmp = NULL;
	if (!(tmp = (glProgram*)CVEC_REALLOC(dest->a, src->capacity*sizeof(glProgram)))) {
		CVEC_ASSERT(tmp != NULL);
		return 0;
	}
	dest->a = tmp;
	CVEC_MEMMOVE(dest->a, src->a, src->size*sizeof(glProgram));
	dest->size = src->size;
	dest->capacity = src->capacity;
	return 1;
}
/* Append a, growing via CVEC_glProgram_ALLOCATOR when full; 0 on failure. */
int cvec_push_glProgram(cvector_glProgram* vec, glProgram a)
{
	glProgram* tmp;
	size_t tmp_sz;
	if (vec->capacity > vec->size) {
		vec->a[vec->size++] = a;
	} else {
		tmp_sz = CVEC_glProgram_ALLOCATOR(vec->capacity);
		if (!(tmp = (glProgram*)CVEC_REALLOC(vec->a, sizeof(glProgram)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->a[vec->size++] = a;
		vec->capacity = tmp_sz;
	}
	return 1;
}
/* Remove and return the last element; no empty check, caller ensures size > 0. */
glProgram cvec_pop_glProgram(cvector_glProgram* vec)
{
	return vec->a[--vec->size];
}
/* Pointer to the last element; caller ensures size > 0. */
glProgram* cvec_back_glProgram(cvector_glProgram* vec)
{
	return &vec->a[vec->size-1];
}
/* Grow size by num slots; the new elements are left uninitialized. */
int cvec_extend_glProgram(cvector_glProgram* vec, size_t num)
{
	glProgram* tmp;
	size_t tmp_sz;
	if (vec->capacity < vec->size + num) {
		tmp_sz = vec->capacity + num + CVEC_glProgram_SZ;
		if (!(tmp = (glProgram*)CVEC_REALLOC(vec->a, sizeof(glProgram)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->capacity = tmp_sz;
	}
	vec->size += num;
	return 1;
}
/* Insert a at index i, shifting elements [i, size) up by one. */
int cvec_insert_glProgram(cvector_glProgram* vec, size_t i, glProgram a)
{
	glProgram* tmp;
	size_t tmp_sz;
	if (vec->capacity > vec->size) {
		CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(glProgram));
		vec->a[i] = a;
	} else {
		tmp_sz = CVEC_glProgram_ALLOCATOR(vec->capacity);
		if (!(tmp = (glProgram*)CVEC_REALLOC(vec->a, sizeof(glProgram)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(glProgram));
		vec->a[i] = a;
		vec->capacity = tmp_sz;
	}
	vec->size++;
	return 1;
}
/* Insert num elements from a at index i, shifting the tail up by num. */
int cvec_insert_array_glProgram(cvector_glProgram* vec, size_t i, glProgram* a, size_t num)
{
	glProgram* tmp;
	size_t tmp_sz;
	if (vec->capacity < vec->size + num) {
		tmp_sz = vec->capacity + num + CVEC_glProgram_SZ;
		if (!(tmp = (glProgram*)CVEC_REALLOC(vec->a, sizeof(glProgram)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->capacity = tmp_sz;
	}
	CVEC_MEMMOVE(&vec->a[i+num], &vec->a[i], (vec->size-i)*sizeof(glProgram));
	CVEC_MEMMOVE(&vec->a[i], a, num*sizeof(glProgram));
	vec->size += num;
	return 1;
}
/* Overwrite element i with a and return the previous value. */
glProgram cvec_replace_glProgram(cvector_glProgram* vec, size_t i, glProgram a)
{
	glProgram tmp = vec->a[i];
	vec->a[i] = a;
	return tmp;
}
/* Remove elements in the inclusive range [start, end], shifting the tail down. */
void cvec_erase_glProgram(cvector_glProgram* vec, size_t start, size_t end)
{
	size_t d = end - start + 1;
	CVEC_MEMMOVE(&vec->a[start], &vec->a[end+1], (vec->size-1-end)*sizeof(glProgram));
	vec->size -= d;
}
/* Ensure capacity for at least size elements (allocates size+CVEC_glProgram_SZ
 * when growing). */
int cvec_reserve_glProgram(cvector_glProgram* vec, size_t size)
{
	glProgram* tmp;
	if (vec->capacity < size) {
		if (!(tmp = (glProgram*)CVEC_REALLOC(vec->a, sizeof(glProgram)*(size+CVEC_glProgram_SZ)))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->capacity = size + CVEC_glProgram_SZ;
	}
	return 1;
}
/* Set capacity exactly, truncating size if necessary.
 * NOTE(review): size==0 passes 0 to realloc, which is implementation-defined
 * and may free the array while returning NULL — confirm callers never pass 0. */
int cvec_set_cap_glProgram(cvector_glProgram* vec, size_t size)
{
	glProgram* tmp;
	if (size < vec->size) {
		vec->size = size;
	}
	if (!(tmp = (glProgram*)CVEC_REALLOC(vec->a, sizeof(glProgram)*size))) {
		CVEC_ASSERT(tmp != NULL);
		return 0;
	}
	vec->a = tmp;
	vec->capacity = size;
	return 1;
}
/* Set the first size elements to val. */
void cvec_set_val_sz_glProgram(cvector_glProgram* vec, glProgram val)
{
	size_t i;
	for (i=0; i<vec->size; i++) {
		vec->a[i] = val;
	}
}
/* Set all capacity elements to val. */
void cvec_set_val_cap_glProgram(cvector_glProgram* vec, glProgram val)
{
	size_t i;
	for (i=0; i<vec->capacity; i++) {
		vec->a[i] = val;
	}
}
/* Logical clear: keeps the allocation. */
void cvec_clear_glProgram(cvector_glProgram* vec) { vec->size = 0; }
/* Free a heap-allocated vector and its array; NULL-safe. */
void cvec_free_glProgram_heap(void* vec)
{
	cvector_glProgram* tmp = (cvector_glProgram*)vec;
	if (!tmp) return;
	CVEC_FREE(tmp->a);
	CVEC_FREE(tmp);
}
/* Free the array of a non-heap vector and zero its counters.
 * NOTE(review): tmp->a is left dangling — a second call would double-free;
 * confirm callers re-initialize before reuse. */
void cvec_free_glProgram(void* vec)
{
	cvector_glProgram* tmp = (cvector_glProgram*)vec;
	CVEC_FREE(tmp->a);
	tmp->size = 0;
	tmp->capacity = 0;
}
#endif
#define CVECTOR_glVertex_IMPLEMENTATION
#ifndef CVECTOR_glVertex_H
#define CVECTOR_glVertex_H
#include <stdlib.h>
#ifdef __cplusplus
extern "C" {
#endif
/** Data structure for glVertex vector. */
typedef struct cvector_glVertex
{
	glVertex* a;             /**< Array. */
	size_t size;       /**< Current size (amount you use when manipulating array directly). */
	size_t capacity;   /**< Allocated size of array; always >= size. */
} cvector_glVertex;
/* Default extra slots reserved beyond a requested size (defined in the implementation section). */
extern size_t CVEC_glVertex_SZ;
/* construction / initialization */
int cvec_glVertex(cvector_glVertex* vec, size_t size, size_t capacity);
int cvec_init_glVertex(cvector_glVertex* vec, glVertex* vals, size_t num);
cvector_glVertex* cvec_glVertex_heap(size_t size, size_t capacity);
cvector_glVertex* cvec_init_glVertex_heap(glVertex* vals, size_t num);
/* copying */
int cvec_copyc_glVertex(void* dest, void* src);
int cvec_copy_glVertex(cvector_glVertex* dest, cvector_glVertex* src);
/* element insertion / removal / mutation */
int cvec_push_glVertex(cvector_glVertex* vec, glVertex a);
glVertex cvec_pop_glVertex(cvector_glVertex* vec);
int cvec_extend_glVertex(cvector_glVertex* vec, size_t num);
int cvec_insert_glVertex(cvector_glVertex* vec, size_t i, glVertex a);
int cvec_insert_array_glVertex(cvector_glVertex* vec, size_t i, glVertex* a, size_t num);
glVertex cvec_replace_glVertex(cvector_glVertex* vec, size_t i, glVertex a);
void cvec_erase_glVertex(cvector_glVertex* vec, size_t start, size_t end);
/* capacity management */
int cvec_reserve_glVertex(cvector_glVertex* vec, size_t size);
int cvec_set_cap_glVertex(cvector_glVertex* vec, size_t size);
/* bulk assignment and access */
void cvec_set_val_sz_glVertex(cvector_glVertex* vec, glVertex val);
void cvec_set_val_cap_glVertex(cvector_glVertex* vec, glVertex val);
glVertex* cvec_back_glVertex(cvector_glVertex* vec);
/* destruction */
void cvec_clear_glVertex(cvector_glVertex* vec);
void cvec_free_glVertex_heap(void* vec);
void cvec_free_glVertex(void* vec);
#ifdef __cplusplus
}
#endif
/* CVECTOR_glVertex_H */
#endif
#ifdef CVECTOR_glVertex_IMPLEMENTATION
/* Generated clone of the glBuffer vector code, specialized for glVertex. */
/* Default number of extra slots reserved beyond a requested size. */
size_t CVEC_glVertex_SZ = 50;
/* Growth policy used when a push/insert finds the array full. */
#define CVEC_glVertex_ALLOCATOR(x) ((x+1) * 2)
/* The allocator hooks must be overridden all together or not at all. */
#if defined(CVEC_MALLOC) && defined(CVEC_FREE) && defined(CVEC_REALLOC)
/* ok */
#elif !defined(CVEC_MALLOC) && !defined(CVEC_FREE) && !defined(CVEC_REALLOC)
/* ok */
#else
#error "Must define all or none of CVEC_MALLOC, CVEC_FREE, and CVEC_REALLOC."
#endif
#ifndef CVEC_MALLOC
#define CVEC_MALLOC(sz) malloc(sz)
#define CVEC_REALLOC(p, sz) realloc(p, sz)
#define CVEC_FREE(p) free(p)
#endif
#ifndef CVEC_MEMMOVE
#include <string.h>
#define CVEC_MEMMOVE(dst, src, sz) memmove(dst, src, sz)
#endif
#ifndef CVEC_ASSERT
#include <assert.h>
#define CVEC_ASSERT(x) assert(x)
#endif
/* Heap-allocate a vector of the given size; capacity is honored only when it
 * is > size (or equals a nonzero size). Returns NULL on allocation failure. */
cvector_glVertex* cvec_glVertex_heap(size_t size, size_t capacity)
{
	cvector_glVertex* vec;
	if (!(vec = (cvector_glVertex*)CVEC_MALLOC(sizeof(cvector_glVertex)))) {
		CVEC_ASSERT(vec != NULL);
		return NULL;
	}
	vec->size = size;
	vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_glVertex_SZ;
	if (!(vec->a = (glVertex*)CVEC_MALLOC(vec->capacity*sizeof(glVertex)))) {
		CVEC_ASSERT(vec->a != NULL);
		CVEC_FREE(vec);
		return NULL;
	}
	return vec;
}
/* Heap-allocate a vector and copy num elements from vals into it. */
cvector_glVertex* cvec_init_glVertex_heap(glVertex* vals, size_t num)
{
	cvector_glVertex* vec;
	if (!(vec = (cvector_glVertex*)CVEC_MALLOC(sizeof(cvector_glVertex)))) {
		CVEC_ASSERT(vec != NULL);
		return NULL;
	}
	vec->capacity = num + CVEC_glVertex_SZ;
	vec->size = num;
	if (!(vec->a = (glVertex*)CVEC_MALLOC(vec->capacity*sizeof(glVertex)))) {
		CVEC_ASSERT(vec->a != NULL);
		CVEC_FREE(vec);
		return NULL;
	}
	CVEC_MEMMOVE(vec->a, vals, sizeof(glVertex)*num);
	return vec;
}
/* Initialize an already-allocated vector; zeroes the counters and returns 0
 * on allocation failure. */
int cvec_glVertex(cvector_glVertex* vec, size_t size, size_t capacity)
{
	vec->size = size;
	vec->capacity = (capacity > vec->size || (vec->size && capacity == vec->size)) ? capacity : vec->size + CVEC_glVertex_SZ;
	if (!(vec->a = (glVertex*)CVEC_MALLOC(vec->capacity*sizeof(glVertex)))) {
		CVEC_ASSERT(vec->a != NULL);
		vec->size = vec->capacity = 0;
		return 0;
	}
	return 1;
}
/* Initialize an already-allocated vector with num elements copied from vals. */
int cvec_init_glVertex(cvector_glVertex* vec, glVertex* vals, size_t num)
{
	vec->capacity = num + CVEC_glVertex_SZ;
	vec->size = num;
	if (!(vec->a = (glVertex*)CVEC_MALLOC(vec->capacity*sizeof(glVertex)))) {
		CVEC_ASSERT(vec->a != NULL);
		vec->size = vec->capacity = 0;
		return 0;
	}
	CVEC_MEMMOVE(vec->a, vals, sizeof(glVertex)*num);
	return 1;
}
/* Generic (void*) copy wrapper: resets dest then delegates to cvec_copy_glVertex. */
int cvec_copyc_glVertex(void* dest, void* src)
{
	cvector_glVertex* vec1 = (cvector_glVertex*)dest;
	cvector_glVertex* vec2 = (cvector_glVertex*)src;
	vec1->a = NULL;
	vec1->size = 0;
	vec1->capacity = 0;
	return cvec_copy_glVertex(vec1, vec2);
}
/* Deep-copy src into dest, growing dest's array to src->capacity.
 * Returns 0 on allocation failure (dest unchanged). */
int cvec_copy_glVertex(cvector_glVertex* dest, cvector_glVertex* src)
{
	glVertex* tmp = NULL;
	if (!(tmp = (glVertex*)CVEC_REALLOC(dest->a, src->capacity*sizeof(glVertex)))) {
		CVEC_ASSERT(tmp != NULL);
		return 0;
	}
	dest->a = tmp;
	CVEC_MEMMOVE(dest->a, src->a, src->size*sizeof(glVertex));
	dest->size = src->size;
	dest->capacity = src->capacity;
	return 1;
}
/* Append a, growing via CVEC_glVertex_ALLOCATOR when full; 0 on failure. */
int cvec_push_glVertex(cvector_glVertex* vec, glVertex a)
{
	glVertex* tmp;
	size_t tmp_sz;
	if (vec->capacity > vec->size) {
		vec->a[vec->size++] = a;
	} else {
		tmp_sz = CVEC_glVertex_ALLOCATOR(vec->capacity);
		if (!(tmp = (glVertex*)CVEC_REALLOC(vec->a, sizeof(glVertex)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->a[vec->size++] = a;
		vec->capacity = tmp_sz;
	}
	return 1;
}
/* Remove and return the last element; no empty check, caller ensures size > 0. */
glVertex cvec_pop_glVertex(cvector_glVertex* vec)
{
	return vec->a[--vec->size];
}
/* Pointer to the last element; caller ensures size > 0. */
glVertex* cvec_back_glVertex(cvector_glVertex* vec)
{
	return &vec->a[vec->size-1];
}
/* Grow size by num slots; the new elements are left uninitialized. */
int cvec_extend_glVertex(cvector_glVertex* vec, size_t num)
{
	glVertex* tmp;
	size_t tmp_sz;
	if (vec->capacity < vec->size + num) {
		tmp_sz = vec->capacity + num + CVEC_glVertex_SZ;
		if (!(tmp = (glVertex*)CVEC_REALLOC(vec->a, sizeof(glVertex)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->capacity = tmp_sz;
	}
	vec->size += num;
	return 1;
}
/* Insert a at index i, shifting elements [i, size) up by one. */
int cvec_insert_glVertex(cvector_glVertex* vec, size_t i, glVertex a)
{
	glVertex* tmp;
	size_t tmp_sz;
	if (vec->capacity > vec->size) {
		CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(glVertex));
		vec->a[i] = a;
	} else {
		tmp_sz = CVEC_glVertex_ALLOCATOR(vec->capacity);
		if (!(tmp = (glVertex*)CVEC_REALLOC(vec->a, sizeof(glVertex)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(glVertex));
		vec->a[i] = a;
		vec->capacity = tmp_sz;
	}
	vec->size++;
	return 1;
}
/* Insert num elements from a at index i, shifting the tail up by num. */
int cvec_insert_array_glVertex(cvector_glVertex* vec, size_t i, glVertex* a, size_t num)
{
	glVertex* tmp;
	size_t tmp_sz;
	if (vec->capacity < vec->size + num) {
		tmp_sz = vec->capacity + num + CVEC_glVertex_SZ;
		if (!(tmp = (glVertex*)CVEC_REALLOC(vec->a, sizeof(glVertex)*tmp_sz))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->capacity = tmp_sz;
	}
	CVEC_MEMMOVE(&vec->a[i+num], &vec->a[i], (vec->size-i)*sizeof(glVertex));
	CVEC_MEMMOVE(&vec->a[i], a, num*sizeof(glVertex));
	vec->size += num;
	return 1;
}
/* Overwrite element i with a and return the previous value. */
glVertex cvec_replace_glVertex(cvector_glVertex* vec, size_t i, glVertex a)
{
	glVertex tmp = vec->a[i];
	vec->a[i] = a;
	return tmp;
}
/* Remove elements in the inclusive range [start, end], shifting the tail down. */
void cvec_erase_glVertex(cvector_glVertex* vec, size_t start, size_t end)
{
	size_t d = end - start + 1;
	CVEC_MEMMOVE(&vec->a[start], &vec->a[end+1], (vec->size-1-end)*sizeof(glVertex));
	vec->size -= d;
}
/* Ensure capacity for at least size elements (allocates size+CVEC_glVertex_SZ
 * when growing). */
int cvec_reserve_glVertex(cvector_glVertex* vec, size_t size)
{
	glVertex* tmp;
	if (vec->capacity < size) {
		if (!(tmp = (glVertex*)CVEC_REALLOC(vec->a, sizeof(glVertex)*(size+CVEC_glVertex_SZ)))) {
			CVEC_ASSERT(tmp != NULL);
			return 0;
		}
		vec->a = tmp;
		vec->capacity = size + CVEC_glVertex_SZ;
	}
	return 1;
}
/* Set capacity exactly, truncating size if necessary.
 * NOTE(review): size==0 passes 0 to realloc, which is implementation-defined
 * and may free the array while returning NULL — confirm callers never pass 0. */
int cvec_set_cap_glVertex(cvector_glVertex* vec, size_t size)
{
	glVertex* tmp;
	if (size < vec->size) {
		vec->size = size;
	}
	if (!(tmp = (glVertex*)CVEC_REALLOC(vec->a, sizeof(glVertex)*size))) {
		CVEC_ASSERT(tmp != NULL);
		return 0;
	}
	vec->a = tmp;
	vec->capacity = size;
	return 1;
}
/* Set the first size elements to val. */
void cvec_set_val_sz_glVertex(cvector_glVertex* vec, glVertex val)
{
	size_t i;
	for (i=0; i<vec->size; i++) {
		vec->a[i] = val;
	}
}
/* Set all capacity elements to val. */
void cvec_set_val_cap_glVertex(cvector_glVertex* vec, glVertex val)
{
	size_t i;
	for (i=0; i<vec->capacity; i++) {
		vec->a[i] = val;
	}
}
/* Logical clear: keeps the allocation. */
void cvec_clear_glVertex(cvector_glVertex* vec) { vec->size = 0; }
/* Free a heap-allocated vector and its array; NULL-safe. */
void cvec_free_glVertex_heap(void* vec)
{
	cvector_glVertex* tmp = (cvector_glVertex*)vec;
	if (!tmp) return;
	CVEC_FREE(tmp->a);
	CVEC_FREE(tmp);
}
/* Free the array of a non-heap vector and zero its counters.
 * NOTE(review): tmp->a is left dangling — a second call would double-free;
 * confirm callers re-initialize before reuse. */
void cvec_free_glVertex(void* vec)
{
	cvector_glVertex* tmp = (cvector_glVertex*)vec;
	CVEC_FREE(tmp->a);
	tmp->size = 0;
	tmp->capacity = 0;
}
#endif
#define CVECTOR_float_IMPLEMENTATION
#ifndef CVECTOR_float_H
#define CVECTOR_float_H
#include <stdlib.h>
#ifdef __cplusplus
extern "C" {
#endif
/** Data structure for float vector. */
typedef struct cvector_float
{
	float* a;                /**< Array. */
	size_t size;       /**< Current size (amount you use when manipulating array directly). */
	size_t capacity;   /**< Allocated size of array; always >= size. */
} cvector_float;
/* Default extra slots reserved beyond a requested size (defined in the implementation section). */
extern size_t CVEC_float_SZ;
/* construction / initialization */
int cvec_float(cvector_float* vec, size_t size, size_t capacity);
int cvec_init_float(cvector_float* vec, float* vals, size_t num);
cvector_float* cvec_float_heap(size_t size, size_t capacity);
cvector_float* cvec_init_float_heap(float* vals, size_t num);
/* copying */
int cvec_copyc_float(void* dest, void* src);
int cvec_copy_float(cvector_float* dest, cvector_float* src);
/* element insertion / removal / mutation */
int cvec_push_float(cvector_float* vec, float a);
float cvec_pop_float(cvector_float* vec);
int cvec_extend_float(cvector_float* vec, size_t num);
int cvec_insert_float(cvector_float* vec, size_t i, float a);
int cvec_insert_array_float(cvector_float* vec, size_t i, float* a, size_t num);
float cvec_replace_float(cvector_float* vec, size_t i, float a);
void cvec_erase_float(cvector_float* vec, size_t start, size_t end);
/* capacity management */
int cvec_reserve_float(cvector_float* vec, size_t size);
int cvec_set_cap_float(cvector_float* vec, size_t size);
/* bulk assignment and access */
void cvec_set_val_sz_float(cvector_float* vec, float val);
void cvec_set_val_cap_float(cvector_float* vec, float val);
float* cvec_back_float(cvector_float* vec);
/* destruction */
void cvec_clear_float(cvector_float* vec);
void cvec_free_float_heap(void* vec);
void cvec_free_float(void* vec);
#ifdef __cplusplus
}
#endif
/* CVECTOR_float_H */
#endif
#ifdef CVECTOR_float_IMPLEMENTATION
/* Default number of extra slots reserved beyond a requested size. */
size_t CVEC_float_SZ = 50;
/* Growth policy used when a push/insert finds the array full. */
#define CVEC_float_ALLOCATOR(x) ((x+1) * 2)
/* The allocator hooks must be overridden all together or not at all. */
#if defined(CVEC_MALLOC) && defined(CVEC_FREE) && defined(CVEC_REALLOC)
/* ok */
#elif !defined(CVEC_MALLOC) && !defined(CVEC_FREE) && !defined(CVEC_REALLOC)
/* ok */
#else
#error "Must define all or none of CVEC_MALLOC, CVEC_FREE, and CVEC_REALLOC."
#endif
#ifndef CVEC_MALLOC
#define CVEC_MALLOC(sz) malloc(sz)
#define CVEC_REALLOC(p, sz) realloc(p, sz)
#define CVEC_FREE(p) free(p)
#endif
#ifndef CVEC_MEMMOVE
#include <string.h>
#define CVEC_MEMMOVE(dst, src, sz) memmove(dst, src, sz)
#endif
#ifndef CVEC_ASSERT
#include <assert.h>
#define CVEC_ASSERT(x) assert(x)
#endif
/** Allocate both the vector struct and its backing array on the heap.
 *  size is the initial element count; capacity is honored when it is larger
 *  than size (or equal to a nonzero size), otherwise size + CVEC_float_SZ is
 *  used.  Returns NULL if either allocation fails. */
cvector_float* cvec_float_heap(size_t size, size_t capacity)
{
	cvector_float* vec = (cvector_float*)CVEC_MALLOC(sizeof(cvector_float));
	if (!vec) {
		CVEC_ASSERT(vec != NULL);
		return NULL;
	}
	vec->size = size;
	if (capacity > size || (size && capacity == size)) {
		vec->capacity = capacity;
	} else {
		vec->capacity = size + CVEC_float_SZ;
	}
	vec->a = (float*)CVEC_MALLOC(vec->capacity*sizeof(float));
	if (!vec->a) {
		CVEC_ASSERT(vec->a != NULL);
		CVEC_FREE(vec);
		return NULL;
	}
	return vec;
}
/** Heap-allocate a vector initialized with a copy of the num floats at vals.
 *  Capacity is num + CVEC_float_SZ.  Returns NULL on allocation failure. */
cvector_float* cvec_init_float_heap(float* vals, size_t num)
{
	cvector_float* vec = (cvector_float*)CVEC_MALLOC(sizeof(cvector_float));
	if (!vec) {
		CVEC_ASSERT(vec != NULL);
		return NULL;
	}
	vec->capacity = num + CVEC_float_SZ;
	vec->size = num;
	vec->a = (float*)CVEC_MALLOC(vec->capacity*sizeof(float));
	if (!vec->a) {
		CVEC_ASSERT(vec->a != NULL);
		CVEC_FREE(vec);
		return NULL;
	}
	CVEC_MEMMOVE(vec->a, vals, sizeof(float)*num);
	return vec;
}
/** Initialize a caller-owned vector struct, allocating only the array.
 *  Same capacity policy as cvec_float_heap.  On allocation failure size and
 *  capacity are zeroed and 0 is returned; returns 1 on success. */
int cvec_float(cvector_float* vec, size_t size, size_t capacity)
{
	vec->size = size;
	if (capacity > size || (size && capacity == size)) {
		vec->capacity = capacity;
	} else {
		vec->capacity = size + CVEC_float_SZ;
	}
	vec->a = (float*)CVEC_MALLOC(vec->capacity*sizeof(float));
	if (!vec->a) {
		CVEC_ASSERT(vec->a != NULL);
		vec->size = vec->capacity = 0;
		return 0;
	}
	return 1;
}
/** Initialize a caller-owned vector with a copy of the num floats at vals.
 *  On allocation failure size and capacity are zeroed and 0 is returned. */
int cvec_init_float(cvector_float* vec, float* vals, size_t num)
{
	vec->capacity = num + CVEC_float_SZ;
	vec->size = num;
	vec->a = (float*)CVEC_MALLOC(vec->capacity*sizeof(float));
	if (!vec->a) {
		CVEC_ASSERT(vec->a != NULL);
		vec->size = vec->capacity = 0;
		return 0;
	}
	CVEC_MEMMOVE(vec->a, vals, sizeof(float)*num);
	return 1;
}
/** Copy-construct: reset dest's fields (it may be uninitialized garbage),
 *  then delegate to cvec_copy_float.  void* parameters allow use as a
 *  generic copy callback. */
int cvec_copyc_float(void* dest, void* src)
{
	cvector_float* d = (cvector_float*)dest;
	d->a = NULL;
	d->size = 0;
	d->capacity = 0;
	return cvec_copy_float(d, (cvector_float*)src);
}
/** Copy src into an already-initialized dest, growing dest's array to src's
 *  capacity.  Returns 0 (dest unchanged) on allocation failure, 1 otherwise. */
int cvec_copy_float(cvector_float* dest, cvector_float* src)
{
	float* grown = (float*)CVEC_REALLOC(dest->a, src->capacity*sizeof(float));
	if (!grown) {
		CVEC_ASSERT(grown != NULL);
		return 0;
	}
	dest->a = grown;
	CVEC_MEMMOVE(dest->a, src->a, src->size*sizeof(float));
	dest->size = src->size;
	dest->capacity = src->capacity;
	return 1;
}
/** Append a to the end of the vector, growing the array via
 *  CVEC_float_ALLOCATOR when full.  Returns 0 on allocation failure. */
int cvec_push_float(cvector_float* vec, float a)
{
	if (vec->size >= vec->capacity) {
		size_t grown_cap = CVEC_float_ALLOCATOR(vec->capacity);
		float* grown = (float*)CVEC_REALLOC(vec->a, sizeof(float)*grown_cap);
		if (!grown) {
			CVEC_ASSERT(grown != NULL);
			return 0;
		}
		vec->a = grown;
		vec->capacity = grown_cap;
	}
	vec->a[vec->size++] = a;
	return 1;
}
/** Remove and return the last element (undefined if the vector is empty). */
float cvec_pop_float(cvector_float* vec)
{
	vec->size--;
	return vec->a[vec->size];
}
/** Return a pointer to the last element (undefined if the vector is empty). */
float* cvec_back_float(cvector_float* vec)
{
	return vec->a + (vec->size - 1);
}
/** Grow size by num (new elements uninitialized), reallocating with
 *  CVEC_float_SZ slack if needed.  Returns 0 on allocation failure. */
int cvec_extend_float(cvector_float* vec, size_t num)
{
	if (vec->size + num > vec->capacity) {
		size_t grown_cap = vec->capacity + num + CVEC_float_SZ;
		float* grown = (float*)CVEC_REALLOC(vec->a, sizeof(float)*grown_cap);
		if (!grown) {
			CVEC_ASSERT(grown != NULL);
			return 0;
		}
		vec->a = grown;
		vec->capacity = grown_cap;
	}
	vec->size += num;
	return 1;
}
/** Insert a at index i, shifting elements [i, size) up by one.  Grows the
 *  array via CVEC_float_ALLOCATOR when full.  Returns 0 on allocation
 *  failure (vector unchanged). */
int cvec_insert_float(cvector_float* vec, size_t i, float a)
{
	if (vec->size >= vec->capacity) {
		size_t grown_cap = CVEC_float_ALLOCATOR(vec->capacity);
		float* grown = (float*)CVEC_REALLOC(vec->a, sizeof(float)*grown_cap);
		if (!grown) {
			CVEC_ASSERT(grown != NULL);
			return 0;
		}
		vec->a = grown;
		vec->capacity = grown_cap;
	}
	CVEC_MEMMOVE(&vec->a[i+1], &vec->a[i], (vec->size-i)*sizeof(float));
	vec->a[i] = a;
	vec->size++;
	return 1;
}
/** Insert num elements from a at index i, shifting the tail up by num.
 *  Returns 0 on allocation failure (vector unchanged). */
int cvec_insert_array_float(cvector_float* vec, size_t i, float* a, size_t num)
{
	if (vec->size + num > vec->capacity) {
		size_t grown_cap = vec->capacity + num + CVEC_float_SZ;
		float* grown = (float*)CVEC_REALLOC(vec->a, sizeof(float)*grown_cap);
		if (!grown) {
			CVEC_ASSERT(grown != NULL);
			return 0;
		}
		vec->a = grown;
		vec->capacity = grown_cap;
	}
	/* open a gap of num slots, then copy the new elements into it */
	CVEC_MEMMOVE(&vec->a[i+num], &vec->a[i], (vec->size-i)*sizeof(float));
	CVEC_MEMMOVE(&vec->a[i], a, num*sizeof(float));
	vec->size += num;
	return 1;
}
/** Overwrite the element at index i with a and return the previous value. */
float cvec_replace_float(cvector_float* vec, size_t i, float a)
{
	float old_val = vec->a[i];
	vec->a[i] = a;
	return old_val;
}
/** Remove elements in the inclusive range [start, end], shifting the tail
 *  down.  Capacity is unchanged. */
void cvec_erase_float(cvector_float* vec, size_t start, size_t end)
{
	size_t num_removed = end - start + 1;
	size_t tail = vec->size - 1 - end; /* elements after the erased range */
	CVEC_MEMMOVE(&vec->a[start], &vec->a[end+1], tail*sizeof(float));
	vec->size -= num_removed;
}
/** Ensure capacity for at least size elements (allocating CVEC_float_SZ
 *  slack beyond it).  No-op if already large enough.  Returns 0 on
 *  allocation failure. */
int cvec_reserve_float(cvector_float* vec, size_t size)
{
	if (vec->capacity >= size) {
		return 1;
	}
	size_t grown_cap = size + CVEC_float_SZ;
	float* grown = (float*)CVEC_REALLOC(vec->a, sizeof(float)*grown_cap);
	if (!grown) {
		CVEC_ASSERT(grown != NULL);
		return 0;
	}
	vec->a = grown;
	vec->capacity = grown_cap;
	return 1;
}
/** Set capacity to exactly size, truncating the vector's size if needed.
 *  Returns 0 on allocation failure (vector unchanged), 1 on success. */
int cvec_set_cap_float(cvector_float* vec, size_t size)
{
	float* tmp;
	if (size < vec->size) {
		vec->size = size;
	}
	if (!size) {
		/* BUG FIX: realloc(p, 0) is implementation-defined -- it may free p
		 * and return NULL, which the error path below would misread as
		 * failure while leaving vec->a dangling.  Handle shrink-to-zero
		 * explicitly instead. */
		CVEC_FREE(vec->a);
		vec->a = NULL;
		vec->capacity = 0;
		return 1;
	}
	if (!(tmp = (float*)CVEC_REALLOC(vec->a, sizeof(float)*size))) {
		CVEC_ASSERT(tmp != NULL);
		return 0;
	}
	vec->a = tmp;
	vec->capacity = size;
	return 1;
}
/** Set the first size elements (the in-use portion) to val. */
void cvec_set_val_sz_float(cvector_float* vec, float val)
{
	float* p = vec->a;
	float* end = vec->a + vec->size;
	while (p != end) {
		*p++ = val;
	}
}
/** Set the entire allocated array (all capacity slots) to val. */
void cvec_set_val_cap_float(cvector_float* vec, float val)
{
	float* p = vec->a;
	float* end = vec->a + vec->capacity;
	while (p != end) {
		*p++ = val;
	}
}
/** Logically empty the vector; the allocation and capacity are kept. */
void cvec_clear_float(cvector_float* vec)
{
	vec->size = 0;
}
/** Free a heap-allocated vector (array and struct).  NULL is a no-op. */
void cvec_free_float_heap(void* vec)
{
	cvector_float* v = (cvector_float*)vec;
	if (!v) {
		return;
	}
	CVEC_FREE(v->a);
	CVEC_FREE(v);
}
/** Free the array of a caller-owned vector and reset it to a valid empty
 *  state.  NULL is a no-op (matching cvec_free_float_heap). */
void cvec_free_float(void* vec)
{
	cvector_float* tmp = (cvector_float*)vec;
	if (!tmp) return;
	CVEC_FREE(tmp->a);
	/* BUG FIX: null the pointer so the struct is not left dangling; a later
	 * accidental use or second free would otherwise be a double-free. */
	tmp->a = NULL;
	tmp->size = 0;
	tmp->capacity = 0;
}
#endif
// The single global GL context that all internal pipeline functions operate on.
static glContext* c;

// Forward declarations for the software rasterization pipeline.
static Color blend_pixel(vec4 src, vec4 dst);
static void draw_pixel_vec2(vec4 cf, vec2 pos, float z);
static void draw_pixel(vec4 cf, int x, int y, float z);
static void run_pipeline(GLenum mode, GLint first, GLsizei count, GLsizei instance, GLuint base_instance, GLboolean use_elements);
static void draw_triangle_clip(glVertex* v0, glVertex* v1, glVertex* v2, unsigned int provoke, int clip_bit);
static void draw_triangle_point(glVertex* v0, glVertex* v1, glVertex* v2, unsigned int provoke);
static void draw_triangle_line(glVertex* v0, glVertex* v1, glVertex* v2, unsigned int provoke);
static void draw_triangle_fill(glVertex* v0, glVertex* v1, glVertex* v2, unsigned int provoke);
static void draw_triangle_final(glVertex* v0, glVertex* v1, glVertex* v2, unsigned int provoke);
static void draw_triangle(glVertex* v0, glVertex* v1, glVertex* v2, unsigned int provoke);
static void draw_line_clip(glVertex* v1, glVertex* v2);
static void draw_line_shader(vec4 v1, vec4 v2, float* v1_out, float* v2_out, unsigned int provoke);
static void draw_line_smooth_shader(vec4 v1, vec4 v2, float* v1_out, float* v2_out, unsigned int provoke);

/* this clip epsilon is needed to avoid some rounding errors after
   several clipping stages */
#define CLIP_EPSILON (1E-5)
// Compute a 6-bit outcode for pt against the clip volume, slightly enlarged
// by CLIP_EPSILON to absorb rounding from repeated clipping stages:
//   bit 0: z < -w    bit 1: z > w    (both masked off when depth clamping)
//   bit 2: x < -w    bit 3: x > w
//   bit 4: y < -w    bit 5: y > w
// An outcode of 0 means the point is fully inside the frustum.
static inline int gl_clipcode(vec4 pt)
{
	float w;
	w = pt.w * (1.0 + CLIP_EPSILON);
	return
		(((pt.z < -w) |
		((pt.z > w) << 1)) &
		// mask is 0b11 when depth_clamp is off; 0 when on, dropping the z bits
		(!c->depth_clamp |
		!c->depth_clamp << 1)) |
		((pt.x < -w) << 2) |
		((pt.x > w) << 3) |
		((pt.y < -w) << 4) |
		((pt.y > w) << 5);
}
// Decide facing from the triangle's winding in window coordinates (see
// glspec33.core section 3.6.1, p.116): the screen-space normal's z component
// tells the winding direction, flipped when the front face is GL_CW.
// Returns 1 for front-facing, 0 for back-facing.
//TODO bug where it doesn't correctly cull if part of the triangle goes behind eye
static int is_front_facing(glVertex* v0, glVertex* v1, glVertex* v2)
{
	vec3 z_axis = { 0, 0, 1 };
	vec3 p0 = vec4_to_vec3h(v0->screen_space);
	vec3 p1 = vec4_to_vec3h(v1->screen_space);
	vec3 p2 = vec4_to_vec3h(v2->screen_space);
	// normal of the screen-space triangle; its z sign encodes the winding
	vec3 n = cross_product(sub_vec3s(p1, p0), sub_vec3s(p2, p0));
	if (c->front_face == GL_CW) {
		n = negate_vec3(n);
	}
	return dot_vec3s(n, z_axis) > 0;
}
// Fetch the enabled, non-instanced attributes for source vertex i from their
// buffers, run the vertex shader, and record its outputs plus clip-space
// position/outcode into c->glverts.a[vert].
static void do_vertex(glVertex_Attrib* v, int* enabled, unsigned int num_enabled, unsigned int i, unsigned int vert)
{
	GLuint buf;
	u8* buf_pos;
	vec4 tmpvec4;
	// copy/prep vertex attributes from buffers into appropriate positions for vertex shader to access
	for (int j=0; j<num_enabled; ++j) {
		buf = v[enabled[j]].buf;
		buf_pos = (u8*)c->buffers.a[buf].data + v[enabled[j]].offset + v[enabled[j]].stride*i;
		// default attribute is (0,0,0,1); components beyond .size keep the default
		SET_VEC4(tmpvec4, 0.0f, 0.0f, 0.0f, 1.0f);
		memcpy(&tmpvec4, buf_pos, sizeof(float)*v[enabled[j]].size);
		c->vertex_attribs_vs[enabled[j]] = tmpvec4;
	}
	// each vertex gets a slice of the shared output buffer for its vs outputs
	float* vs_out = &c->vs_output.output_buf.a[vert*c->vs_output.size];
	c->programs.a[c->cur_program].vertex_shader(vs_out, c->vertex_attribs_vs, &c->builtins, c->programs.a[c->cur_program].uniform);
	c->glverts.a[vert].vs_out = vs_out;
	c->glverts.a[vert].clip_space = c->builtins.gl_Position;
	c->glverts.a[vert].edge_flag = 1;
	// cache the outcode so later stages can trivially accept/reject
	c->glverts.a[vert].clip_code = gl_clipcode(c->builtins.gl_Position);
}
// Run the vertex shader over the draw range.  Instanced attributes (nonzero
// divisor) are loaded once here per instance; per-vertex attributes are
// gathered into 'enabled' and loaded in do_vertex().  With use_elements the
// indices come from the bound element buffer (ubyte/ushort/uint).
static void vertex_stage(GLint first, GLsizei count, GLsizei instance_id, GLuint base_instance, GLboolean use_elements)
{
	unsigned int i, j, vert, num_enabled;
	u8* buf_pos;
	//save checking if enabled on every loop if we build this first
	//also initialize the vertex_attrib space
	float vec4_init[] = { 0.0f, 0.0f, 0.0f, 1.0f };
	int enabled[GL_MAX_VERTEX_ATTRIBS] = { 0 };
	glVertex_Attrib* v = c->vertex_arrays.a[c->cur_vertex_array].vertex_attribs;
	GLuint elem_buffer = c->vertex_arrays.a[c->cur_vertex_array].element_buffer;
	for (i=0, j=0; i<GL_MAX_VERTEX_ATTRIBS; ++i) {
		if (v[i].enabled) {
			if (v[i].divisor == 0) {
				// no need to set to default vector here because it's handled in do_vertex()
				enabled[j++] = i;
			} else if (!(instance_id % v[i].divisor)) { //set instanced attributes if necessary
				// only reset to default vector right before updating, because
				// it has to stay the same across multiple instances for divisors > 1
				memcpy(&c->vertex_attribs_vs[i], vec4_init, sizeof(vec4));
				int n = instance_id/v[i].divisor + base_instance;
				buf_pos = (u8*)c->buffers.a[v[i].buf].data + v[i].offset + v[i].stride*n;
				memcpy(&c->vertex_attribs_vs[i], buf_pos, sizeof(float)*v[i].size);
			}
		}
	}
	num_enabled = j;
	cvec_reserve_glVertex(&c->glverts, count);
	c->builtins.gl_InstanceID = instance_id;
	if (!use_elements) {
		// non-indexed: vertex i maps straight to output slot vert
		for (vert=0, i=first; i<first+count; ++i, ++vert) {
			do_vertex(v, enabled, num_enabled, i, vert);
		}
	} else {
		// indexed: reinterpret the element buffer at the bound index type
		GLuint* uint_array = (GLuint*) c->buffers.a[elem_buffer].data;
		GLushort* ushort_array = (GLushort*) c->buffers.a[elem_buffer].data;
		GLubyte* ubyte_array = (GLubyte*) c->buffers.a[elem_buffer].data;
		if (c->buffers.a[elem_buffer].type == GL_UNSIGNED_BYTE) {
			for (vert=0, i=first; i<first+count; ++i, ++vert) {
				do_vertex(v, enabled, num_enabled, ubyte_array[i], vert);
			}
		} else if (c->buffers.a[elem_buffer].type == GL_UNSIGNED_SHORT) {
			for (vert=0, i=first; i<first+count; ++i, ++vert) {
				do_vertex(v, enabled, num_enabled, ushort_array[i], vert);
			}
		} else {
			for (vert=0, i=first; i<first+count; ++i, ++vert) {
				do_vertex(v, enabled, num_enabled, uint_array[i], vert);
			}
		}
	}
}
//TODO make fs_input static? or a member of glContext?
// Rasterize a point primitive: run the fragment shader over the p_size x
// p_size square of pixels centered on the vertex's window position, setting
// gl_PointCoord per pixel.  vert->screen_space must already be set.
static void draw_point(glVertex* vert)
{
	float fs_input[GL_MAX_VERTEX_OUTPUT_COMPONENTS];
	vec3 point = vec4_to_vec3h(vert->screen_space);
	// NDC z -> depth range
	point.z = MAP(point.z, -1.0f, 1.0f, c->depth_range_near, c->depth_range_far);
	//TODO not sure if I'm supposed to do this ... doesn't say to in spec but it is called depth clamping
	//but I don't do it for lines or triangles (at least in fill or line mode)
	if (c->depth_clamp)
		point.z = clampf_01(point.z);
	//TODO why not just pass vs_output directly? hmmm...
	memcpy(fs_input, vert->vs_out, c->vs_output.size*sizeof(float));
	//accounting for pixel centers at 0.5, using truncation
	float x = point.x + 0.5f;
	float y = point.y + 0.5f;
	float p_size = c->point_size;
	// point-sprite origin flips the v direction of gl_PointCoord
	float origin = (c->point_spr_origin == GL_UPPER_LEFT) ? -1.0f : 1.0f;
	// Can easily clip whole point when point size <= 1 ...
	if (p_size <= 1) {
		if (x < 0 || y < 0 || x >= c->back_buffer.w || y >= c->back_buffer.h)
			return;
	}
	for (float i = y-p_size/2; i<y+p_size/2; ++i) {
		if (i < 0 || i >= c->back_buffer.h)
			continue;
		for (float j = x-p_size/2; j<x+p_size/2; ++j) {
			if (j < 0 || j >= c->back_buffer.w)
				continue;
			// per page 110 of 3.3 spec
			c->builtins.gl_PointCoord.x = 0.5f + ((int)j + 0.5f - point.x)/p_size;
			c->builtins.gl_PointCoord.y = 0.5f + origin * ((int)i + 0.5f - point.y)/p_size;
			SET_VEC4(c->builtins.gl_FragCoord, j, i, point.z, 1/vert->screen_space.w);
			c->builtins.discard = GL_FALSE;
			c->builtins.gl_FragDepth = point.z;
			c->programs.a[c->cur_program].fragment_shader(fs_input, &c->builtins, c->programs.a[c->cur_program].uniform);
			if (!c->builtins.discard)
				draw_pixel(c->builtins.gl_FragColor, j, i, c->builtins.gl_FragDepth);
		}
	}
}
// Execute one draw call: run the vertex stage over [first, first+count),
// then assemble and rasterize primitives according to mode.  The 'provoke'
// offsets map a primitive's last/first vertex to its provoking vertex, used
// for FLAT-interpolated outputs.
static void run_pipeline(GLenum mode, GLint first, GLsizei count, GLsizei instance, GLuint base_instance, GLboolean use_elements)
{
	unsigned int i, vert;
	int provoke;
	assert(count <= MAX_VERTICES);
	vertex_stage(first, count, instance, base_instance, use_elements);
	//fragment portion
	if (mode == GL_POINTS) {
		for (vert=0, i=first; i<first+count; ++i, ++vert) {
			// a nonzero outcode means the point is outside the frustum
			if (c->glverts.a[vert].clip_code)
				continue;
			c->glverts.a[vert].screen_space = mult_mat4_vec4(c->vp_mat, c->glverts.a[vert].clip_space);
			draw_point(&c->glverts.a[vert]);
		}
	} else if (mode == GL_LINES) {
		for (vert=0, i=first; i<first+count-1; i+=2, vert+=2) {
			draw_line_clip(&c->glverts.a[vert], &c->glverts.a[vert+1]);
		}
	} else if (mode == GL_LINE_STRIP) {
		for (vert=0, i=first; i<first+count-1; i++, vert++) {
			draw_line_clip(&c->glverts.a[vert], &c->glverts.a[vert+1]);
		}
	} else if (mode == GL_LINE_LOOP) {
		for (vert=0, i=first; i<first+count-1; i++, vert++) {
			draw_line_clip(&c->glverts.a[vert], &c->glverts.a[vert+1]);
		}
		//draw ending line from last to first point
		draw_line_clip(&c->glverts.a[count-1], &c->glverts.a[0]);
	} else if (mode == GL_TRIANGLES) {
		provoke = (c->provoking_vert == GL_LAST_VERTEX_CONVENTION) ? 2 : 0;
		for (vert=0, i=first; i<first+count-2; i+=3, vert+=3) {
			draw_triangle(&c->glverts.a[vert], &c->glverts.a[vert+1], &c->glverts.a[vert+2], vert+provoke);
		}
	} else if (mode == GL_TRIANGLE_STRIP) {
		// a and b hold the two vertices shared with the previous triangle;
		// toggle alternates which is replaced so the winding stays consistent
		unsigned int a=0, b=1, toggle = 0;
		provoke = (c->provoking_vert == GL_LAST_VERTEX_CONVENTION) ? 0 : -2;
		for (vert=2; vert<count; ++vert) {
			draw_triangle(&c->glverts.a[a], &c->glverts.a[b], &c->glverts.a[vert], vert+provoke);
			if (!toggle)
				a = vert;
			else
				b = vert;
			toggle = !toggle;
		}
	} else if (mode == GL_TRIANGLE_FAN) {
		provoke = (c->provoking_vert == GL_LAST_VERTEX_CONVENTION) ? 0 : -1;
		for (vert=2; vert<count; ++vert) {
			// every triangle shares vertex 0 (the fan center)
			draw_triangle(&c->glverts.a[0], &c->glverts.a[vert-1], &c->glverts.a[vert], vert+provoke);
		}
	}
}
// Evaluate the current depth function for a fragment depth zval against the
// stored depth-buffer value zbufval.  Returns nonzero if the fragment passes.
// When the depth mask is off the test always reports failure.
// TODO not sure if I should do this since it's supposed to prevent writing to the buffer
// but not afaik, change the result of the test
static int depthtest(float zval, float zbufval)
{
	if (!c->depth_mask) return 0;

	switch (c->depth_func) {
	case GL_ALWAYS:   return 1;
	case GL_NEVER:    return 0;
	case GL_LESS:     return zval <  zbufval;
	case GL_LEQUAL:   return zval <= zbufval;
	case GL_GREATER:  return zval >  zbufval;
	case GL_GEQUAL:   return zval >= zbufval;
	case GL_EQUAL:    return zval == zbufval;
	case GL_NOTEQUAL: return zval != zbufval;
	}
	return 0; // unreachable for valid depth_func; silences compiler warning
}
// Build c->fs_input for a fragment at parameter t along a line from endpoint
// a to endpoint b (wa/wb are the endpoints' clip-space w).  SMOOTH outputs
// are interpolated perspective-correctly via 1/w weighting; NOPERSPECTIVE
// outputs are lerped directly in window space; everything else (FLAT) is
// taken from the provoking vertex.  Also resets the discard builtin.
static void setup_fs_input(float t, float* v1_out, float* v2_out, float wa, float wb, unsigned int provoke)
{
	float* vs_output = &c->vs_output.output_buf.a[0];
	float inv_wa = 1.0/wa;
	float inv_wb = 1.0/wb;
	for (int i=0; i<c->vs_output.size; ++i) {
		if (c->vs_output.interpolation[i] == SMOOTH) {
			// perspective-correct: lerp attrib/w and 1/w, then divide
			c->fs_input[i] = (v1_out[i]*inv_wa + t*(v2_out[i]*inv_wb - v1_out[i]*inv_wa)) / (inv_wa + t*(inv_wb - inv_wa));
		} else if (c->vs_output.interpolation[i] == NOPERSPECTIVE) {
			c->fs_input[i] = v1_out[i] + t*(v2_out[i] - v1_out[i]);
		} else {
			// FLAT: constant over the primitive, from the provoking vertex
			c->fs_input[i] = vs_output[provoke*c->vs_output.size + i];
		}
	}
	c->builtins.discard = GL_FALSE;
}
/* Line clipping step from 'Computer Graphics: Principles and Practice'
 * (Liang-Barsky style).  Clips the parametric segment [*tmin, *tmax]
 * against one boundary described by denom and num: denom > 0 means the
 * segment is entering (may raise *tmin), denom < 0 means it is leaving
 * (may lower *tmax), denom == 0 means it is parallel to the boundary and
 * visible only when num <= 0.  Returns 0 when the segment is fully
 * rejected, 1 otherwise. */
static inline int clip_line(float denom, float num, float* tmin, float* tmax)
{
	if (denom == 0) {
		return num <= 0;
	}

	float t = num / denom;
	if (denom > 0) {
		/* entering: reject if past the current exit, else raise entry */
		if (t > *tmax) return 0;
		if (t > *tmin) *tmin = t;
	} else {
		/* leaving: reject if before the current entry, else lower exit */
		if (t < *tmin) return 0;
		if (t < *tmax) *tmax = t;
	}
	return 1;
}
// Re-interpolate all vertex shader outputs to the clipped endpoints at
// parameters tmin and tmax along the original v1->v2 segment.
static void interpolate_clipped_line(glVertex* v1, glVertex* v2, float* v1_out, float* v2_out, float tmin, float tmax)
{
	int n = c->vs_output.size;
	for (int i=0; i<n; ++i) {
		v1_out[i] = v1->vs_out[i] + (v2->vs_out[i] - v1->vs_out[i])*tmin;
		v2_out[i] = v1->vs_out[i] + (v2->vs_out[i] - v1->vs_out[i])*tmax;
	}
}
// Clip the segment v1-v2 against the view volume plane by plane (using
// clip_line()), re-interpolate the vertex outputs to the clipped endpoints,
// and hand the visible portion to the line rasterizer.
static void draw_line_clip(glVertex* v1, glVertex* v2)
{
	int cc1, cc2;
	vec4 d, p1, p2, t1, t2;
	float tmin, tmax;
	cc1 = v1->clip_code;
	cc2 = v2->clip_code;
	p1 = v1->clip_space;
	p2 = v2->clip_space;
	float v1_out[GL_MAX_VERTEX_OUTPUT_COMPONENTS];
	float v2_out[GL_MAX_VERTEX_OUTPUT_COMPONENTS];
	//TODO ponder this
	unsigned int provoke;
	// BUG FIX: pointer subtraction already yields an element index, so the
	// old "(v2 - c->glverts.a)/sizeof(glVertex)" divided by the element size
	// twice, collapsing most provoking-vertex indices to 0 and breaking
	// FLAT-interpolated outputs for lines.
	if (c->provoking_vert == GL_LAST_VERTEX_CONVENTION)
		provoke = v2 - c->glverts.a;
	else
		provoke = v1 - c->glverts.a;
	if (cc1 & cc2) {
		// both endpoints share an outside plane: trivially rejected
		return;
	} else if ((cc1 | cc2) == 0) {
		// fully inside: straight to screen space, no re-interpolation needed
		t1 = mult_mat4_vec4(c->vp_mat, p1);
		t2 = mult_mat4_vec4(c->vp_mat, p2);
		if (!c->line_smooth)
			draw_line_shader(t1, t2, v1->vs_out, v2->vs_out, provoke);
		else
			draw_line_smooth_shader(t1, t2, v1->vs_out, v2->vs_out, provoke);
	} else {
		d = sub_vec4s(p2, p1);
		tmin = 0;
		tmax = 1;
		// clip against all six planes; any rejection kills the whole line
		if (clip_line( d.x+d.w, -p1.x-p1.w, &tmin, &tmax) &&
		    clip_line(-d.x+d.w,  p1.x-p1.w, &tmin, &tmax) &&
		    clip_line( d.y+d.w, -p1.y-p1.w, &tmin, &tmax) &&
		    clip_line(-d.y+d.w,  p1.y-p1.w, &tmin, &tmax) &&
		    clip_line( d.z+d.w, -p1.z-p1.w, &tmin, &tmax) &&
		    clip_line(-d.z+d.w,  p1.z-p1.w, &tmin, &tmax)) {
			t1 = add_vec4s(p1, scale_vec4(d, tmin));
			t2 = add_vec4s(p1, scale_vec4(d, tmax));
			t1 = mult_mat4_vec4(c->vp_mat, t1);
			t2 = mult_mat4_vec4(c->vp_mat, t2);
			interpolate_clipped_line(v1, v2, v1_out, v2_out, tmin, tmax);
			if (!c->line_smooth)
				draw_line_shader(t1, t2, v1_out, v2_out, provoke);
			else
				draw_line_smooth_shader(t1, t2, v1_out, v2_out, provoke);
		}
	}
}
// Rasterize the line segment v1-v2 (already in window space; .w carries the
// clip-space w for perspective-correct interpolation), running the fragment
// shader per pixel.  The segment is normalized to run left to right, then
// one of four stepping loops is used depending on slope.  Depth test/write
// happens later inside draw_pixel().
static void draw_line_shader(vec4 v1, vec4 v2, float* v1_out, float* v2_out, unsigned int provoke)
{
	float tmp;
	float* tmp_ptr;
	vec3 hp1 = vec4_to_vec3h(v1);
	vec3 hp2 = vec4_to_vec3h(v2);
	//print_vec3(hp1, "\n");
	//print_vec3(hp2, "\n");
	float w1 = v1.w;
	float w2 = v2.w;
	float x1 = hp1.x, x2 = hp2.x, y1 = hp1.y, y2 = hp2.y;
	float z1 = hp1.z, z2 = hp2.z;
	//always draw from left to right
	if (x2 < x1) {
		tmp = x1;
		x1 = x2;
		x2 = tmp;
		tmp = y1;
		y1 = y2;
		y2 = tmp;
		tmp = z1;
		z1 = z2;
		z2 = tmp;
		tmp = w1;
		w1 = w2;
		w2 = tmp;
		tmp_ptr = v1_out;
		v1_out = v2_out;
		v2_out = tmp_ptr;
	}
	//calculate slope and implicit line parameters once
	//could just use my Line type/constructor as in draw_triangle
	float m = (y2-y1)/(x2-x1);
	Line line = make_Line(x1, y1, x2, y2);
	float t, x, y, z, w;
	// t along the segment is found by projecting the pixel onto p1->p2
	vec2 p1 = { x1, y1 }, p2 = { x2, y2 };
	vec2 pr, sub_p2p1 = sub_vec2s(p2, p1);
	float line_length_squared = length_vec2(sub_p2p1);
	line_length_squared *= line_length_squared;
	frag_func fragment_shader = c->programs.a[c->cur_program].fragment_shader;
	void* uniform = c->programs.a[c->cur_program].uniform;
	// NOTE(review): fragdepth_or_discard is never read below -- presumably
	// vestigial now that depth handling lives in draw_pixel(); confirm.
	int fragdepth_or_discard = c->programs.a[c->cur_program].fragdepth_or_discard;
	// snap endpoints to pixel centers (x.5)
	float i_x1, i_y1, i_x2, i_y2;
	i_x1 = floor(p1.x) + 0.5;
	i_y1 = floor(p1.y) + 0.5;
	i_x2 = floor(p2.x) + 0.5;
	i_y2 = floor(p2.y) + 0.5;
	float x_min, x_max, y_min, y_max;
	x_min = i_x1;
	x_max = i_x2; //always left to right;
	if (m <= 0) {
		y_min = i_y2;
		y_max = i_y1;
	} else {
		y_min = i_y1;
		y_max = i_y2;
	}
	//printf("%f %f %f %f =\n", i_x1, i_y1, i_x2, i_y2);
	//printf("%f %f %f %f x_min etc\n", x_min, x_max, y_min, y_max);
	// NDC z -> depth range
	z1 = MAP(z1, -1.0f, 1.0f, c->depth_range_near, c->depth_range_far);
	z2 = MAP(z2, -1.0f, 1.0f, c->depth_range_near, c->depth_range_far);
	//4 cases based on slope
	if (m <= -1) { //(-infinite, -1]
		//printf("slope <= -1\n");
		// steep downhill: step y down every iteration, x right when the
		// implicit line function says the midpoint is passed
		for (x = x_min, y = y_max; y>=y_min && x<=x_max; --y) {
			pr.x = x;
			pr.y = y;
			t = dot_vec2s(sub_vec2s(pr, p1), sub_p2p1) / line_length_squared;
			z = (1 - t) * z1 + t * z2;
			w = (1 - t) * w1 + t * w2;
			SET_VEC4(c->builtins.gl_FragCoord, x, y, z, 1/w);
			c->builtins.discard = GL_FALSE;
			c->builtins.gl_FragDepth = z;
			setup_fs_input(t, v1_out, v2_out, w1, w2, provoke);
			fragment_shader(c->fs_input, &c->builtins, uniform);
			if (!c->builtins.discard)
				draw_pixel(c->builtins.gl_FragColor, x, y, c->builtins.gl_FragDepth);
line_1:		// (label appears unused; likely leftover from debugging)
			if (line_func(&line, x+0.5f, y-1) < 0) //A*(x+0.5f) + B*(y-1) + C < 0)
				++x;
		}
	} else if (m <= 0) { //(-1, 0]
		//printf("slope = (-1, 0]\n");
		// shallow downhill: step x right, y down on midpoint test
		for (x = x_min, y = y_max; x<=x_max && y>=y_min; ++x) {
			pr.x = x;
			pr.y = y;
			t = dot_vec2s(sub_vec2s(pr, p1), sub_p2p1) / line_length_squared;
			z = (1 - t) * z1 + t * z2;
			w = (1 - t) * w1 + t * w2;
			SET_VEC4(c->builtins.gl_FragCoord, x, y, z, 1/w);
			c->builtins.discard = GL_FALSE;
			c->builtins.gl_FragDepth = z;
			setup_fs_input(t, v1_out, v2_out, w1, w2, provoke);
			fragment_shader(c->fs_input, &c->builtins, uniform);
			if (!c->builtins.discard)
				draw_pixel(c->builtins.gl_FragColor, x, y, c->builtins.gl_FragDepth);
line_2:		// (label appears unused)
			if (line_func(&line, x+1, y-0.5f) > 0) //A*(x+1) + B*(y-0.5f) + C > 0)
				--y;
		}
	} else if (m <= 1) { //(0, 1]
		//printf("slope = (0, 1]\n");
		// shallow uphill: step x right, y up on midpoint test
		for (x = x_min, y = y_min; x <= x_max && y <= y_max; ++x) {
			pr.x = x;
			pr.y = y;
			t = dot_vec2s(sub_vec2s(pr, p1), sub_p2p1) / line_length_squared;
			z = (1 - t) * z1 + t * z2;
			w = (1 - t) * w1 + t * w2;
			SET_VEC4(c->builtins.gl_FragCoord, x, y, z, 1/w);
			c->builtins.discard = GL_FALSE;
			c->builtins.gl_FragDepth = z;
			setup_fs_input(t, v1_out, v2_out, w1, w2, provoke);
			fragment_shader(c->fs_input, &c->builtins, uniform);
			if (!c->builtins.discard)
				draw_pixel(c->builtins.gl_FragColor, x, y, c->builtins.gl_FragDepth);
line_3:		// (label appears unused)
			if (line_func(&line, x+1, y+0.5f) < 0) //A*(x+1) + B*(y+0.5f) + C < 0)
				++y;
		}
	} else { //(1, +infinite)
		//printf("slope > 1\n");
		// steep uphill: step y up, x right on midpoint test
		for (x = x_min, y = y_min; y<=y_max && x <= x_max; ++y) {
			pr.x = x;
			pr.y = y;
			t = dot_vec2s(sub_vec2s(pr, p1), sub_p2p1) / line_length_squared;
			z = (1 - t) * z1 + t * z2;
			w = (1 - t) * w1 + t * w2;
			SET_VEC4(c->builtins.gl_FragCoord, x, y, z, 1/w);
			c->builtins.discard = GL_FALSE;
			c->builtins.gl_FragDepth = z;
			setup_fs_input(t, v1_out, v2_out, w1, w2, provoke);
			fragment_shader(c->fs_input, &c->builtins, uniform);
			if (!c->builtins.discard)
				draw_pixel(c->builtins.gl_FragColor, x, y, c->builtins.gl_FragDepth);
line_4:		// (label appears unused)
			if (line_func(&line, x+0.5f, y+1) > 0) //A*(x+0.5f) + B*(y+1) + C > 0)
				++x;
		}
	}
}
// WARNING: this function is subject to serious change or removal and is currently unused (GL_LINE_SMOOTH unsupported)
// TODO do it right, handle depth test correctly since we moved it into draw_pixel
// Antialiased line rasterizer (structured like Wu's algorithm: normalize to
// a shallow left-to-right line, draw both endpoints covering two pixels
// each, then walk x with a fractional y intercept).  Depth is tested/written
// inline here rather than in draw_pixel(), hence the WARNING above.
static void draw_line_smooth_shader(vec4 v1, vec4 v2, float* v1_out, float* v2_out, unsigned int provoke)
{
	float tmp;
	float* tmp_ptr;
	frag_func fragment_shader = c->programs.a[c->cur_program].fragment_shader;
	void* uniform = c->programs.a[c->cur_program].uniform;
	int fragdepth_or_discard = c->programs.a[c->cur_program].fragdepth_or_discard;
	vec3 hp1 = vec4_to_vec3h(v1);
	vec3 hp2 = vec4_to_vec3h(v2);
	float x1 = hp1.x, x2 = hp2.x, y1 = hp1.y, y2 = hp2.y;
	float z1 = hp1.z, z2 = hp2.z;
	float w1 = v1.w;
	float w2 = v2.w;
	int x, j;
	// steep lines are traversed with x/y swapped so the main loop always
	// steps along the major axis
	int steep = fabsf(y2 - y1) > fabsf(x2 - x1);
	if (steep) {
		tmp = x1;
		x1 = y1;
		y1 = tmp;
		tmp = x2;
		x2 = y2;
		y2 = tmp;
	}
	if (x1 > x2) {
		tmp = x1;
		x1 = x2;
		x2 = tmp;
		tmp = y1;
		y1 = y2;
		y2 = tmp;
		tmp = z1;
		z1 = z2;
		z2 = tmp;
		tmp = w1;
		w1 = w2;
		w2 = tmp;
		tmp_ptr = v1_out;
		v1_out = v2_out;
		v2_out = tmp_ptr;
	}
	float dx = x2 - x1;
	float dy = y2 - y1;
	float gradient = dy / dx;
	// handle first endpoint: covers the pixel pair at (xpxl1, ypxl1(+1))
	float xend = x1 + 0.5f;
	float yend = y1 + gradient * (xend - x1);
	float xgap = 1.0 - modff(x1 + 0.5, &tmp);
	float xpxl1 = xend;
	float ypxl1;
	modff(yend, &ypxl1);
	//choose to compare against just one pixel for depth test instead of both
	z1 = MAP(z1, -1.0f, 1.0f, c->depth_range_near, c->depth_range_far);
	if (steep) {
		if (!c->depth_test || (!fragdepth_or_discard &&
			depthtest(z1, ((float*)c->zbuf.lastrow)[-(int)xpxl1*c->zbuf.w + (int)ypxl1]))) {
			if (!c->fragdepth_or_discard && c->depth_test) { //hate this double check but depth buf is only update if enabled
				((float*)c->zbuf.lastrow)[-(int)xpxl1*c->zbuf.w + (int)ypxl1] = z1;
				((float*)c->zbuf.lastrow)[-(int)xpxl1*c->zbuf.w + (int)(ypxl1+1)] = z1;
			}
			SET_VEC4(c->builtins.gl_FragCoord, ypxl1, xpxl1, z1, 1/w1);
			setup_fs_input(0, v1_out, v2_out, w1, w2, provoke);
			fragment_shader(c->fs_input, &c->builtins, uniform);
			//fragcolor.w = (1.0 - modff(yend, &tmp)) * xgap;
			if (!c->builtins.discard)
				draw_pixel(c->builtins.gl_FragColor, ypxl1, xpxl1, c->builtins.gl_FragDepth);
			SET_VEC4(c->builtins.gl_FragCoord, ypxl1+1, xpxl1, z1, 1/w1);
			setup_fs_input(0, v1_out, v2_out, w1, w2, provoke);
			fragment_shader(c->fs_input, &c->builtins, uniform);
			//fragcolor.w = modff(yend, &tmp) * xgap;
			if (!c->builtins.discard)
				draw_pixel(c->builtins.gl_FragColor, ypxl1+1, xpxl1, c->builtins.gl_FragDepth);
		}
	} else {
		if (!c->depth_test || (!fragdepth_or_discard &&
			depthtest(z1, ((float*)c->zbuf.lastrow)[-(int)ypxl1*c->zbuf.w + (int)xpxl1]))) {
			if (!c->fragdepth_or_discard && c->depth_test) { //hate this double check but depth buf is only update if enabled
				((float*)c->zbuf.lastrow)[-(int)ypxl1*c->zbuf.w + (int)xpxl1] = z1;
				((float*)c->zbuf.lastrow)[-(int)(ypxl1+1)*c->zbuf.w + (int)xpxl1] = z1;
			}
			SET_VEC4(c->builtins.gl_FragCoord, xpxl1, ypxl1, z1, 1/w1);
			setup_fs_input(0, v1_out, v2_out, w1, w2, provoke);
			fragment_shader(c->fs_input, &c->builtins, uniform);
			//fragcolor.w = (1.0 - modff(yend, &tmp)) * xgap;
			if (!c->builtins.discard)
				draw_pixel(c->builtins.gl_FragColor, xpxl1, ypxl1, c->builtins.gl_FragDepth);
			SET_VEC4(c->builtins.gl_FragCoord, xpxl1, ypxl1+1, z1, 1/w1);
			setup_fs_input(0, v1_out, v2_out, w1, w2, provoke);
			fragment_shader(c->fs_input, &c->builtins, uniform);
			//fragcolor.w = modff(yend, &tmp) * xgap;
			if (!c->builtins.discard)
				draw_pixel(c->builtins.gl_FragColor, xpxl1, ypxl1+1, c->builtins.gl_FragDepth);
		}
	}
	float intery = yend + gradient; //first y-intersection for main loop
	// handle second endpoint: same pattern as the first
	xend = x2 + 0.5f;
	yend = y2 + gradient * (xend - x2);
	xgap = modff(x2 + 0.5, &tmp);
	float xpxl2 = xend;
	float ypxl2;
	modff(yend, &ypxl2);
	z2 = MAP(z2, -1.0f, 1.0f, c->depth_range_near, c->depth_range_far);
	if (steep) {
		if (!c->depth_test || (!fragdepth_or_discard &&
			depthtest(z2, ((float*)c->zbuf.lastrow)[-(int)xpxl2*c->zbuf.w + (int)ypxl2]))) {
			if (!c->fragdepth_or_discard && c->depth_test) {
				((float*)c->zbuf.lastrow)[-(int)xpxl2*c->zbuf.w + (int)ypxl2] = z2;
				((float*)c->zbuf.lastrow)[-(int)xpxl2*c->zbuf.w + (int)(ypxl2+1)] = z2;
			}
			SET_VEC4(c->builtins.gl_FragCoord, ypxl2, xpxl2, z2, 1/w2);
			setup_fs_input(1, v1_out, v2_out, w1, w2, provoke);
			fragment_shader(c->fs_input, &c->builtins, uniform);
			//fragcolor.w = (1.0 - modff(yend, &tmp)) * xgap;
			if (!c->builtins.discard)
				draw_pixel(c->builtins.gl_FragColor, ypxl2, xpxl2, c->builtins.gl_FragDepth);
			SET_VEC4(c->builtins.gl_FragCoord, ypxl2+1, xpxl2, z2, 1/w2);
			setup_fs_input(1, v1_out, v2_out, w1, w2, provoke);
			fragment_shader(c->fs_input, &c->builtins, uniform);
			//fragcolor.w = modff(yend, &tmp) * xgap;
			if (!c->builtins.discard)
				draw_pixel(c->builtins.gl_FragColor, ypxl2+1, xpxl2, c->builtins.gl_FragDepth);
		}
	} else {
		if (!c->depth_test || (!fragdepth_or_discard &&
			depthtest(z2, ((float*)c->zbuf.lastrow)[-(int)ypxl2*c->zbuf.w + (int)xpxl2]))) {
			if (!c->fragdepth_or_discard && c->depth_test) {
				((float*)c->zbuf.lastrow)[-(int)ypxl2*c->zbuf.w + (int)xpxl2] = z2;
				((float*)c->zbuf.lastrow)[-(int)(ypxl2+1)*c->zbuf.w + (int)xpxl2] = z2;
			}
			SET_VEC4(c->builtins.gl_FragCoord, xpxl2, ypxl2, z2, 1/w2);
			setup_fs_input(1, v1_out, v2_out, w1, w2, provoke);
			fragment_shader(c->fs_input, &c->builtins, uniform);
			//fragcolor.w = (1.0 - modff(yend, &tmp)) * xgap;
			if (!c->builtins.discard)
				draw_pixel(c->builtins.gl_FragColor, xpxl2, ypxl2, c->builtins.gl_FragDepth);
			SET_VEC4(c->builtins.gl_FragCoord, xpxl2, ypxl2+1, z2, 1/w2);
			setup_fs_input(1, v1_out, v2_out, w1, w2, provoke);
			fragment_shader(c->fs_input, &c->builtins, uniform);
			//fragcolor.w = modff(yend, &tmp) * xgap;
			if (!c->builtins.discard)
				draw_pixel(c->builtins.gl_FragColor, xpxl2, ypxl2+1, c->builtins.gl_FragDepth);
		}
	}
	//use the fast, inaccurate calculation of t since this algorithm is already
	//slower than the normal line drawing, pg 111 glspec if I ever want to fix it
	float range = ceil(x2-x1);
	float t, z, w;
	// main loop: one pixel pair per x, intery tracks the fractional y
	for (j=1, x = xpxl1 + 1; x < xpxl2; ++x, ++j, intery += gradient) {
		t = j/range;
		z = (1 - t) * z1 + t * z2;
		w = (1 - t) * w1 + t * w2;
		if (steep) {
			if (!c->fragdepth_or_discard && c->depth_test) {
				if (!depthtest(z, ((float*)c->zbuf.lastrow)[-(int)x*c->zbuf.w + (int)intery])) {
					continue;
				} else {
					((float*)c->zbuf.lastrow)[-(int)x*c->zbuf.w + (int)intery] = z;
					((float*)c->zbuf.lastrow)[-(int)x*c->zbuf.w + (int)(intery+1)] = z;
				}
			}
			SET_VEC4(c->builtins.gl_FragCoord, intery, x, z, 1/w);
			setup_fs_input(t, v1_out, v2_out, w1, w2, provoke);
			fragment_shader(c->fs_input, &c->builtins, uniform);
			//fragcolor.w = 1.0 - modff(intery, &tmp);
			if (!c->builtins.discard)
				draw_pixel(c->builtins.gl_FragColor, intery, x, c->builtins.gl_FragDepth);
			SET_VEC4(c->builtins.gl_FragCoord, intery+1, x, z, 1/w);
			setup_fs_input(t, v1_out, v2_out, w1, w2, provoke);
			fragment_shader(c->fs_input, &c->builtins, uniform);
			//fragcolor.w = modff(intery, &tmp);
			if (!c->builtins.discard)
				draw_pixel(c->builtins.gl_FragColor, intery+1, x, c->builtins.gl_FragDepth);
		} else {
			if (!c->fragdepth_or_discard && c->depth_test) {
				if (!depthtest(z, ((float*)c->zbuf.lastrow)[-(int)intery*c->zbuf.w + (int)x])) {
					continue;
				} else {
					((float*)c->zbuf.lastrow)[-(int)intery*c->zbuf.w + (int)x] = z;
					((float*)c->zbuf.lastrow)[-(int)(intery+1)*c->zbuf.w + (int)x] = z;
				}
			}
			SET_VEC4(c->builtins.gl_FragCoord, x, intery, z, 1/w);
			setup_fs_input(t, v1_out, v2_out, w1, w2, provoke);
			fragment_shader(c->fs_input, &c->builtins, uniform);
			//fragcolor.w = 1.0 - modff(intery, &tmp);
			if (!c->builtins.discard)
				draw_pixel(c->builtins.gl_FragColor, x, intery, c->builtins.gl_FragDepth);
			SET_VEC4(c->builtins.gl_FragCoord, x, intery+1, z, 1/w);
			setup_fs_input(t, v1_out, v2_out, w1, w2, provoke);
			fragment_shader(c->fs_input, &c->builtins, uniform);
			//fragcolor.w = modff(intery, &tmp);
			if (!c->builtins.discard)
				draw_pixel(c->builtins.gl_FragColor, x, intery+1, c->builtins.gl_FragDepth);
		}
	}
}
// Top-level triangle dispatch: trivially reject when all three vertices
// share an outside plane, rasterize directly when fully inside, otherwise
// go through the polygon clipper.
static void draw_triangle(glVertex* v0, glVertex* v1, glVertex* v2, unsigned int provoke)
{
	int and_code = v0->clip_code & v1->clip_code & v2->clip_code;
	if (and_code) {
		// every vertex is outside the same frustum plane
		return;
	}
	int or_code = v0->clip_code | v1->clip_code | v2->clip_code;
	if (!or_code) {
		draw_triangle_final(v0, v1, v2, provoke);
	} else {
		draw_triangle_clip(v0, v1, v2, provoke, 0);
	}
}
// Project a fully-clipped triangle to screen space, apply face culling, set
// gl_FrontFacing, and dispatch to the front/back polygon-mode rasterizer.
static void draw_triangle_final(glVertex* v0, glVertex* v1, glVertex* v2, unsigned int provoke)
{
	v0->screen_space = mult_mat4_vec4(c->vp_mat, v0->clip_space);
	v1->screen_space = mult_mat4_vec4(c->vp_mat, v1->clip_space);
	v2->screen_space = mult_mat4_vec4(c->vp_mat, v2->clip_space);

	int front_facing = is_front_facing(v0, v1, v2);
	if (c->cull_face) {
		if (c->cull_mode == GL_FRONT_AND_BACK ||
		    (c->cull_mode == GL_BACK && !front_facing) ||
		    (c->cull_mode == GL_FRONT && front_facing)) {
			return;
		}
	}
	c->builtins.gl_FrontFacing = front_facing;
	if (front_facing) {
		c->draw_triangle_front(v0, v1, v2, provoke);
	} else {
		c->draw_triangle_back(v0, v1, v2, provoke);
	}
}
/* We clip the segment [a,b] against the 6 planes of the normal volume.
 * We compute the point 'c' of intersection and the value of the parameter 't'
 * of the intersection if x=a+t(b-a).
 */
// clip_func expands to one clipper per frustum plane: 'dir' is the axis
// being clipped (sign '-' for the min plane, '+' for the max plane), and
// dir1/dir2 are the other two axes, which are lerped at parameter t.  On
// return *c lies exactly on the plane (c->dir == sign c->w).
#define clip_func(name, sign, dir, dir1, dir2) \
static float name(vec4 *c, vec4 *a, vec4 *b) \
{\
	float t, dx, dy, dz, dw, den;\
	dx = (b->x - a->x);\
	dy = (b->y - a->y);\
	dz = (b->z - a->z);\
	dw = (b->w - a->w);\
	den = -(sign d ## dir) + dw;\
	if (den == 0) t=0;\
	else t = ( sign a->dir - a->w) / den;\
	c->dir1 = a->dir1 + t * d ## dir1;\
	c->dir2 = a->dir2 + t * d ## dir2;\
	c->w = a->w + t * dw;\
	c->dir = sign c->w;\
	return t;\
}
clip_func(clip_xmin, -, x, y, z)
clip_func(clip_xmax, +, x, y, z)
clip_func(clip_ymin, -, y, x, z)
clip_func(clip_ymax, +, y, x, z)
clip_func(clip_zmin, -, z, x, y)
clip_func(clip_zmax, +, z, x, y)
// Plane clippers indexed by clip bit: z planes first, then x, then y.
static float (*clip_proc[6])(vec4 *, vec4 *, vec4 *) = {
	clip_zmin, clip_zmax,
	clip_xmin, clip_xmax,
	clip_ymin, clip_ymax
};
// Fill in a newly created clip vertex q: linearly interpolate the vertex
// shader outputs of v0 and v1 at parameter t and compute q's clip code.
// q->clip_space must already have been written by a clip_* function.
static inline void update_clip_pt(glVertex *q, glVertex *v0, glVertex *v1, float t)
{
	for (int i=0; i<c->vs_output.size; ++i) {
		//why is this correct for both SMOOTH and NOPERSPECTIVE?
		q->vs_out[i] = v0->vs_out[i] + (v1->vs_out[i] - v0->vs_out[i]) * t;

		//FLAT should be handled indirectly by the provoke index
		//nothing to do here unless I change that
	}

	q->clip_code = gl_clipcode(q->clip_space);
	/*
	 * this is done in draw_triangle currently ...
	q->screen_space = mult_mat4_vec4(c->vp_mat, q->clip_space);
	if (q->clip_code == 0)
		q->screen_space = mult_mat4_vec4(c->vp_mat, q->clip_space);
	*/
}
// Recursively clip triangle (v0,v1,v2) against the frustum plane selected by
// clip_bit and all later planes, emitting each fully-inside piece through
// draw_triangle_final. Vertices created on a plane live on this function's
// stack frame, so their storage stays valid through the recursive calls below.
static void draw_triangle_clip(glVertex* v0, glVertex* v1, glVertex* v2, unsigned int provoke, int clip_bit)
{
	int c_or, c_and, c_ex_or, cc[3], edge_flag_tmp, clip_mask;
	glVertex tmp1, tmp2, *q[3];
	float tt;

	//quite a bit of stack if there's a lot of clipping ...
	float tmp1_out[GL_MAX_VERTEX_OUTPUT_COMPONENTS];
	float tmp2_out[GL_MAX_VERTEX_OUTPUT_COMPONENTS];

	tmp1.vs_out = tmp1_out;
	tmp2.vs_out = tmp2_out;

	cc[0] = v0->clip_code;
	cc[1] = v1->clip_code;
	cc[2] = v2->clip_code;
	/*
	printf("in draw_triangle_clip\n");
	print_vec4(v0->clip_space, "\n");
	print_vec4(v1->clip_space, "\n");
	print_vec4(v2->clip_space, "\n");
	printf("tmp_out tmp2_out = %p %p\n\n", tmp1_out, tmp2_out);
	*/

	c_or = cc[0] | cc[1] | cc[2];
	if (c_or == 0) {
		// no vertex is outside any plane anymore; rasterize
		draw_triangle_final(v0, v1, v2, provoke);
	} else {
		c_and = cc[0] & cc[1] & cc[2];
		/* the triangle is completely outside */
		if (c_and != 0) {
			//printf("triangle outside\n");
			return;
		}

		/* find the next direction to clip */
		while (clip_bit < 6 && (c_or & (1 << clip_bit)) == 0) {
			++clip_bit;
		}

		/* this test can be true only in case of rounding errors */
		if (clip_bit == 6) {
#if 1
			printf("Clipping error:\n");
			print_vec4(v0->clip_space, "\n");
			print_vec4(v1->clip_space, "\n");
			print_vec4(v2->clip_space, "\n");
#endif
			return;
		}

		clip_mask = 1 << clip_bit;
		// XOR of the three codes is set iff exactly one (or all three, already
		// rejected above) of the vertices is outside this plane.
		c_ex_or = (cc[0] ^ cc[1] ^ cc[2]) & clip_mask;

		if (c_ex_or) {
			/* one point outside: rotate q[] so q[0] is the outside vertex,
			 * clip both incident edges, and emit the two inside triangles */
			if (cc[0] & clip_mask) { q[0]=v0; q[1]=v1; q[2]=v2; }
			else if (cc[1] & clip_mask) { q[0]=v1; q[1]=v2; q[2]=v0; }
			else { q[0]=v2; q[1]=v0; q[2]=v1; }

			tt = clip_proc[clip_bit](&tmp1.clip_space, &q[0]->clip_space, &q[1]->clip_space);
			update_clip_pt(&tmp1, q[0], q[1], tt);

			tt = clip_proc[clip_bit](&tmp2.clip_space, &q[0]->clip_space, &q[2]->clip_space);
			update_clip_pt(&tmp2, q[0], q[2], tt);

			tmp1.edge_flag = q[0]->edge_flag;
			// the interior diagonal of the resulting quad is not a real edge,
			// so q[2]'s flag is suppressed for the first piece and restored after
			edge_flag_tmp = q[2]->edge_flag;
			q[2]->edge_flag = 0;
			draw_triangle_clip(&tmp1, q[1], q[2], provoke, clip_bit+1);

			tmp2.edge_flag = 1;
			tmp1.edge_flag = 0;
			q[2]->edge_flag = edge_flag_tmp;
			draw_triangle_clip(&tmp2, &tmp1, q[2], provoke, clip_bit+1);
		} else {
			/* two points outside: rotate q[] so q[0] is the inside vertex,
			 * clip both incident edges, and emit the single inside triangle */
			if ((cc[0] & clip_mask) == 0) { q[0]=v0; q[1]=v1; q[2]=v2; }
			else if ((cc[1] & clip_mask) == 0) { q[0]=v1; q[1]=v2; q[2]=v0; }
			else { q[0]=v2; q[1]=v0; q[2]=v1; }

			tt = clip_proc[clip_bit](&tmp1.clip_space, &q[0]->clip_space, &q[1]->clip_space);
			update_clip_pt(&tmp1, q[0], q[1], tt);

			tt = clip_proc[clip_bit](&tmp2.clip_space, &q[0]->clip_space, &q[2]->clip_space);
			update_clip_pt(&tmp2, q[0], q[2], tt);

			tmp1.edge_flag = 1;
			tmp2.edge_flag = q[2]->edge_flag;
			draw_triangle_clip(q[0], &tmp1, &tmp2, provoke, clip_bit+1);
		}
	}
}
// GL_POINT polygon mode: run the fragment shader once at each triangle
// vertex whose edge flag is set and draw that single pixel.
static void draw_triangle_point(glVertex* v0, glVertex* v1, glVertex* v2, unsigned int provoke)
{
	//TODO provoke?
	float fs_input[GL_MAX_VERTEX_OUTPUT_COMPONENTS];
	vec3 point;
	glVertex* vert[3] = { v0, v1, v2 };

	for (int i=0; i<3; ++i) {
		if (!vert[i]->edge_flag) //TODO doesn't work
			continue;

		// perspective divide, then map NDC z into the depth range
		point = vec4_to_vec3h(vert[i]->screen_space);
		point.z = MAP(point.z, -1.0f, 1.0f, c->depth_range_near, c->depth_range_far);

		//TODO not sure if I'm supposed to do this ... doesn't say to in spec but it is called depth clamping
		if (c->depth_clamp)
			point.z = clampf_01(point.z);

		// FLAT outputs come from the provoking vertex; everything else is
		// taken directly from this vertex's shader outputs
		for (int j=0; j<c->vs_output.size; ++j) {
			if (c->vs_output.interpolation[j] != FLAT) {
				fs_input[j] = vert[i]->vs_out[j]; //would be correct from clipping
			} else {
				fs_input[j] = c->vs_output.output_buf.a[provoke*c->vs_output.size + j];
			}
		}

		c->builtins.discard = GL_FALSE;
		c->builtins.gl_FragDepth = point.z;
		c->programs.a[c->cur_program].fragment_shader(fs_input, &c->builtins, c->programs.a[c->cur_program].uniform);
		if (!c->builtins.discard)
			draw_pixel(c->builtins.gl_FragColor, point.x, point.y, c->builtins.gl_FragDepth);
	}
}
// GL_LINE polygon mode: draw each triangle edge whose leading vertex has
// its edge flag set (v0->v1, v1->v2, v2->v0, in that order).
static void draw_triangle_line(glVertex* v0, glVertex* v1, glVertex* v2, unsigned int provoke)
{
	glVertex* vert[3] = { v0, v1, v2 };

	for (int i=0; i<3; ++i) {
		glVertex* a = vert[i];
		glVertex* b = vert[(i+1)%3];
		if (a->edge_flag)
			draw_line_shader(a->screen_space, b->screen_space, a->vs_out, b->vs_out, provoke);
	}
}
// GL_FILL polygon mode: rasterize the triangle with implicit edge functions
// and barycentric coordinates, doing perspective-correct (SMOOTH), linear
// (NOPERSPECTIVE) or provoking-vertex (FLAT) interpolation of the vertex
// shader outputs, then run the fragment shader per covered pixel.
// Rows are processed in parallel when compiled with OpenMP.
static void draw_triangle_fill(glVertex* v0, glVertex* v1, glVertex* v2, unsigned int provoke)
{
	vec4 p0 = v0->screen_space;
	vec4 p1 = v1->screen_space;
	vec4 p2 = v2->screen_space;

	// perspective-divided (homogeneous -> 3D) positions
	vec3 hp0 = vec4_to_vec3h(p0);
	vec3 hp1 = vec4_to_vec3h(p1);
	vec3 hp2 = vec4_to_vec3h(p2);

	// TODO even worth calculating or just some constant?
	float max_depth_slope = 0;
	float poly_offset = 0;

	// polygon offset (glPolygonOffset): offset = m*factor + r*units where m
	// is approximated by the max |dz/dx|, |dz/dy| over the three edges
	if (c->poly_offset) {
		float dzxy[6];
		dzxy[0] = fabsf((hp1.z - hp0.z)/(hp1.x - hp0.x));
		dzxy[1] = fabsf((hp1.z - hp0.z)/(hp1.y - hp0.y));
		dzxy[2] = fabsf((hp2.z - hp1.z)/(hp2.x - hp1.x));
		dzxy[3] = fabsf((hp2.z - hp1.z)/(hp2.y - hp1.y));
		dzxy[4] = fabsf((hp0.z - hp2.z)/(hp0.x - hp2.x));
		dzxy[5] = fabsf((hp0.z - hp2.z)/(hp0.y - hp2.y));

		max_depth_slope = dzxy[0];
		for (int i=1; i<6; ++i) {
			if (dzxy[i] > max_depth_slope)
				max_depth_slope = dzxy[i];
		}

// NOTE(review): the trailing semicolon is inside the macro; this only works
// because SMALLEST_INCR is the last token of the statement below — verify
// before reusing the macro elsewhere.
#define SMALLEST_INCR 0.000001;
		poly_offset = max_depth_slope * c->poly_factor + c->poly_units * SMALLEST_INCR;
	}

	/*
	print_vec4(hp0, "\n");
	print_vec4(hp1, "\n");
	print_vec4(hp2, "\n");

	printf("%f %f %f\n", p0.w, p1.w, p2.w);
	print_vec3(hp0, "\n");
	print_vec3(hp1, "\n");
	print_vec3(hp2, "\n\n");
	*/

	// screen-space bounding box of the triangle
	//can't think of a better/cleaner way to do this than these 8 lines
	float x_min = MIN(hp0.x, hp1.x);
	float x_max = MAX(hp0.x, hp1.x);
	float y_min = MIN(hp0.y, hp1.y);
	float y_max = MAX(hp0.y, hp1.y);

	x_min = MIN(hp2.x, x_min);
	x_max = MAX(hp2.x, x_max);
	y_min = MIN(hp2.y, y_min);
	y_max = MAX(hp2.y, y_max);

	int ix_max = roundf(x_max);
	int iy_max = roundf(y_max);

	/*
	 * testing without this
	x_min = MAX(0, x_min);
	x_max = MIN(c->back_buffer.w-1, x_max);
	y_min = MAX(0, y_min);
	y_max = MIN(c->back_buffer.h-1, y_max);

	x_min = MAX(c->x_min, x_min);
	x_max = MIN(c->x_max, x_max);
	y_min = MAX(c->y_min, y_min);
	y_max = MIN(c->y_max, y_max);
	*/

	//form implicit lines
	Line l01 = make_Line(hp0.x, hp0.y, hp1.x, hp1.y);
	Line l12 = make_Line(hp1.x, hp1.y, hp2.x, hp2.y);
	Line l20 = make_Line(hp2.x, hp2.y, hp0.x, hp0.y);

	float alpha, beta, gamma, tmp, tmp2, z;
	float fs_input[GL_MAX_VERTEX_OUTPUT_COMPONENTS];

	// per-vertex outputs pre-divided by w for perspective-correct interpolation
	float perspective[GL_MAX_VERTEX_OUTPUT_COMPONENTS*3];
	float* vs_output = &c->vs_output.output_buf.a[0];

	for (int i=0; i<c->vs_output.size; ++i) {
		perspective[i] = v0->vs_out[i]/p0.w;
		perspective[GL_MAX_VERTEX_OUTPUT_COMPONENTS + i] = v1->vs_out[i]/p1.w;
		perspective[2*GL_MAX_VERTEX_OUTPUT_COMPONENTS + i] = v2->vs_out[i]/p2.w;
	}
	float inv_w0 = 1/p0.w;  //is this worth it? faster than just dividing by w down below?
	float inv_w1 = 1/p1.w;
	float inv_w2 = 1/p2.w;

	float x, y;

	Shader_Builtins builtins;
	#pragma omp parallel for private(x, y, alpha, beta, gamma, z, tmp, tmp2, builtins, fs_input)
	for (int iy = y_min; iy<iy_max; ++iy) {
		y = iy + 0.5f;

		for (int ix = x_min; ix<ix_max; ++ix) {
			x = ix + 0.5f; //center of min pixel

			// barycentric coordinates from the implicit edge functions
			//see page 117 of glspec for alternative method
			gamma = line_func(&l01, x, y)/line_func(&l01, hp2.x, hp2.y);
			beta = line_func(&l20, x, y)/line_func(&l20, hp1.x, hp1.y);
			alpha = 1 - beta - gamma;

			if (alpha >= 0 && beta >= 0 && gamma >= 0) {
				//if it's on the edge (==0), draw if the opposite vertex is on the same side as arbitrary point -1, -2.5
				//this is a deterministic way of choosing which triangle gets a pixel for triangles that share
				//edges (see commit message for e87e324)
				if ((alpha > 0 || line_func(&l12, hp0.x, hp0.y) * line_func(&l12, -1, -2.5) > 0) &&
				    (beta > 0 || line_func(&l20, hp1.x, hp1.y) * line_func(&l20, -1, -2.5) > 0) &&
				    (gamma > 0 || line_func(&l01, hp2.x, hp2.y) * line_func(&l01, -1, -2.5) > 0)) {
					//calculate interoplation here
					tmp2 = alpha*inv_w0 + beta*inv_w1 + gamma*inv_w2;

					z = alpha * hp0.z + beta * hp1.z + gamma * hp2.z;

					z = MAP(z, -1.0f, 1.0f, c->depth_range_near, c->depth_range_far); //TODO move out (ie can I map hp1.z etc.)?
					z += poly_offset;

					// TODO have a macro that turns on pre-fragment shader depthtest/scissor test?

					for (int i=0; i<c->vs_output.size; ++i) {
						if (c->vs_output.interpolation[i] == SMOOTH) {
							tmp = alpha*perspective[i] + beta*perspective[GL_MAX_VERTEX_OUTPUT_COMPONENTS + i] + gamma*perspective[2*GL_MAX_VERTEX_OUTPUT_COMPONENTS + i];

							fs_input[i] = tmp/tmp2;

						} else if (c->vs_output.interpolation[i] == NOPERSPECTIVE) {
							fs_input[i] = alpha * v0->vs_out[i] + beta * v1->vs_out[i] + gamma * v2->vs_out[i];
						} else { // == FLAT
							fs_input[i] = vs_output[provoke*c->vs_output.size + i];
						}
					}

					// tmp2 is 1/w interpolated... I now do that everywhere (draw_line, draw_point)
					SET_VEC4(builtins.gl_FragCoord, x, y, z, tmp2);
					builtins.discard = GL_FALSE;
					builtins.gl_FragDepth = z;

					// have to do this here instead of outside the loop because somehow openmp messes it up
					// TODO probably some way to prevent that but it's just copying an int so no big deal
					builtins.gl_InstanceID = c->builtins.gl_InstanceID;

					c->programs.a[c->cur_program].fragment_shader(fs_input, &builtins, c->programs.a[c->cur_program].uniform);
					if (!builtins.discard) {
						draw_pixel(builtins.gl_FragColor, x, y, builtins.gl_FragDepth);
					}
				}
			}
		}
	}
}
// TODO should this be done in colors/integers not vec4/floats?
// Blend the incoming fragment color 'src' with the framebuffer color 'dst':
// compute the source factor Cs and destination factor Cd from the current
// blend state, then combine per the blend equation (for GL_FUNC_* the result
// is a combination of Cs*src and Cd*dst; GL_MIN/GL_MAX ignore the factors).
static Color blend_pixel(vec4 src, vec4 dst)
{
	vec4* cnst = &c->blend_color;
	// GL_SRC_ALPHA_SATURATE factor: min(As, 1-Ad)
	float i = MIN(src.w, 1-dst.w);

	vec4 Cs, Cd;

	switch (c->blend_sfactor) {
	case GL_ZERO:                     SET_VEC4(Cs, 0,0,0,0);                                 break;
	case GL_ONE:                      SET_VEC4(Cs, 1,1,1,1);                                 break;
	case GL_SRC_COLOR:                Cs = src;                                              break;
	case GL_ONE_MINUS_SRC_COLOR:      SET_VEC4(Cs, 1-src.x,1-src.y,1-src.z,1-src.w);         break;
	case GL_DST_COLOR:                Cs = dst;                                              break;
	case GL_ONE_MINUS_DST_COLOR:      SET_VEC4(Cs, 1-dst.x,1-dst.y,1-dst.z,1-dst.w);         break;
	case GL_SRC_ALPHA:                SET_VEC4(Cs, src.w, src.w, src.w, src.w);              break;
	case GL_ONE_MINUS_SRC_ALPHA:      SET_VEC4(Cs, 1-src.w,1-src.w,1-src.w,1-src.w);         break;
	case GL_DST_ALPHA:                SET_VEC4(Cs, dst.w, dst.w, dst.w, dst.w);              break;
	case GL_ONE_MINUS_DST_ALPHA:      SET_VEC4(Cs, 1-dst.w,1-dst.w,1-dst.w,1-dst.w);         break;
	case GL_CONSTANT_COLOR:           Cs = *cnst;                                            break;
	case GL_ONE_MINUS_CONSTANT_COLOR: SET_VEC4(Cs, 1-cnst->x,1-cnst->y,1-cnst->z,1-cnst->w); break;
	case GL_CONSTANT_ALPHA:           SET_VEC4(Cs, cnst->w, cnst->w, cnst->w, cnst->w);      break;
	case GL_ONE_MINUS_CONSTANT_ALPHA: SET_VEC4(Cs, 1-cnst->w,1-cnst->w,1-cnst->w,1-cnst->w); break;

	case GL_SRC_ALPHA_SATURATE:       SET_VEC4(Cs, i, i, i, 1);                              break;
	/*not implemented yet
	 * won't be until I implement dual source blending/dual output from frag shader
	 *https://www.opengl.org/wiki/Blending#Dual_Source_Blending
	case GL_SRC1_COLOR:               Cs =  break;
	case GL_ONE_MINUS_SRC1_COLOR:     Cs =  break;
	case GL_SRC1_ALPHA:               Cs =  break;
	case GL_ONE_MINUS_SRC1_ALPHA:     Cs =  break;
	*/
	default:
		//should never get here
		printf("error unrecognized blend_sfactor!\n");
		break;
	}

	switch (c->blend_dfactor) {
	case GL_ZERO:                     SET_VEC4(Cd, 0,0,0,0);                                 break;
	case GL_ONE:                      SET_VEC4(Cd, 1,1,1,1);                                 break;
	case GL_SRC_COLOR:                Cd = src;                                              break;
	case GL_ONE_MINUS_SRC_COLOR:      SET_VEC4(Cd, 1-src.x,1-src.y,1-src.z,1-src.w);         break;
	case GL_DST_COLOR:                Cd = dst;                                              break;
	case GL_ONE_MINUS_DST_COLOR:      SET_VEC4(Cd, 1-dst.x,1-dst.y,1-dst.z,1-dst.w);         break;
	case GL_SRC_ALPHA:                SET_VEC4(Cd, src.w, src.w, src.w, src.w);              break;
	case GL_ONE_MINUS_SRC_ALPHA:      SET_VEC4(Cd, 1-src.w,1-src.w,1-src.w,1-src.w);         break;
	case GL_DST_ALPHA:                SET_VEC4(Cd, dst.w, dst.w, dst.w, dst.w);              break;
	case GL_ONE_MINUS_DST_ALPHA:      SET_VEC4(Cd, 1-dst.w,1-dst.w,1-dst.w,1-dst.w);         break;
	case GL_CONSTANT_COLOR:           Cd = *cnst;                                            break;
	case GL_ONE_MINUS_CONSTANT_COLOR: SET_VEC4(Cd, 1-cnst->x,1-cnst->y,1-cnst->z,1-cnst->w); break;
	case GL_CONSTANT_ALPHA:           SET_VEC4(Cd, cnst->w, cnst->w, cnst->w, cnst->w);      break;
	case GL_ONE_MINUS_CONSTANT_ALPHA: SET_VEC4(Cd, 1-cnst->w,1-cnst->w,1-cnst->w,1-cnst->w); break;

	case GL_SRC_ALPHA_SATURATE:       SET_VEC4(Cd, i, i, i, 1);                              break;
	/*not implemented yet
	case GL_SRC_ALPHA_SATURATE:       Cd =  break;
	case GL_SRC1_COLOR:               Cd =  break;
	case GL_ONE_MINUS_SRC1_COLOR:     Cd =  break;
	case GL_SRC1_ALPHA:               Cd =  break;
	case GL_ONE_MINUS_SRC1_ALPHA:     Cd =  break;
	*/
	default:
		//should never get here
		printf("error unrecognized blend_dfactor!\n");
		break;
	}

	vec4 result;

	switch (c->blend_equation) {
	case GL_FUNC_ADD:
		result = add_vec4s(mult_vec4s(Cs, src), mult_vec4s(Cd, dst));
		break;
	case GL_FUNC_SUBTRACT:
		result = sub_vec4s(mult_vec4s(Cs, src), mult_vec4s(Cd, dst));
		break;
	case GL_FUNC_REVERSE_SUBTRACT:
		result = sub_vec4s(mult_vec4s(Cd, dst), mult_vec4s(Cs, src));
		break;
	case GL_MIN:
		SET_VEC4(result, MIN(src.x, dst.x), MIN(src.y, dst.y), MIN(src.z, dst.z), MIN(src.w, dst.w));
		break;
	case GL_MAX:
		SET_VEC4(result, MAX(src.x, dst.x), MAX(src.y, dst.y), MAX(src.z, dst.z), MAX(src.w, dst.w));
		break;
	default:
		//should never get here
		printf("error unrecognized blend_equation!\n");
		break;
	}

	return vec4_to_Color(result);
}
// source and destination colors
// Apply the current logic op (glLogicOp) bitwise per channel and return
// the resulting color. Unknown ops fall back to GL_COPY behavior.
static Color logic_ops_pixel(Color s, Color d)
{
	switch (c->logic_func) {
	case GL_CLEAR:
		return make_Color(0,0,0,0);
	case GL_SET:
		return make_Color(255,255,255,255);
	case GL_COPY:
		return s;
	case GL_COPY_INVERTED:
		return make_Color(~s.r, ~s.g, ~s.b, ~s.a);
	case GL_NOOP:
		return d;
	case GL_INVERT:
		return make_Color(~d.r, ~d.g, ~d.b, ~d.a);
	case GL_AND:
		return make_Color(s.r & d.r, s.g & d.g, s.b & d.b, s.a & d.a);
	case GL_NAND:
		return make_Color(~(s.r & d.r), ~(s.g & d.g), ~(s.b & d.b), ~(s.a & d.a));
	case GL_OR:
		return make_Color(s.r | d.r, s.g | d.g, s.b | d.b, s.a | d.a);
	case GL_NOR:
		return make_Color(~(s.r | d.r), ~(s.g | d.g), ~(s.b | d.b), ~(s.a | d.a));
	case GL_XOR:
		return make_Color(s.r ^ d.r, s.g ^ d.g, s.b ^ d.b, s.a ^ d.a);
	case GL_EQUIV:
		return make_Color(~(s.r ^ d.r), ~(s.g ^ d.g), ~(s.b ^ d.b), ~(s.a ^ d.a));
	case GL_AND_REVERSE:
		return make_Color(s.r & ~d.r, s.g & ~d.g, s.b & ~d.b, s.a & ~d.a);
	case GL_AND_INVERTED:
		return make_Color(~s.r & d.r, ~s.g & d.g, ~s.b & d.b, ~s.a & d.a);
	case GL_OR_REVERSE:
		return make_Color(s.r | ~d.r, s.g | ~d.g, s.b | ~d.b, s.a | ~d.a);
	case GL_OR_INVERTED:
		return make_Color(~s.r | d.r, ~s.g | d.g, ~s.b | d.b, ~s.a | d.a);
	default:
		puts("Unrecognized logic op!, defaulting to GL_COPY");
		return s;
	}
}
// Apply the current stencil function (front or back state set, chosen by
// gl_FrontFacing) to the stored stencil value. Returns 1 on pass, 0 on fail.
static int stencil_test(u8 stencil)
{
	// TODO what about non-triangles, should use front values, so need to make sure that's set?
	int use_front = c->builtins.gl_FrontFacing;
	int func = use_front ? c->stencil_func      : c->stencil_func_back;
	int ref  = use_front ? c->stencil_ref       : c->stencil_ref_back;
	int mask = use_front ? c->stencil_valuemask : c->stencil_valuemask_back;

	// both operands are masked before the comparison
	int r = ref & mask;
	int s = stencil & mask;

	switch (func) {
	case GL_NEVER:    return 0;
	case GL_LESS:     return r < s;
	case GL_LEQUAL:   return r <= s;
	case GL_GREATER:  return r > s;
	case GL_GEQUAL:   return r >= s;
	case GL_EQUAL:    return r == s;
	case GL_NOTEQUAL: return r != s;
	case GL_ALWAYS:   return 1;
	default:
		puts("Error: unrecognized stencil function!");
		return 0;
	}
}
// Update the stencil buffer byte at *dest according to the outcome of the
// stencil test ('stencil') and depth test ('depth'), using the front or back
// operation set. The stencil writemask selects which bitplanes may change.
static void stencil_op(int stencil, int depth, u8* dest)
{
	int op, ref, mask;
	// make them proper arrays in gl_context?
	GLenum* ops;
	// TODO what about non-triangles, should use front values, so need to make sure that's set?
	if (c->builtins.gl_FrontFacing) {
		ops = &c->stencil_sfail; // sfail/dpfail/dppass accessed as ops[0..2]
		ref = c->stencil_ref;
		mask = c->stencil_writemask;
	} else {
		ops = &c->stencil_sfail_back;
		ref = c->stencil_ref_back;
		mask = c->stencil_writemask_back;
	}
	// ops[0] = stencil fail, ops[1] = depth fail, ops[2] = both pass
	op = (!stencil) ? ops[0] : ((!depth) ? ops[1] : ops[2]);

	u8 val = *dest;
	switch (op) {
	case GL_KEEP: return;
	case GL_ZERO: val = 0; break;
	case GL_REPLACE: val = ref; break;
	case GL_INCR: if (val < 255) val++; break;
	case GL_INCR_WRAP: val++; break;
	case GL_DECR: if (val > 0) val--; break;
	case GL_DECR_WRAP: val--; break;
	case GL_INVERT: val = ~val;
	}

	// The writemask controls which bitplanes are written; bits outside the
	// mask must keep their previous contents. The old code did
	// '*dest = val & mask;' which zeroed all unmasked bitplanes.
	*dest = (*dest & ~mask) | (val & mask);
}
// Draw a single point fragment given a float screen position; currently just
// truncates the coordinates and forwards to draw_pixel.
static void draw_pixel_vec2(vec4 cf, vec2 pos, float z)
{
/*
 * spec pg 110:
Point rasterization produces a fragment for each framebuffer pixel whose center
lies inside a square centered at the point’s (x w , y w ), with side length equal to the
current point size.

for a 1 pixel size point there are only 3 edge cases where more than 1 pixel center (0.5, 0.5)
would fall on the very edge of a 1 pixel square.  I think just drawing the upper or upper
corner pixel in these cases is fine and makes sense since width and height are actually 0.01 less
than full, see make_viewport_matrix
TODO point size > 1
*/
	draw_pixel(cf, pos.x, pos.y, z);
}
// Per-fragment back end: scissor, stencil and depth tests, then blending,
// logic ops, and the final packed write into the back buffer.
// Framebuffer rows are addressed from 'lastrow' with a negative y offset,
// flipping y so row 0 is at the bottom.
static void draw_pixel(vec4 cf, int x, int y, float z)
{
	// Scissor test
	if (c->scissor_test) {
		if (x < c->scissor_lx || y < c->scissor_ly || x >= c->scissor_ux || y >= c->scissor_uy) {
			return;
		}
	}

	//MSAA

	//Stencil Test TODO have to handle when there is no stencil or depth buffer
	//(change gl_init to make stencil and depth buffers optional)
	u8* stencil_dest = &c->stencil_buf.lastrow[-y*c->stencil_buf.w + x];
	if (c->stencil_test) {
		if (!stencil_test(*stencil_dest)) {
			// stencil fail: apply the sfail op and discard the fragment
			stencil_op(0, 1, stencil_dest);
			return;
		}
	}

	//Depth test if necessary
	if (c->depth_test) {
		// I made gl_FragDepth read/write, ie same == to gl_FragCoord.z going into the shader
		// so I can just always use gl_FragDepth here
		float dest_depth = ((float*)c->zbuf.lastrow)[-y*c->zbuf.w + x];
		float src_depth = z;  //c->builtins.gl_FragDepth;  // pass as parameter?

		int depth_result = depthtest(src_depth, dest_depth);

		if (c->stencil_test) {
			// stencil passed; dpfail or dppass op depending on depth result
			stencil_op(1, depth_result, stencil_dest);
		}
		if (!depth_result) {
			return;
		}
		((float*)c->zbuf.lastrow)[-y*c->zbuf.w + x] = src_depth;
	} else if (c->stencil_test) {
		// no depth test: stencil pass implies depth pass
		stencil_op(1, 1, stencil_dest);
	}

	//Blending
	Color dest_color, src_color;
	u32* dest = &((u32*)c->back_buffer.lastrow)[-y*c->back_buffer.w + x];
	// unpack the stored pixel using the context's channel masks/shifts
	dest_color = make_Color((*dest & c->Rmask) >> c->Rshift, (*dest & c->Gmask) >> c->Gshift, (*dest & c->Bmask) >> c->Bshift, (*dest & c->Amask) >> c->Ashift);

	if (c->blend) {
		//TODO clamp in blend_pixel?  return the vec4 and clamp?
		src_color = blend_pixel(cf, Color_to_vec4(dest_color));
	} else {
		cf.x = clampf_01(cf.x);
		cf.y = clampf_01(cf.y);
		cf.z = clampf_01(cf.z);
		cf.w = clampf_01(cf.w);
		src_color = vec4_to_Color(cf);
	}
	//this line neded the negation in the viewport matrix
	//((u32*)c->back_buffer.buf)[y*buf.w+x] = c.a << 24 | c.c << 16 | c.g << 8 | c.b;

	//Logic Ops
	if (c->logic_ops) {
		src_color = logic_ops_pixel(src_color, dest_color);
	}

	//Dithering

	//((u32*)c->back_buffer.buf)[(buf.h-1-y)*buf.w + x] = c.a << 24 | c.c << 16 | c.g << 8 | c.b;
	//((u32*)c->back_buffer.lastrow)[-y*c->back_buffer.w + x] = c.a << 24 | c.c << 16 | c.g << 8 | c.b;
	*dest = (u32)src_color.a << c->Ashift | (u32)src_color.r << c->Rshift | (u32)src_color.g << c->Gshift | (u32)src_color.b << c->Bshift;
}
#include <stdarg.h>
/******************************************
* PORTABLEGL_IMPLEMENTATION
******************************************/
#include <stdio.h>
#include <assert.h>
#include <float.h>
// for CHAR_BIT
#include <limits.h>
// In DEBUG builds IS_VALID checks 'target' against a variadic list of allowed
// enums (recording 'error' on mismatch); otherwise it compiles to constant 1.
#ifdef DEBUG
#define IS_VALID(target, error, ...) is_valid(target, error, __VA_ARGS__)
#else
#define IS_VALID(target, error, ...) 1
#endif
// Check 'target' against the n GLenum values that follow; returns 1 if it
// matches any of them, otherwise records 'error' (if none pending) and
// returns 0. C requires every va_start to be paired with va_end — the old
// code returned from inside the loop without calling va_end.
int is_valid(GLenum target, GLenum error, int n, ...)
{
	int found = 0;
	va_list argptr;
	va_start(argptr, n);

	for (int i=0; i<n && !found; ++i) {
		if (target == va_arg(argptr, GLenum)) {
			found = 1;
		}
	}
	va_end(argptr);

	if (found)
		return 1;

	if (!c->error) {
		c->error = error;
	}
	return 0;
}
// default pass through shaders for index 0
// Vertex shader: copies attribute 0 straight to gl_Position.
void default_vs(float* vs_output, void* vertex_attribs, Shader_Builtins* builtins, void* uniforms)
{
	builtins->gl_Position = ((vec4*)vertex_attribs)[0];
}
// Default fragment shader: writes opaque red to gl_FragColor.
// (C++ compatibility rules out a compound-literal assignment here.)
void default_fs(float* fs_input, Shader_Builtins* builtins, void* uniforms)
{
	builtins->gl_FragColor.x = 1.0f;
	builtins->gl_FragColor.y = 0.0f;
	builtins->gl_FragColor.z = 0.0f;
	builtins->gl_FragColor.w = 1.0f;
}
// Reset a vertex array object: mark it live and re-initialize all of its
// attribute slots to their defaults.
void init_glVertex_Array(glVertex_Array* v)
{
	v->deleted = GL_FALSE;
	for (int slot=0; slot<GL_MAX_VERTEX_ATTRIBS; ++slot) {
		init_glVertex_Attrib(&v->vertex_attribs[slot]);
	}
}
// Reset a single vertex attribute slot. Only buf/enabled/divisor are
// cleared here; the remaining fields (listed below) are set when the
// attribute is specified via glVertexAttribPointer.
void init_glVertex_Attrib(glVertex_Attrib* v)
{
	/*
	GLint size;      // number of components 1-4
	GLenum type;     // GL_FLOAT, default
	GLsizei stride;  //
	GLsizei offset;  //
	GLboolean normalized;
	unsigned int buf;
	GLboolean enabled;
	GLuint divisor;
	*/
	v->buf = 0;
	v->enabled = 0;
	v->divisor = 0;
}
// Compute the bit position of the lowest set bit of 'mask' into 'shift'.
// NOTE: 'mask' is destructively shifted and must be non-zero or this loops
// forever; init_glContext passes by-value copies of the channel masks.
#define GET_SHIFT(mask, shift) \
	do {\
	shift = 0;\
	while ((mask & 1) == 0) {\
		mask >>= 1;\
		++shift;\
	}\
	} while (0)
// Initialize a glContext over a caller-provided (or newly malloc'd, when
// *back is NULL) 32-bit back buffer of w x h pixels with the given channel
// masks. Allocates depth and stencil buffers, sets all state to GL defaults,
// and installs the pass-through program 0 plus reserved VAO/buffer/texture
// slot 0. Returns 1 on success, 0 on allocation failure (partially acquired
// buffers are released).
int init_glContext(glContext* context, u32** back, int w, int h, int bitdepth, u32 Rmask, u32 Gmask, u32 Bmask, u32 Amask)
{
	if (bitdepth > 32 || !back)
		return 0;

	context->user_alloced_backbuf = *back != NULL;
	if (!*back) {
		int bytes_per_pixel = (bitdepth + CHAR_BIT-1) / CHAR_BIT;
		*back = (u32*) malloc(w * h * bytes_per_pixel);
		if (!*back)
			return 0;
	}

	context->zbuf.buf = (u8*) malloc(w*h * sizeof(float));
	if (!context->zbuf.buf) {
		if (!context->user_alloced_backbuf) {
			free(*back);
			*back = NULL;
		}
		return 0;
	}

	context->stencil_buf.buf = (u8*) malloc(w*h);
	if (!context->stencil_buf.buf) {
		if (!context->user_alloced_backbuf) {
			free(*back);
			*back = NULL;
		}
		free(context->zbuf.buf);
		return 0;
	}

	context->x_min = 0;
	context->y_min = 0;
	context->x_max = w;
	context->y_max = h;

	// 'lastrow' points at the start of the bottom row; pixel access then
	// uses a negative y offset (see draw_pixel) so y grows upward
	context->zbuf.w = w;
	context->zbuf.h = h;
	context->zbuf.lastrow = context->zbuf.buf + (h-1)*w*sizeof(float);

	context->stencil_buf.w = w;
	context->stencil_buf.h = h;
	context->stencil_buf.lastrow = context->stencil_buf.buf + (h-1)*w;

	context->back_buffer.w = w;
	context->back_buffer.h = h;
	context->back_buffer.buf = (u8*) *back;
	context->back_buffer.lastrow = context->back_buffer.buf + (h-1)*w*sizeof(u32);

	context->bitdepth = bitdepth; //not used yet
	context->Rmask = Rmask;
	context->Gmask = Gmask;
	context->Bmask = Bmask;
	context->Amask = Amask;
	// NOTE(review): GET_SHIFT loops forever on a zero mask — assumes all
	// four channel masks are non-zero; verify at the call site.
	GET_SHIFT(Rmask, context->Rshift);
	GET_SHIFT(Gmask, context->Gshift);
	GET_SHIFT(Bmask, context->Bshift);
	GET_SHIFT(Amask, context->Ashift);

	//initialize all vectors
	cvec_glVertex_Array(&context->vertex_arrays, 0, 3);
	cvec_glBuffer(&context->buffers, 0, 3);
	cvec_glProgram(&context->programs, 0, 3);
	cvec_glTexture(&context->textures, 0, 1);
	cvec_glVertex(&context->glverts, 0, 10);

	//TODO might as well just set it to MAX_VERTICES * MAX_OUTPUT_COMPONENTS
	cvec_float(&context->vs_output.output_buf, 0, 0);

	context->clear_stencil = 0;
	context->clear_color = make_Color(0, 0, 0, 0);
	SET_VEC4(context->blend_color, 0, 0, 0, 0);
	context->point_size = 1.0f;
	context->clear_depth = 1.0f;
	context->depth_range_near = 0.0f;
	context->depth_range_far = 1.0f;
	make_viewport_matrix(context->vp_mat, 0, 0, w, h, 1);

	//set flags
	//TODO match order in structure definition
	context->provoking_vert = GL_LAST_VERTEX_CONVENTION;
	context->cull_mode = GL_BACK;
	context->cull_face = GL_FALSE;
	context->front_face = GL_CCW;
	context->depth_test = GL_FALSE;
	context->fragdepth_or_discard = GL_FALSE;
	context->depth_clamp = GL_FALSE;
	context->depth_mask = GL_TRUE;
	context->blend = GL_FALSE;
	context->logic_ops = GL_FALSE;
	context->poly_offset = GL_FALSE;
	context->scissor_test = GL_FALSE;

	context->stencil_test = GL_FALSE;
	context->stencil_writemask = -1; // all 1s for the masks
	context->stencil_writemask_back = -1;
	context->stencil_ref = 0;
	context->stencil_ref_back = 0;
	context->stencil_valuemask = -1;
	context->stencil_valuemask_back = -1;
	context->stencil_func = GL_ALWAYS;
	context->stencil_func_back = GL_ALWAYS;
	context->stencil_sfail = GL_KEEP;
	context->stencil_dpfail = GL_KEEP;
	context->stencil_dppass = GL_KEEP;
	context->stencil_sfail_back = GL_KEEP;
	context->stencil_dpfail_back = GL_KEEP;
	context->stencil_dppass_back = GL_KEEP;

	context->logic_func = GL_COPY;
	context->blend_sfactor = GL_ONE;
	context->blend_dfactor = GL_ZERO;
	context->blend_equation = GL_FUNC_ADD;
	context->depth_func = GL_LESS;
	context->line_smooth = GL_FALSE;
	context->poly_mode_front = GL_FILL;
	context->poly_mode_back = GL_FILL;
	context->point_spr_origin = GL_UPPER_LEFT;

	context->poly_factor = 0.0f;
	context->poly_units = 0.0f;

	context->scissor_lx = 0;
	context->scissor_ly = 0;
	context->scissor_ux = w;
	context->scissor_uy = h;

	// According to refpages https://www.khronos.org/registry/OpenGL-Refpages/gl4/html/glPixelStore.xhtml
	context->unpack_alignment = 4;
	context->pack_alignment = 4;

	context->draw_triangle_front = draw_triangle_fill;
	context->draw_triangle_back = draw_triangle_fill;

	context->error = GL_NO_ERROR;

	//program 0 is supposed to be undefined but not invalid so I'll
	//just make it default
	glProgram tmp_prog = { default_vs, default_fs, NULL, GL_FALSE };
	cvec_push_glProgram(&context->programs, tmp_prog);
	context->cur_program = 0;

	//setup default vertex_array (vao) at position 0
	//we're like a compatibility profile for this but come on
	//no reason not to have this imo
	//https://www.opengl.org/wiki/Vertex_Specification#Vertex_Array_Object
	glVertex_Array tmp_va;
	init_glVertex_Array(&tmp_va);
	cvec_push_glVertex_Array(&context->vertex_arrays, tmp_va);
	context->cur_vertex_array = 0;

	//setup buffers and textures
	//need to push back once since 0 is invalid
	//valid buffers have to start at position 1
	glBuffer tmp_buf;
	tmp_buf.user_owned = GL_TRUE;
	tmp_buf.deleted = GL_FALSE;

	glTexture tmp_tex;
	tmp_tex.user_owned = GL_TRUE;
	tmp_tex.deleted = GL_FALSE;
	tmp_tex.format = GL_RGBA;
	tmp_tex.type = GL_TEXTURE_UNBOUND;
	tmp_tex.data = NULL;
	tmp_tex.w = 0;
	tmp_tex.h = 0;
	tmp_tex.d = 0;
	cvec_push_glBuffer(&context->buffers, tmp_buf);
	cvec_push_glTexture(&context->textures, tmp_tex);

	return 1;
}
// Release everything init_glContext allocated: depth/stencil buffers, the
// back buffer (only if the library allocated it), all non-user-owned buffer
// and texture data, and the internal vectors. Does not free 'context' itself.
void free_glContext(glContext* context)
{
	int i;
	free(context->zbuf.buf);
	free(context->stencil_buf.buf);
	if (!context->user_alloced_backbuf) {
		free(context->back_buffer.buf);
	}

	for (i=0; i<context->buffers.size; ++i) {
		if (!context->buffers.a[i].user_owned) {
			// NOTE(review): leftover debug output in a cleanup path; consider removing
			printf("freeing buffer %d\n", i);
			free(context->buffers.a[i].data);
		}
	}

	for (i=0; i<context->textures.size; ++i) {
		if (!context->textures.a[i].user_owned) {
			// NOTE(review): leftover debug output in a cleanup path; consider removing
			printf("freeing texture %d\n", i);
			free(context->textures.a[i].data);
		}
	}

	//free vectors
	cvec_free_glVertex_Array(&context->vertex_arrays);
	cvec_free_glBuffer(&context->buffers);
	cvec_free_glProgram(&context->programs);
	cvec_free_glTexture(&context->textures);
	cvec_free_glVertex(&context->glverts);

	cvec_free_float(&context->vs_output.output_buf);
}
// Make 'context' the current context; all gl* functions operate on the
// global 'c' this sets.
void set_glContext(glContext* context)
{
	c = context;
}
// Resize the depth, stencil, and color buffers of the current context to
// w x h pixels. Returns the (possibly moved) back buffer pointer, or NULL on
// allocation failure, in which case GL_OUT_OF_MEMORY is recorded and the
// context keeps whichever buffers were already resized.
// The stencil buffer was previously not resized at all, so enabling the
// stencil test after a resize indexed out of bounds in draw_pixel.
void* pglResizeFramebuffer(size_t w, size_t h)
{
	u8* tmp;
	tmp = (u8*) realloc(c->zbuf.buf, w*h * sizeof(float));
	if (!tmp) {
		if (c->error == GL_NO_ERROR)
			c->error = GL_OUT_OF_MEMORY;
		return NULL;
	}
	c->zbuf.buf = tmp;
	c->zbuf.w = w;
	c->zbuf.h = h;
	c->zbuf.lastrow = c->zbuf.buf + (h-1)*w*sizeof(float);

	// one byte per stencil value, same row layout as the other buffers
	tmp = (u8*) realloc(c->stencil_buf.buf, w*h);
	if (!tmp) {
		if (c->error == GL_NO_ERROR)
			c->error = GL_OUT_OF_MEMORY;
		return NULL;
	}
	c->stencil_buf.buf = tmp;
	c->stencil_buf.w = w;
	c->stencil_buf.h = h;
	c->stencil_buf.lastrow = c->stencil_buf.buf + (h-1)*w;

	tmp = (u8*) realloc(c->back_buffer.buf, w*h * sizeof(u32));
	if (!tmp) {
		if (c->error == GL_NO_ERROR)
			c->error = GL_OUT_OF_MEMORY;
		return NULL;
	}
	c->back_buffer.buf = tmp;
	c->back_buffer.w = w;
	c->back_buffer.h = h;
	c->back_buffer.lastrow = c->back_buffer.buf + (h-1)*w*sizeof(u32);

	return tmp;
}
// Return one of the static identification strings; an unrecognized name
// records GL_INVALID_ENUM (if no error is pending) and returns 0.
GLubyte* glGetString(GLenum name)
{
	static GLubyte vendor[] = "Robert Winkler";
	static GLubyte renderer[] = "PortableGL";
	static GLubyte version[] = "OpenGL 3.x-ish PortableGL 0.95";
	static GLubyte shading_language[] = "C/C++";

	if (name == GL_VENDOR)
		return vendor;
	if (name == GL_RENDERER)
		return renderer;
	if (name == GL_VERSION)
		return version;
	if (name == GL_SHADING_LANGUAGE_VERSION)
		return shading_language;

	if (!c->error)
		c->error = GL_INVALID_ENUM;
	return 0;
}
GLenum glGetError()
{
GLenum err = c->error;
c->error = GL_NO_ERROR;
return err;
}
// Generate n vertex array names into arrays[]. Deleted slots are reused
// before the vector grows; names are written back-to-front (arrays[n-1]
// first). Index 0 is the reserved default VAO and is never handed out.
void glGenVertexArrays(GLsizei n, GLuint* arrays)
{
	glVertex_Array tmp;
	init_glVertex_Array(&tmp);

	tmp.deleted = GL_FALSE;

	//fill up empty slots first
	--n;
	for (int i=1; i<c->vertex_arrays.size && n>=0; ++i) {
		if (c->vertex_arrays.a[i].deleted) {
			c->vertex_arrays.a[i] = tmp;
			arrays[n--] = i;
		}
	}

	// any remaining names come from growing the vector
	for (; n>=0; --n) {
		cvec_push_glVertex_Array(&c->vertex_arrays, tmp);
		arrays[n] = c->vertex_arrays.size-1;
	}
}
// Mark n vertex array names as deleted. Name 0 and out-of-range names are
// ignored. Deleting the currently bound VAO copies it into the default
// slot 0 and rebinds 0, so current attribute state survives.
void glDeleteVertexArrays(GLsizei n, const GLuint* arrays)
{
	for (int i=0; i<n; ++i) {
		if (!arrays[i] || arrays[i] >= c->vertex_arrays.size)
			continue;

		if (arrays[i] == c->cur_vertex_array) {
			//TODO check if memcpy isn't enough
			memcpy(&c->vertex_arrays.a[0], &c->vertex_arrays.a[arrays[i]], sizeof(glVertex_Array));
			c->cur_vertex_array = 0;
		}

		c->vertex_arrays.a[arrays[i]].deleted = GL_TRUE;
	}
}
// Generate n buffer object names into buffers[], reusing deleted slots
// before growing the vector (names written back-to-front). Index 0 is
// reserved/invalid and never handed out.
void glGenBuffers(GLsizei n, GLuint* buffers)
{
	glBuffer tmp;
	tmp.user_owned = GL_TRUE; // NOTE: Doesn't really matter at this point
	tmp.data = NULL;
	tmp.deleted = GL_FALSE;
	// glDeleteBuffers reads .type to unbind the buffer; it was previously
	// left uninitialized here, so deleting a generated-but-never-bound
	// buffer indexed bound_buffers[] with garbage. 0 is the index for
	// GL_ARRAY_BUFFER (see glBindBuffer, which stores target - GL_ARRAY_BUFFER).
	tmp.type = 0;

	//fill up empty slots first
	--n;
	for (int i=1; i<c->buffers.size && n>=0; ++i) {
		if (c->buffers.a[i].deleted) {
			c->buffers.a[i] = tmp;
			buffers[n--] = i;
		}
	}

	for (; n>=0; --n) {
		cvec_push_glBuffer(&c->buffers, tmp);
		buffers[n] = c->buffers.size-1;
	}
}
// Mark n buffer names as deleted, unbinding each from its target and
// freeing library-owned data. Name 0 and out-of-range names are ignored.
// NOTE(review): relies on .type having been initialized — it is only set on
// bind (see glBindBuffer); verify what glGenBuffers leaves in it for
// generated-but-never-bound buffers.
void glDeleteBuffers(GLsizei n, const GLuint* buffers)
{
	GLenum type;
	for (int i=0; i<n; ++i) {
		if (!buffers[i] || buffers[i] >= c->buffers.size)
			continue;

		// NOTE(rswinkle): type is stored as correct index not the raw enum value so no need to
		// subtract here see glBindBuffer
		type = c->buffers.a[buffers[i]].type;
		if (buffers[i] == c->bound_buffers[type])
			c->bound_buffers[type] = 0;

		if (!c->buffers.a[buffers[i]].user_owned) {
			free(c->buffers.a[buffers[i]].data);
			c->buffers.a[buffers[i]].data = NULL;
		}
		c->buffers.a[buffers[i]].deleted = GL_TRUE;
	}
}
// Generate n texture names into textures[], reusing deleted slots before
// growing the vector (names written back-to-front). The reuse scan now
// starts at 1, matching glGenBuffers/glGenVertexArrays: slot 0 is the
// reserved default texture pushed by init_glContext and must never be
// handed out (the old i=0 start could only ever reuse it if slot 0 were
// somehow marked deleted, but the inconsistency was a hazard).
void glGenTextures(GLsizei n, GLuint* textures)
{
	glTexture tmp;
	//SET_VEC4(tmp.border_color, 0, 0, 0, 0);
	tmp.mag_filter = GL_LINEAR;
	tmp.min_filter = GL_LINEAR; //NOTE: spec says should be mipmap_linear
	tmp.wrap_s = GL_REPEAT;
	tmp.wrap_t = GL_REPEAT;
	tmp.data = NULL;
	tmp.deleted = GL_FALSE;
	tmp.user_owned = GL_TRUE; // NOTE: could be either before data
	tmp.format = GL_RGBA;
	tmp.type = GL_TEXTURE_UNBOUND;
	tmp.w = 0;
	tmp.h = 0;
	tmp.d = 0;

	//fill up empty slots first, skipping reserved slot 0
	--n;
	for (int i=1; i<c->textures.size && n>=0; ++i) {
		if (c->textures.a[i].deleted) {
			c->textures.a[i] = tmp;
			textures[n--] = i;
		}
	}

	for (; n>=0; --n) {
		cvec_push_glTexture(&c->textures, tmp);
		textures[n] = c->textures.size-1;
	}
}
// Mark n texture names as deleted, unbinding each from its target and
// freeing library-owned pixel data. Name 0 and out-of-range names are
// ignored. (Signature takes a non-const pointer, unlike glDeleteBuffers;
// kept as-is to match whatever prototype is declared elsewhere.)
void glDeleteTextures(GLsizei n, GLuint* textures)
{
	GLenum type;
	for (int i=0; i<n; ++i) {
		if (!textures[i] || textures[i] >= c->textures.size)
			continue;

		// NOTE(rswinkle): type is stored as correct index not the raw enum value so no need to
		// subtract here see glBindTexture
		type = c->textures.a[textures[i]].type;
		if (textures[i] == c->bound_textures[type])
			c->bound_textures[type] = 0;

		if (!c->textures.a[textures[i]].user_owned) {
			free(c->textures.a[textures[i]].data);
			c->textures.a[textures[i]].data = NULL;
		}
		c->textures.a[textures[i]].deleted = GL_TRUE;
	}
}
// Bind a vertex array object. Invalid or deleted names record
// GL_INVALID_OPERATION (if no error is pending) and leave the binding alone.
void glBindVertexArray(GLuint array)
{
	if (array >= c->vertex_arrays.size || c->vertex_arrays.a[array].deleted != GL_FALSE) {
		if (!c->error)
			c->error = GL_INVALID_OPERATION;
		return;
	}
	c->cur_vertex_array = array;
}
// Bind a buffer object to a target. Only GL_ARRAY_BUFFER and
// GL_ELEMENT_ARRAY_BUFFER are supported; other enums from the full GL list
// below set GL_INVALID_ENUM.
void glBindBuffer(GLenum target, GLuint buffer)
{
//GL_ARRAY_BUFFER, GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER, GL_ELEMENT_ARRAY_BUFFER,
//GL_PIXEL_PACK_BUFFER, GL_PIXEL_UNPACK_BUFFER, GL_TEXTURE_BUFFER, GL_TRANSFORM_FEEDBACK_BUFFER, or GL_UNIFORM_BUFFER.
	if (target != GL_ARRAY_BUFFER && target != GL_ELEMENT_ARRAY_BUFFER) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}

	// convert the enum into an index into c->bound_buffers
	target -= GL_ARRAY_BUFFER;
	if (buffer < c->buffers.size && c->buffers.a[buffer].deleted == GL_FALSE) {
		c->bound_buffers[target] = buffer;

		// Note type isn't set till binding and we're not storing the raw
		// enum but the enum - GL_ARRAY_BUFFER so it's an index into c->bound_buffers
		// TODO need to see what's supposed to happen if you try to bind
		// a buffer to multiple targets
		c->buffers.a[buffer].type = target;
	} else if (!c->error) {
		c->error = GL_INVALID_OPERATION;
	}
}
// (Re)allocate the data store of the buffer bound to `target` and optionally
// fill it from `data` (NULL leaves the store uninitialized).
// Errors: GL_INVALID_ENUM (unsupported target), GL_INVALID_OPERATION
// (no buffer bound), GL_OUT_OF_MEMORY (allocation failure).
// NOTE(review): `size` is not validated; a negative GLsizei would be passed
// to malloc/memcpy — spec says GL_INVALID_VALUE. `usage` is accepted but
// currently ignored.
void glBufferData(GLenum target, GLsizei size, const GLvoid* data, GLenum usage)
{
if (target != GL_ARRAY_BUFFER && target != GL_ELEMENT_ARRAY_BUFFER) {
if (!c->error)
c->error = GL_INVALID_ENUM;
return;
}
//check for usage later
// from here on, target is the index into c->bound_buffers (see glBindBuffer)
target -= GL_ARRAY_BUFFER;
if (c->bound_buffers[target] == 0) {
if (!c->error)
c->error = GL_INVALID_OPERATION;
return;
}
//always NULL or valid
free(c->buffers.a[c->bound_buffers[target]].data);
if (!(c->buffers.a[c->bound_buffers[target]].data = (u8*) malloc(size))) {
if (!c->error)
c->error = GL_OUT_OF_MEMORY;
// GL state is undefined from here on
return;
}
if (data) {
memcpy(c->buffers.a[c->bound_buffers[target]].data, data, size);
}
// data store is now library-owned, so glDeleteBuffers will free it
c->buffers.a[c->bound_buffers[target]].user_owned = GL_FALSE;
c->buffers.a[c->bound_buffers[target]].size = size;
// an element buffer also becomes part of the current VAO's state
if (target == GL_ELEMENT_ARRAY_BUFFER - GL_ARRAY_BUFFER) {
c->vertex_arrays.a[c->cur_vertex_array].element_buffer = c->bound_buffers[target];
}
}
// Update the subrange [offset, offset+size) of the data store of the buffer
// bound to `target` from the user pointer `data`.
// Errors: GL_INVALID_ENUM (unsupported target), GL_INVALID_OPERATION
// (no buffer bound), GL_INVALID_VALUE (negative or out-of-range region).
void glBufferSubData(GLenum target, GLsizei offset, GLsizei size, const GLvoid* data)
{
	if (target != GL_ARRAY_BUFFER && target != GL_ELEMENT_ARRAY_BUFFER) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}

	// target becomes the index into c->bound_buffers (see glBindBuffer)
	target -= GL_ARRAY_BUFFER;
	if (c->bound_buffers[target] == 0) {
		if (!c->error)
			c->error = GL_INVALID_OPERATION;
		return;
	}

	// BUGFIX: also reject negative offset/size.  Both are signed, and a
	// negative offset previously slipped past the upper-bound check and
	// produced an out-of-bounds memcpy.
	if (offset < 0 || size < 0 ||
	    offset + size > c->buffers.a[c->bound_buffers[target]].size) {
		if (!c->error)
			c->error = GL_INVALID_VALUE;
		return;
	}

	memcpy(&c->buffers.a[c->bound_buffers[target]].data[offset], data, size);
}
// Bind a texture name to a target.  A texture's type is fixed at its first
// bind; rebinding to a different target is GL_INVALID_OPERATION.
// Errors: GL_INVALID_ENUM (bad target), GL_INVALID_VALUE (bad name).
void glBindTexture(GLenum target, GLuint texture)
{
// assumes the texture-target enums form a contiguous run starting at
// GL_TEXTURE_1D (see GL_NUM_TEXTURE_TYPES in this file's enum definitions)
if (target < GL_TEXTURE_1D || target >= GL_NUM_TEXTURE_TYPES) {
if (!c->error)
c->error = GL_INVALID_ENUM;
return;
}
// shift to 0-based index into c->bound_textures; the same index form is
// stored in glTexture.type (see glDeleteTextures)
target -= GL_TEXTURE_UNBOUND + 1;
if (texture < c->textures.size && !c->textures.a[texture].deleted) {
if (c->textures.a[texture].type == GL_TEXTURE_UNBOUND) {
// first bind fixes the texture's type
c->bound_textures[target] = texture;
c->textures.a[texture].type = target;
} else if (c->textures.a[texture].type == target) {
c->bound_textures[target] = texture;
} else if (!c->error) {
// already bound to a different target at some point
c->error = GL_INVALID_OPERATION;
}
} else if (!c->error) {
c->error = GL_INVALID_VALUE;
}
}
// Set a sampling parameter on the texture currently bound to `target`.
// Only the min/mag filters and wrap modes are implemented.
// NOTE(review): if no texture is bound to `target`, this writes the
// parameter into texture slot 0 — bound_textures[target] is not checked.
void glTexParameteri(GLenum target, GLenum pname, GLint param)
{
//GL_TEXTURE_1D, GL_TEXTURE_2D, GL_TEXTURE_3D, GL_TEXTURE_1D_ARRAY, GL_TEXTURE_2D_ARRAY, GL_TEXTURE_RECTANGLE, or GL_TEXTURE_CUBE_MAP.
//will add others as they're implemented
if (target != GL_TEXTURE_1D && target != GL_TEXTURE_2D && target != GL_TEXTURE_3D && target != GL_TEXTURE_2D_ARRAY && target != GL_TEXTURE_RECTANGLE && target != GL_TEXTURE_CUBE_MAP) {
if (!c->error)
c->error = GL_INVALID_ENUM;
return;
}
//shift to range 0 - NUM_TEXTURES-1 to access bound_textures array
target -= GL_TEXTURE_UNBOUND + 1;
//GL_TEXTURE_BASE_LEVEL, GL_TEXTURE_COMPARE_FUNC, GL_TEXTURE_COMPARE_MODE, GL_TEXTURE_LOD_BIAS, GL_TEXTURE_MIN_FILTER,
//GL_TEXTURE_MAG_FILTER, GL_TEXTURE_MIN_LOD, GL_TEXTURE_MAX_LOD, GL_TEXTURE_MAX_LEVEL, GL_TEXTURE_SWIZZLE_R,
//GL_TEXTURE_SWIZZLE_G, GL_TEXTURE_SWIZZLE_B, GL_TEXTURE_SWIZZLE_A, GL_TEXTURE_WRAP_S, GL_TEXTURE_WRAP_T, or GL_TEXTURE_WRAP_R.
if (pname != GL_TEXTURE_MIN_FILTER && pname != GL_TEXTURE_MAG_FILTER &&
pname != GL_TEXTURE_WRAP_S && pname != GL_TEXTURE_WRAP_T && pname != GL_TEXTURE_WRAP_R) {
if (!c->error)
c->error = GL_INVALID_ENUM;
return;
}
if (pname == GL_TEXTURE_MIN_FILTER) {
if(param != GL_NEAREST && param != GL_LINEAR && param != GL_NEAREST_MIPMAP_NEAREST &&
param != GL_NEAREST_MIPMAP_LINEAR && param != GL_LINEAR_MIPMAP_NEAREST &&
param != GL_LINEAR_MIPMAP_LINEAR) {
if (!c->error)
c->error = GL_INVALID_ENUM;
return;
}
//TODO mipmapping isn't actually supported, not sure it's worth trouble/perf hit
//just adding the enums to make porting easier
// mipmap variants degrade to the plain filter they're based on
if (param == GL_NEAREST_MIPMAP_NEAREST || param == GL_NEAREST_MIPMAP_LINEAR)
param = GL_NEAREST;
if (param == GL_LINEAR_MIPMAP_NEAREST || param == GL_LINEAR_MIPMAP_LINEAR)
param = GL_LINEAR;
c->textures.a[c->bound_textures[target]].min_filter = param;
} else if (pname == GL_TEXTURE_MAG_FILTER) {
if(param != GL_NEAREST && param != GL_LINEAR) {
if (!c->error)
c->error = GL_INVALID_ENUM;
return;
}
c->textures.a[c->bound_textures[target]].mag_filter = param;
} else if (pname == GL_TEXTURE_WRAP_S) {
if(param != GL_REPEAT && param != GL_CLAMP_TO_EDGE && param != GL_CLAMP_TO_BORDER && param != GL_MIRRORED_REPEAT) {
if (!c->error)
c->error = GL_INVALID_ENUM;
return;
}
c->textures.a[c->bound_textures[target]].wrap_s = param;
} else if (pname == GL_TEXTURE_WRAP_T) {
if(param != GL_REPEAT && param != GL_CLAMP_TO_EDGE && param != GL_CLAMP_TO_BORDER && param != GL_MIRRORED_REPEAT) {
if (!c->error)
c->error = GL_INVALID_ENUM;
return;
}
c->textures.a[c->bound_textures[target]].wrap_t = param;
} else if (pname == GL_TEXTURE_WRAP_R) {
if(param != GL_REPEAT && param != GL_CLAMP_TO_EDGE && param != GL_CLAMP_TO_BORDER && param != GL_MIRRORED_REPEAT) {
if (!c->error)
c->error = GL_INVALID_ENUM;
return;
}
c->textures.a[c->bound_textures[target]].wrap_r = param;
}
}
// Set a pixel storage mode.  Only the pack/unpack row alignments are
// implemented; the alignment must be 1, 2, 4, or 8 bytes per the GL spec.
void glPixelStorei(GLenum pname, GLint param)
{
	if (pname != GL_UNPACK_ALIGNMENT && pname != GL_PACK_ALIGNMENT) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}

	switch (param) {
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		if (!c->error)
			c->error = GL_INVALID_VALUE;
		return;
	}

	// pname is guaranteed to be one of the two accepted values here
	if (pname == GL_UNPACK_ALIGNMENT)
		c->unpack_alignment = param;
	else
		c->pack_alignment = param;
}
// Allocate and optionally fill the image of the currently bound 1D texture.
// Only GL_UNSIGNED_BYTE data is handled (other types silently return, TODO).
// Errors: GL_INVALID_ENUM (target/format), GL_INVALID_VALUE (border != 0),
// GL_OUT_OF_MEMORY.
void glTexImage1D(GLenum target, GLint level, GLint internalFormat, GLsizei width, GLint border, GLenum format, GLenum type, const GLvoid* data)
{
	if (target != GL_TEXTURE_1D) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}

	if (border) {
		if (!c->error)
			c->error = GL_INVALID_VALUE;
		return;
	}

	//ignore level for now
	int cur_tex = c->bound_textures[target-GL_TEXTURE_UNBOUND-1];
	c->textures.a[cur_tex].w = width;

	// only GL_UNSIGNED_BYTE pixel data is supported for now
	if (type != GL_UNSIGNED_BYTE) {
		return;
	}

	// bytes per texel implied by the client format
	int components;
	if (format == GL_RED) components = 1;
	else if (format == GL_RG) components = 2;
	else if (format == GL_RGB || format == GL_BGR) components = 3;
	else if (format == GL_RGBA || format == GL_BGRA) components = 4;
	else {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}

	// NULL or valid
	free(c->textures.a[cur_tex].data);

	//TODO support other internal formats? components should be of internalformat not format
	if (!(c->textures.a[cur_tex].data = (u8*) malloc(width * components))) {
		if (!c->error)
			c->error = GL_OUT_OF_MEMORY;
		//undefined state now
		return;
	}

	// BUGFIX: copy exactly the number of bytes allocated (width*components).
	// The old code always copied width*sizeof(u32), which overflowed the
	// allocation (and over-read the source) whenever components < 4.
	if (data)
		memcpy(c->textures.a[cur_tex].data, data, width * components);

	c->textures.a[cur_tex].user_owned = GL_FALSE;

	//TODO
	//assume for now always RGBA coming in and that's what I'm storing it as
}
// Allocate and optionally fill a 2D, rectangle, or cube-map-face image for
// the texture bound to the corresponding target.  Rows in `data` are assumed
// padded to c->unpack_alignment; storage is tightly packed.
// NOTE(review): internalFormat is currently ignored and storage is
// effectively treated as RGBA downstream (see TODOs below).
void glTexImage2D(GLenum target, GLint level, GLint internalFormat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const GLvoid* data)
{
//GL_TEXTURE_1D, GL_TEXTURE_2D, GL_TEXTURE_3D, GL_TEXTURE_1D_ARRAY, GL_TEXTURE_2D_ARRAY, GL_TEXTURE_RECTANGLE, or GL_TEXTURE_CUBE_MAP.
//will add others as they're implemented
if (target != GL_TEXTURE_2D &&
target != GL_TEXTURE_RECTANGLE &&
target != GL_TEXTURE_CUBE_MAP_POSITIVE_X &&
target != GL_TEXTURE_CUBE_MAP_NEGATIVE_X &&
target != GL_TEXTURE_CUBE_MAP_POSITIVE_Y &&
target != GL_TEXTURE_CUBE_MAP_NEGATIVE_Y &&
target != GL_TEXTURE_CUBE_MAP_POSITIVE_Z &&
target != GL_TEXTURE_CUBE_MAP_NEGATIVE_Z) {
if (!c->error)
c->error = GL_INVALID_ENUM;
return;
}
if (border) {
if (!c->error)
c->error = GL_INVALID_VALUE;
return;
}
//ignore level for now
//TODO support other types?
if (type != GL_UNSIGNED_BYTE) {
if (!c->error)
c->error = GL_INVALID_ENUM;
return;
}
// TODO I don't actually support anything other than GL_RGBA for input or
// internal format ... so I should probably make the others errors and
// I'm not even checking internalFormat currently..
int components;
if (format == GL_RED) components = 1;
else if (format == GL_RG) components = 2;
else if (format == GL_RGB || format == GL_BGR) components = 3;
else if (format == GL_RGBA || format == GL_BGRA) components = 4;
else {
if (!c->error)
c->error = GL_INVALID_ENUM;
return;
}
int cur_tex;
// TODO If I ever support type other than GL_UNSIGNED_BYTE (also using for both internalformat and format)
// source rows are padded out to a multiple of unpack_alignment;
// destination storage is tightly packed (byte_width per row)
int byte_width = width * components;
int padding_needed = byte_width % c->unpack_alignment;
int padded_row_len = (!padding_needed) ? byte_width : byte_width + c->unpack_alignment - padding_needed;
if (target == GL_TEXTURE_2D || target == GL_TEXTURE_RECTANGLE) {
cur_tex = c->bound_textures[target-GL_TEXTURE_UNBOUND-1];
c->textures.a[cur_tex].w = width;
c->textures.a[cur_tex].h = height;
// either NULL or valid
free(c->textures.a[cur_tex].data);
//TODO support other internal formats? components should be of internalformat not format
if (!(c->textures.a[cur_tex].data = (u8*) malloc(height * byte_width))) {
if (!c->error)
c->error = GL_OUT_OF_MEMORY;
//undefined state now
return;
}
if (data) {
if (!padding_needed) {
// tightly packed source: one bulk copy
memcpy(c->textures.a[cur_tex].data, data, height*byte_width);
} else {
// strip the per-row padding while copying
for (int i=0; i<height; ++i) {
memcpy(&c->textures.a[cur_tex].data[i*byte_width], &((u8*)data)[i*padded_row_len], byte_width);
}
}
}
c->textures.a[cur_tex].user_owned = GL_FALSE;
} else { //CUBE_MAP
// all six faces share one allocation on the GL_TEXTURE_CUBE_MAP binding
cur_tex = c->bound_textures[GL_TEXTURE_CUBE_MAP-GL_TEXTURE_UNBOUND-1];
// If we're reusing a texture, and we haven't already loaded
// one of the planes of the cubemap, data is either NULL or valid
if (!c->textures.a[cur_tex].w)
free(c->textures.a[cur_tex].data);
if (width != height) {
//TODO spec says INVALID_VALUE, man pages say INVALID_ENUM ?
if (!c->error)
c->error = GL_INVALID_VALUE;
return;
}
int mem_size = width*height*6 * components;
if (c->textures.a[cur_tex].w == 0) {
// first face loaded: allocate room for all six
c->textures.a[cur_tex].w = width;
c->textures.a[cur_tex].h = width; //same cause square
if (!(c->textures.a[cur_tex].data = (u8*) malloc(mem_size))) {
if (!c->error)
c->error = GL_OUT_OF_MEMORY;
//undefined state now
return;
}
} else if (c->textures.a[cur_tex].w != width) {
//TODO spec doesn't say all sides must have same dimensions but it makes sense
//and this site suggests it http://www.opengl.org/wiki/Cubemap_Texture
if (!c->error)
c->error = GL_INVALID_VALUE;
return;
}
target -= GL_TEXTURE_CUBE_MAP_POSITIVE_X; //use target as plane index
// TODO handle different format and internalFormat
// p = bytes per face
int p = height*byte_width;
u8* texdata = c->textures.a[cur_tex].data;
if (data) {
if (!padding_needed) {
memcpy(&texdata[target*p], data, height*byte_width);
} else {
for (int i=0; i<height; ++i) {
memcpy(&texdata[target*p + i*byte_width], &((u8*)data)[i*padded_row_len], byte_width);
}
}
}
c->textures.a[cur_tex].user_owned = GL_FALSE;
} //end CUBE_MAP
}
// Allocate and optionally fill the image of the currently bound 3D or
// 2D-array texture.  Rows in `data` are assumed padded to
// c->unpack_alignment; storage is tightly packed.
// Errors: GL_INVALID_ENUM (target/format), GL_INVALID_VALUE (border != 0),
// GL_OUT_OF_MEMORY.  Only GL_UNSIGNED_BYTE data is handled (TODO).
void glTexImage3D(GLenum target, GLint level, GLint internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const GLvoid* data)
{
	if (target != GL_TEXTURE_3D && target != GL_TEXTURE_2D_ARRAY) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}

	if (border) {
		if (!c->error)
			c->error = GL_INVALID_VALUE;
		return;
	}

	//ignore level for now
	int cur_tex = c->bound_textures[target-GL_TEXTURE_UNBOUND-1];
	c->textures.a[cur_tex].w = width;
	c->textures.a[cur_tex].h = height;
	c->textures.a[cur_tex].d = depth;

	if (type != GL_UNSIGNED_BYTE) {
		// TODO support other types
		return;
	}

	// TODO add error? only support GL_RGBA for now
	int components;
	if (format == GL_RED) components = 1;
	else if (format == GL_RG) components = 2;
	else if (format == GL_RGB || format == GL_BGR) components = 3;
	else if (format == GL_RGBA || format == GL_BGRA) components = 4;
	else {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}

	// source rows are padded out to a multiple of unpack_alignment
	int byte_width = width * components;
	int padding_needed = byte_width % c->unpack_alignment;
	int padded_row_len = (!padding_needed) ? byte_width : byte_width + c->unpack_alignment - padding_needed;

	// NULL or valid
	free(c->textures.a[cur_tex].data);

	//TODO support other internal formats? components should be of internalformat not format
	if (!(c->textures.a[cur_tex].data = (u8*) malloc(width*height*depth * components))) {
		if (!c->error)
			c->error = GL_OUT_OF_MEMORY;
		//undefined state now
		return;
	}

	// BUGFIX: do all copying in byte units.  The old code indexed a u32*
	// with byte offsets in the padded branch (placing rows 4x too far
	// apart) and always copied width*height*depth*sizeof(u32) bytes in the
	// unpadded branch (overflowing the allocation when components < 4).
	u8* texdata = c->textures.a[cur_tex].data;
	if (data) {
		if (!padding_needed) {
			// tightly packed: height*depth rows of byte_width bytes
			memcpy(texdata, data, height*depth*byte_width);
		} else {
			// strip per-row padding while copying
			for (int i=0; i<height*depth; ++i) {
				memcpy(&texdata[i*byte_width], &((u8*)data)[i*padded_row_len], byte_width);
			}
		}
	}
	c->textures.a[cur_tex].user_owned = GL_FALSE;

	//TODO
	//assume for now always RGBA coming in and that's what I'm storing it as
}
// Replace the texel range [xoffset, xoffset+width) of the bound 1D texture.
// Only GL_TEXTURE_1D with GL_UNSIGNED_BYTE / GL_RGBA data is supported.
// Errors: GL_INVALID_ENUM (target/type/format), GL_INVALID_VALUE (range).
void glTexSubImage1D(GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLenum type, const GLvoid* data)
{
	if (target != GL_TEXTURE_1D) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}

	//ignore level for now
	int tex_idx = c->bound_textures[target-GL_TEXTURE_UNBOUND-1];

	// only supported transfer type and format for now (TODO)
	if (type != GL_UNSIGNED_BYTE || format != GL_RGBA) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}

	// region must lie within the existing image
	if (xoffset < 0 || xoffset + width > c->textures.a[tex_idx].w) {
		if (!c->error)
			c->error = GL_INVALID_VALUE;
		return;
	}

	// storage is one u32 (RGBA8) per texel
	u32* row = (u32*) c->textures.a[tex_idx].data;
	memcpy(row + xoffset, data, width*sizeof(u32));
}
// Replace the region [xoffset,xoffset+width) x [yoffset,yoffset+height) of
// the bound 2D texture or cube-map face.  Only GL_UNSIGNED_BYTE / GL_RGBA
// data is supported.
// Errors: GL_INVALID_ENUM (target/type/format), GL_INVALID_VALUE (region).
void glTexSubImage2D(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid* data)
{
	//GL_TEXTURE_1D, GL_TEXTURE_2D, GL_TEXTURE_3D, GL_TEXTURE_1D_ARRAY, GL_TEXTURE_2D_ARRAY, GL_TEXTURE_RECTANGLE, or GL_TEXTURE_CUBE_MAP.
	//will add others as they're implemented
	if (target != GL_TEXTURE_2D &&
	    target != GL_TEXTURE_CUBE_MAP_POSITIVE_X &&
	    target != GL_TEXTURE_CUBE_MAP_NEGATIVE_X &&
	    target != GL_TEXTURE_CUBE_MAP_POSITIVE_Y &&
	    target != GL_TEXTURE_CUBE_MAP_NEGATIVE_Y &&
	    target != GL_TEXTURE_CUBE_MAP_POSITIVE_Z &&
	    target != GL_TEXTURE_CUBE_MAP_NEGATIVE_Z) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}

	//ignore level for now
	if (type != GL_UNSIGNED_BYTE) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}

	if (format != GL_RGBA) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}

	int cur_tex;
	u32* d = (u32*) data;

	if (target == GL_TEXTURE_2D) {
		cur_tex = c->bound_textures[target-GL_TEXTURE_UNBOUND-1];
		u32* texdata = (u32*) c->textures.a[cur_tex].data;

		if (xoffset < 0 || xoffset + width > c->textures.a[cur_tex].w || yoffset < 0 || yoffset + height > c->textures.a[cur_tex].h) {
			if (!c->error)
				c->error = GL_INVALID_VALUE;
			return;
		}

		int w = c->textures.a[cur_tex].w;
		for (int i=0; i<height; ++i) {
			memcpy(&texdata[(yoffset+i)*w + xoffset], &d[i*width], width*sizeof(u32));
		}
	} else { //CUBE_MAP
		cur_tex = c->bound_textures[GL_TEXTURE_CUBE_MAP-GL_TEXTURE_UNBOUND-1];
		u32* texdata = (u32*) c->textures.a[cur_tex].data;

		int w = c->textures.a[cur_tex].w;

		// BUGFIX: bounds-check the region against the (square) face just
		// like the GL_TEXTURE_2D path; previously the cube path skipped
		// this and an oversized region wrote out of bounds.
		if (xoffset < 0 || xoffset + width > w || yoffset < 0 || yoffset + height > c->textures.a[cur_tex].h) {
			if (!c->error)
				c->error = GL_INVALID_VALUE;
			return;
		}

		target -= GL_TEXTURE_CUBE_MAP_POSITIVE_X; //use target as plane index

		int p = w*w; // texels per face
		for (int i=0; i<height; ++i)
			memcpy(&texdata[p*target + (yoffset+i)*w + xoffset], &d[i*width], width*sizeof(u32));
	} //end CUBE_MAP
}
// Replace a 3D sub-region of the bound 3D or 2D-array texture.  Only
// GL_UNSIGNED_BYTE / GL_RGBA data is supported, copied slice by slice.
// Errors: GL_INVALID_ENUM (target/type/format), GL_INVALID_VALUE (region).
void glTexSubImage3D(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const GLvoid* data)
{
	if (target != GL_TEXTURE_3D && target != GL_TEXTURE_2D_ARRAY) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}

	//ignore level for now
	// TODO handle UNPACK alignment here as well...
	int tex_idx = c->bound_textures[target-GL_TEXTURE_UNBOUND-1];

	if (type != GL_UNSIGNED_BYTE) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}

	//TODO other formats
	if (format != GL_RGBA) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}

	// the whole box must lie inside the existing image
	if (xoffset < 0 || xoffset + width  > c->textures.a[tex_idx].w ||
	    yoffset < 0 || yoffset + height > c->textures.a[tex_idx].h ||
	    zoffset < 0 || zoffset + depth  > c->textures.a[tex_idx].d) {
		if (!c->error)
			c->error = GL_INVALID_VALUE;
		return;
	}

	int row_len = c->textures.a[tex_idx].w;          // texels per row
	int slice_len = row_len * c->textures.a[tex_idx].h; // texels per slice
	u32* src = (u32*) data;
	u32* dst = (u32*) c->textures.a[tex_idx].data;

	// copy one row at a time; source is tightly packed width x height x depth
	for (int z=0; z<depth; ++z) {
		for (int y=0; y<height; ++y) {
			memcpy(&dst[(zoffset+z)*slice_len + (yoffset+y)*row_len + xoffset],
			       &src[(z*height + y)*width], width*sizeof(u32));
		}
	}
}
// Define the layout of vertex attribute `index` within the buffer currently
// bound to GL_ARRAY_BUFFER.  Only GL_FLOAT components are supported.
// NOTE(review): spec says index >= GL_MAX_VERTEX_ATTRIBS should be
// GL_INVALID_VALUE; this implementation folds it into the
// GL_INVALID_OPERATION check — confirm intent.
void glVertexAttribPointer(GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, GLsizei offset)
{
// a nonzero offset with no array buffer bound is the classic
// client-array misuse, hence GL_INVALID_OPERATION
if (index >= GL_MAX_VERTEX_ATTRIBS || size < 1 || size > 4 || (!c->bound_buffers[GL_ARRAY_BUFFER-GL_ARRAY_BUFFER] && offset)) {
if (!c->error)
c->error = GL_INVALID_OPERATION;
return;
}
//TODO type Specifies the data type of each component in the array. The symbolic constants GL_BYTE, GL_UNSIGNED_BYTE, GL_SHORT,
//GL_UNSIGNED_SHORT, GL_INT, and GL_UNSIGNED_INT are accepted by both functions. Additionally GL_HALF_FLOAT, GL_FLOAT, GL_DOUBLE,
//GL_INT_2_10_10_10_REV, and GL_UNSIGNED_INT_2_10_10_10_REV are accepted by glVertexAttribPointer. The initial value is GL_FLOAT.
if (type != GL_FLOAT) {
if (!c->error)
c->error = GL_INVALID_ENUM;
return;
}
// record the layout in the current VAO
glVertex_Attrib* v = &(c->vertex_arrays.a[c->cur_vertex_array].vertex_attribs[index]);
v->size = size;
v->type = type;
//TODO expand for other types etc.
// stride 0 means tightly packed: one attribute's worth of floats apart
v->stride = (stride) ? stride : size*sizeof(GLfloat);
v->offset = offset;
v->normalized = normalized;
// I put ARRAY_BUFFER-itself instead of 0 to reinforce that bound_buffers is indexed that way, buffer type - GL_ARRAY_BUFFER
v->buf = c->bound_buffers[GL_ARRAY_BUFFER-GL_ARRAY_BUFFER]; //can be 0 if offset is 0/NULL
}
// Enable vertex attribute `index` in the current VAO.
// BUGFIX: bounds-check index like glVertexAttribDivisor does; an
// out-of-range index previously wrote past the vertex_attribs array.
void glEnableVertexAttribArray(GLuint index)
{
	if (index >= GL_MAX_VERTEX_ATTRIBS) {
		if (!c->error)
			c->error = GL_INVALID_VALUE;
		return;
	}
	c->vertex_arrays.a[c->cur_vertex_array].vertex_attribs[index].enabled = GL_TRUE;
}
// Disable vertex attribute `index` in the current VAO.
// BUGFIX: bounds-check index like glVertexAttribDivisor does; an
// out-of-range index previously wrote past the vertex_attribs array.
void glDisableVertexAttribArray(GLuint index)
{
	if (index >= GL_MAX_VERTEX_ATTRIBS) {
		if (!c->error)
			c->error = GL_INVALID_VALUE;
		return;
	}
	c->vertex_arrays.a[c->cur_vertex_array].vertex_attribs[index].enabled = GL_FALSE;
}
// Set the instancing divisor for attribute `index` in the current VAO.
// divisor 0 = advance per vertex; N > 0 = advance once every N instances.
void glVertexAttribDivisor(GLuint index, GLuint divisor)
{
	// index must name a valid attribute slot
	if (index >= GL_MAX_VERTEX_ATTRIBS) {
		if (!c->error)
			c->error = GL_INVALID_VALUE;
		return;
	}

	c->vertex_arrays.a[c->cur_vertex_array].vertex_attribs[index].divisor = divisor;
}
//TODO not used
// Fetch attribute element i from its backing buffer as a vec4.
// Only float attributes are handled; components beyond v->size are left
// uninitialized, matching how the pipeline consumes them.
vec4 get_vertex_attrib_array(glVertex_Attrib* v, GLsizei i)
{
	//this line need work for future flexibility and handling more than floats
	u8* src = (u8*)c->buffers.a[v->buf].data + v->offset + v->stride*i;

	vec4 out;
	memcpy(&out, src, sizeof(float)*v->size);
	return out;
}
// Draw `count` vertices starting at `first` using the current program,
// VAO and bound buffers (non-indexed path).
void glDrawArrays(GLenum mode, GLint first, GLsizei count)
{
	// primitive modes form a contiguous enum run
	if (mode < GL_POINTS || mode > GL_TRIANGLE_FAN) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}

	// TODO should I just make GLsizei an uint32_t rather than int32_t?
	if (count < 0) {
		if (!c->error)
			c->error = GL_INVALID_VALUE;
		return;
	}

	if (count == 0)
		return; // nothing to draw

	run_pipeline(mode, first, count, 0, 0, GL_FALSE);
}
// Draw `count` indexed vertices, reading indices of `type` from the
// element buffer of the current VAO starting at byte `offset`.
void glDrawElements(GLenum mode, GLsizei count, GLenum type, GLsizei offset)
{
	if (mode < GL_POINTS || mode > GL_TRIANGLE_FAN) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}

	//error not in the spec but says type must be one of these ... strange
	if (type != GL_UNSIGNED_BYTE && type != GL_UNSIGNED_SHORT && type != GL_UNSIGNED_INT) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}

	// TODO should I just make GLsizei an uint32_t rather than int32_t?
	if (count < 0) {
		if (!c->error)
			c->error = GL_INVALID_VALUE;
		return;
	}

	if (count == 0)
		return; // nothing to draw

	// the pipeline reads the index size from the element buffer's type field
	c->buffers.a[c->vertex_arrays.a[c->cur_vertex_array].element_buffer].type = type;
	run_pipeline(mode, offset, count, 0, 0, GL_TRUE);
}
// Draw `instancecount` instances of `count` vertices starting at `first`.
// Each instance runs the pipeline once with its instance id.
void glDrawArraysInstanced(GLenum mode, GLint first, GLsizei count, GLsizei instancecount)
{
	if (mode < GL_POINTS || mode > GL_TRIANGLE_FAN) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}

	if (count < 0 || instancecount < 0) {
		if (!c->error)
			c->error = GL_INVALID_VALUE;
		return;
	}

	if (count == 0 || instancecount == 0)
		return; // nothing to draw

	for (unsigned int inst = 0; inst < instancecount; ++inst)
		run_pipeline(mode, first, count, inst, 0, GL_FALSE);
}
// Like glDrawArraysInstanced, but instanced attribute fetches are offset
// by `baseinstance`.
void glDrawArraysInstancedBaseInstance(GLenum mode, GLint first, GLsizei count, GLsizei instancecount, GLuint baseinstance)
{
	if (mode < GL_POINTS || mode > GL_TRIANGLE_FAN) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}

	if (count < 0 || instancecount < 0) {
		if (!c->error)
			c->error = GL_INVALID_VALUE;
		return;
	}

	if (count == 0 || instancecount == 0)
		return; // nothing to draw

	for (unsigned int inst = 0; inst < instancecount; ++inst)
		run_pipeline(mode, first, count, inst, baseinstance, GL_FALSE);
}
// Draw `instancecount` instances of an indexed draw (see glDrawElements).
void glDrawElementsInstanced(GLenum mode, GLsizei count, GLenum type, GLsizei offset, GLsizei instancecount)
{
	if (mode < GL_POINTS || mode > GL_TRIANGLE_FAN) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}

	// NOTE: error not in the spec but says type must be one of these ... strange
	if (type != GL_UNSIGNED_BYTE && type != GL_UNSIGNED_SHORT && type != GL_UNSIGNED_INT) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}

	if (count < 0 || instancecount < 0) {
		if (!c->error)
			c->error = GL_INVALID_VALUE;
		return;
	}

	if (count == 0 || instancecount == 0)
		return; // nothing to draw

	// the pipeline reads the index size from the element buffer's type field
	c->buffers.a[c->vertex_arrays.a[c->cur_vertex_array].element_buffer].type = type;
	for (unsigned int inst = 0; inst < instancecount; ++inst)
		run_pipeline(mode, offset, count, inst, 0, GL_TRUE);
}
// Like glDrawElementsInstanced, but instanced attribute fetches are offset
// by `baseinstance`.
void glDrawElementsInstancedBaseInstance(GLenum mode, GLsizei count, GLenum type, GLsizei offset, GLsizei instancecount, GLuint baseinstance)
{
	if (mode < GL_POINTS || mode > GL_TRIANGLE_FAN) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}

	//error not in the spec but says type must be one of these ... strange
	if (type != GL_UNSIGNED_BYTE && type != GL_UNSIGNED_SHORT && type != GL_UNSIGNED_INT) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}

	if (count < 0 || instancecount < 0) {
		if (!c->error)
			c->error = GL_INVALID_VALUE;
		return;
	}

	if (count == 0 || instancecount == 0)
		return; // nothing to draw

	// the pipeline reads the index size from the element buffer's type field
	c->buffers.a[c->vertex_arrays.a[c->cur_vertex_array].element_buffer].type = type;
	for (unsigned int inst = 0; inst < instancecount; ++inst)
		run_pipeline(mode, offset, count, inst, baseinstance, GL_TRUE);
}
// Set the viewport rectangle: rebuilds the viewport transform matrix and
// caches the pixel bounds used by the rasterizer for clipping.
void glViewport(int x, int y, GLsizei width, GLsizei height)
{
	// negative dimensions are an error per the GL spec
	if (width < 0 || height < 0) {
		if (!c->error)
			c->error = GL_INVALID_VALUE;
		return;
	}

	c->x_min = x;
	c->x_max = x + width;
	c->y_min = y;
	c->y_max = y + height;
	make_viewport_matrix(c->vp_mat, x, y, width, height, 1);
}
// Set the color used by glClear for the color buffer.
// Each channel is clamped to [0,1] before being packed into a Color.
void glClearColor(GLclampf red, GLclampf green, GLclampf blue, GLclampf alpha)
{
	vec4 clamped = { clampf_01(red), clampf_01(green), clampf_01(blue), clampf_01(alpha) };
	c->clear_color = vec4_to_Color(clamped);
}
// Set the depth value used by glClear for the depth buffer, clamped to [0,1].
void glClearDepth(GLclampf depth)
{
	c->clear_depth = clampf_01(depth);
}
// Select the depth comparison function.
// Relies on the depth-func enums forming a contiguous run GL_LESS..GL_NEVER
// in this implementation's enum ordering.
void glDepthFunc(GLenum func)
{
	if (func < GL_LESS || func > GL_NEVER) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}

	c->depth_func = func;
}
// Map NDC depth to window depth; both endpoints are clamped to [0,1].
void glDepthRange(GLclampf nearVal, GLclampf farVal)
{
	c->depth_range_near = clampf_01(nearVal);
	c->depth_range_far  = clampf_01(farVal);
}
// Enable or disable writes to the depth buffer.
void glDepthMask(GLboolean flag)
{
	c->depth_mask = flag;
}
// Clear the selected buffers (color, depth, stencil) either fully or, when
// the scissor test is enabled, only within the scissor rectangle.
// The scissored paths index through `lastrow` with a negated y because the
// framebuffers are stored bottom-up relative to GL's coordinate system.
// NOTE(review): the printf below looks like leftover debug output — consider
// removing or routing through a logging macro.
void glClear(GLbitfield mask)
{
if (!(mask & (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT))) {
if (!c->error)
c->error = GL_INVALID_VALUE;
printf("failed to clear\n");
return;
}
// TODO since all the buffers should be the same width and height
// (right? even though they're different types they should be 1 to 1),
// why not just set local w and h and use for all instead of member w/h
// for each framebuffer?
// better to just set min/max x/y and use nested loops even when scissor is disabled?
Color col = c->clear_color;
if (mask & GL_COLOR_BUFFER_BIT) {
if (!c->scissor_test) {
// full clear: pack the clear color per the context's channel shifts
for (int i=0; i<c->back_buffer.w*c->back_buffer.h; ++i) {
((u32*)c->back_buffer.buf)[i] = (u32)col.a << c->Ashift | (u32)col.r << c->Rshift | (u32)col.g << c->Gshift | (u32)col.b << c->Bshift;
}
} else {
for (int y=c->scissor_ly; y<c->scissor_uy; ++y) {
for (int x=c->scissor_lx; x<c->scissor_ux; ++x) {
((u32*)c->back_buffer.lastrow)[-y*c->back_buffer.w + x] = (u32)col.a << c->Ashift | (u32)col.r << c->Rshift | (u32)col.g << c->Gshift | (u32)col.b << c->Bshift;
}
}
}
}
if (mask & GL_DEPTH_BUFFER_BIT) {
if (!c->scissor_test) {
//TODO try a big memcpy or other way to clear it
for (int i=0; i < c->zbuf.w * c->zbuf.h; ++i) {
((float*)c->zbuf.buf)[i] = c->clear_depth;
}
} else {
for (int y=c->scissor_ly; y<c->scissor_uy; ++y) {
for (int x=c->scissor_lx; x<c->scissor_ux; ++x) {
((float*)c->zbuf.lastrow)[-y*c->zbuf.w + x] = c->clear_depth;
}
}
}
}
if (mask & GL_STENCIL_BUFFER_BIT) {
if (!c->scissor_test) {
//TODO try a big memcpy or other way to clear it
for (int i=0; i < c->stencil_buf.w * c->stencil_buf.h; ++i) {
c->stencil_buf.buf[i] = c->clear_stencil;
}
} else {
for (int y=c->scissor_ly; y<c->scissor_uy; ++y) {
for (int x=c->scissor_lx; x<c->scissor_ux; ++x) {
c->stencil_buf.lastrow[-y*c->stencil_buf.w + x] = c->clear_stencil;
}
}
}
}
}
// Enable a server-side capability flag on the context.
// NOTE: GL_LINE_SMOOTH is accepted but deliberately left off (see comment),
// which is asymmetric with glDisable, where it does clear the flag.
void glEnable(GLenum cap)
{
switch (cap) {
case GL_CULL_FACE:
c->cull_face = GL_TRUE;
break;
case GL_DEPTH_TEST:
c->depth_test = GL_TRUE;
break;
case GL_DEPTH_CLAMP:
c->depth_clamp = GL_TRUE;
break;
case GL_LINE_SMOOTH:
// TODO implementation needs work/upgrade
//c->line_smooth = GL_TRUE;
break;
case GL_BLEND:
c->blend = GL_TRUE;
break;
case GL_COLOR_LOGIC_OP:
c->logic_ops = GL_TRUE;
break;
case GL_POLYGON_OFFSET_FILL:
c->poly_offset = GL_TRUE;
break;
case GL_SCISSOR_TEST:
c->scissor_test = GL_TRUE;
break;
case GL_STENCIL_TEST:
c->stencil_test = GL_TRUE;
break;
default:
if (!c->error)
c->error = GL_INVALID_ENUM;
}
}
// Disable a server-side capability flag on the context.
// Mirrors glEnable; unknown capabilities raise GL_INVALID_ENUM.
void glDisable(GLenum cap)
{
switch (cap) {
case GL_CULL_FACE:
c->cull_face = GL_FALSE;
break;
case GL_DEPTH_TEST:
c->depth_test = GL_FALSE;
break;
case GL_DEPTH_CLAMP:
c->depth_clamp = GL_FALSE;
break;
case GL_LINE_SMOOTH:
c->line_smooth = GL_FALSE;
break;
case GL_BLEND:
c->blend = GL_FALSE;
break;
case GL_COLOR_LOGIC_OP:
c->logic_ops = GL_FALSE;
break;
case GL_POLYGON_OFFSET_FILL:
c->poly_offset = GL_FALSE;
break;
case GL_SCISSOR_TEST:
c->scissor_test = GL_FALSE;
break;
case GL_STENCIL_TEST:
c->stencil_test = GL_FALSE;
break;
default:
if (!c->error)
c->error = GL_INVALID_ENUM;
}
}
// Query whether a capability is enabled.  Unknown capabilities raise
// GL_INVALID_ENUM and report GL_FALSE.
// TODO: make up an enum for fragdepth_or_discard? rename as no_early_z?
GLboolean glIsEnabled(GLenum cap)
{
	switch (cap) {
	case GL_CULL_FACE:           return c->cull_face;
	case GL_DEPTH_TEST:          return c->depth_test;
	case GL_DEPTH_CLAMP:         return c->depth_clamp;
	case GL_LINE_SMOOTH:         return c->line_smooth;
	case GL_BLEND:               return c->blend;
	case GL_COLOR_LOGIC_OP:      return c->logic_ops;
	case GL_POLYGON_OFFSET_FILL: return c->poly_offset;
	case GL_SCISSOR_TEST:        return c->scissor_test;
	case GL_STENCIL_TEST:        return c->stencil_test;
	default:
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return GL_FALSE;
	}
}
// Query a boolean state value.
// Not every enum is handled; the spec says glGet* converts/maps types when
// the queried state doesn't match the function, which isn't done here.
void glGetBooleanv(GLenum pname, GLboolean* params)
{
	switch (pname) {
	case GL_CULL_FACE:           *params = c->cull_face;    break;
	case GL_DEPTH_TEST:          *params = c->depth_test;   break;
	case GL_DEPTH_CLAMP:         *params = c->depth_clamp;  break;
	case GL_LINE_SMOOTH:         *params = c->line_smooth;  break;
	case GL_BLEND:               *params = c->blend;        break;
	case GL_COLOR_LOGIC_OP:      *params = c->logic_ops;    break;
	case GL_POLYGON_OFFSET_FILL: *params = c->poly_offset;  break;
	case GL_SCISSOR_TEST:        *params = c->scissor_test; break;
	case GL_STENCIL_TEST:        *params = c->stencil_test; break;
	default:
		if (!c->error)
			c->error = GL_INVALID_ENUM;
	}
}
// Query a floating-point state value.  GL_DEPTH_RANGE writes two values
// (near, far); everything else writes one.
void glGetFloatv(GLenum pname, GLfloat* params)
{
	switch (pname) {
	case GL_POLYGON_OFFSET_FACTOR: *params = c->poly_factor; break;
	case GL_POLYGON_OFFSET_UNITS: *params = c->poly_units; break;
	case GL_POINT_SIZE: *params = c->point_size; break;
	case GL_DEPTH_CLEAR_VALUE: *params = c->clear_depth; break;
	case GL_DEPTH_RANGE:
		params[0] = c->depth_range_near;
		// BUGFIX: second element is the far bound (was copy-pasted near)
		params[1] = c->depth_range_far;
		break;
	default:
		if (!c->error)
			c->error = GL_INVALID_ENUM;
	}
}
// Query an integer state value.  GL_POLYGON_MODE writes two values
// (front, back); everything else writes one.
// TODO maybe make all the enum/int member names match the associated ENUM?
void glGetIntegerv(GLenum pname, GLint* params)
{
	switch (pname) {
	case GL_STENCIL_WRITE_MASK: params[0] = c->stencil_writemask; break;
	case GL_STENCIL_REF: params[0] = c->stencil_ref; break;
	case GL_STENCIL_VALUE_MASK: params[0] = c->stencil_valuemask; break;
	case GL_STENCIL_FUNC: params[0] = c->stencil_func; break;
	case GL_STENCIL_FAIL: params[0] = c->stencil_sfail; break;
	case GL_STENCIL_PASS_DEPTH_FAIL: params[0] = c->stencil_dpfail; break;
	case GL_STENCIL_PASS_DEPTH_PASS: params[0] = c->stencil_dppass; break;

	case GL_STENCIL_BACK_WRITE_MASK: params[0] = c->stencil_writemask_back; break;
	case GL_STENCIL_BACK_REF: params[0] = c->stencil_ref_back; break;
	case GL_STENCIL_BACK_VALUE_MASK: params[0] = c->stencil_valuemask_back; break;
	case GL_STENCIL_BACK_FUNC: params[0] = c->stencil_func_back; break;
	case GL_STENCIL_BACK_FAIL: params[0] = c->stencil_sfail_back; break;
	case GL_STENCIL_BACK_PASS_DEPTH_FAIL: params[0] = c->stencil_dpfail_back; break;
	case GL_STENCIL_BACK_PASS_DEPTH_PASS: params[0] = c->stencil_dppass_back; break;

	//TODO implement glBlendFuncSeparate and glBlendEquationSeparate
	case GL_LOGIC_OP_MODE: params[0] = c->logic_func; break;
	case GL_BLEND_SRC_RGB:
	case GL_BLEND_SRC_ALPHA: params[0] = c->blend_sfactor; break;
	case GL_BLEND_DST_RGB:
	case GL_BLEND_DST_ALPHA: params[0] = c->blend_dfactor; break;
	case GL_BLEND_EQUATION_RGB:
	case GL_BLEND_EQUATION_ALPHA: params[0] = c->blend_equation; break;

	case GL_CULL_FACE_MODE: params[0] = c->cull_mode; break;
	case GL_FRONT_FACE: params[0] = c->front_face; break;
	case GL_DEPTH_FUNC: params[0] = c->depth_func; break;
	// BUGFIX: missing break here used to fall through and overwrite
	// params[0] with the provoking vertex convention
	case GL_POINT_SPRITE_COORD_ORIGIN: params[0] = c->point_spr_origin; break;
	case GL_PROVOKING_VERTEX: params[0] = c->provoking_vert; break;

	case GL_POLYGON_MODE:
		params[0] = c->poly_mode_front;
		params[1] = c->poly_mode_back;
		break;
	default:
		if (!c->error)
			c->error = GL_INVALID_ENUM;
	}
}
// Select which triangle faces are culled when GL_CULL_FACE is enabled.
void glCullFace(GLenum mode)
{
	switch (mode) {
	case GL_FRONT:
	case GL_BACK:
	case GL_FRONT_AND_BACK:
		c->cull_mode = mode;
		break;
	default:
		if (!c->error)
			c->error = GL_INVALID_ENUM;
	}
}
// Define which winding order (clockwise or counter-clockwise) is considered
// front-facing.
void glFrontFace(GLenum mode)
{
	switch (mode) {
	case GL_CCW:
	case GL_CW:
		c->front_face = mode;
		break;
	default:
		if (!c->error)
			c->error = GL_INVALID_ENUM;
	}
}
// Set the triangle rasterization mode (point, line, or fill) for the front
// and/or back faces, and install the matching triangle-draw function(s).
void glPolygonMode(GLenum face, GLenum mode)
{
	int face_ok = (face == GL_FRONT || face == GL_BACK || face == GL_FRONT_AND_BACK);
	int mode_ok = (mode == GL_POINT || mode == GL_LINE || mode == GL_FILL);
	if (!face_ok || !mode_ok) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}

	// GL_FRONT_AND_BACK updates both sides
	if (face != GL_BACK) {
		c->poly_mode_front = mode;
		c->draw_triangle_front = (mode == GL_POINT) ? draw_triangle_point :
		                         (mode == GL_LINE)  ? draw_triangle_line  :
		                                              draw_triangle_fill;
	}
	if (face != GL_FRONT) {
		c->poly_mode_back = mode;
		c->draw_triangle_back = (mode == GL_POINT) ? draw_triangle_point :
		                        (mode == GL_LINE)  ? draw_triangle_line  :
		                                             draw_triangle_fill;
	}
}
// Set the rasterized size of points; must be strictly positive per spec.
void glPointSize(GLfloat size)
{
	if (size <= 0.0f) {
		if (!c->error)
			c->error = GL_INVALID_VALUE;
		return;
	}

	c->point_size = size;
}
// Set a point-rendering parameter.  Only GL_POINT_SPRITE_COORD_ORIGIN is
// implemented (GL_POINT_FADE_THRESHOLD_SIZE would be the other candidate).
void glPointParameteri(GLenum pname, GLint param)
{
	int origin_ok = (param == GL_LOWER_LEFT || param == GL_UPPER_LEFT);
	if (pname != GL_POINT_SPRITE_COORD_ORIGIN || !origin_ok) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}

	c->point_spr_origin = param;
}
void glProvokingVertex(GLenum provokeMode)
{
	// Chooses which vertex of a primitive supplies flat-shaded outputs.
	switch (provokeMode) {
	case GL_FIRST_VERTEX_CONVENTION:
	case GL_LAST_VERTEX_CONVENTION:
		c->provoking_vert = provokeMode;
		return;
	default:
		if (!c->error)
			c->error = GL_INVALID_ENUM;
	}
}
// Shader functions
// Registers a new "program": a pair of C function pointers acting as the
// vertex and fragment shaders, plus interpolation qualifiers for the n
// user-defined vertex outputs. Returns the program handle (an index into
// c->programs), or 0 on failure. Note index 0 is never handed out, so 0
// is unambiguous as a failure value here.
GLuint pglCreateProgram(vert_func vertex_shader, frag_func fragment_shader, GLsizei n, GLenum* interpolation, GLboolean fragdepth_or_discard)
{
	if (!vertex_shader || !fragment_shader) {
		//TODO set error? doesn't in spec but I'll think about it
		return 0;
	}
	// Interpolation array in glProgram is fixed-size; too many outputs.
	if (n > GL_MAX_VERTEX_OUTPUT_COMPONENTS) {
		if (!c->error)
			c->error = GL_INVALID_VALUE;
		return 0;
	}
	glProgram tmp = {vertex_shader, fragment_shader, NULL, n, {0}, fragdepth_or_discard, GL_FALSE };
	for (int i=0; i<n; ++i) {
		tmp.interpolation[i] = interpolation[i];
	}
	// Recycle a lazily-deleted slot if one exists (skipping slot 0 and
	// the currently bound program); otherwise append a new one.
	for (int i=1; i<c->programs.size; ++i) {
		if (c->programs.a[i].deleted && i != c->cur_program) {
			c->programs.a[i] = tmp;
			return i;
		}
	}
	cvec_push_glProgram(&c->programs, tmp);
	return c->programs.size-1;
}
void glDeleteProgram(GLuint program)
{
	// Program 0 is the default program and is silently ignored.
	if (!program)
		return;
	if (program < c->programs.size) {
		// Lazy delete: the slot is only flagged so pglCreateProgram can
		// recycle it later.
		c->programs.a[program].deleted = GL_TRUE;
	} else if (!c->error) {
		c->error = GL_INVALID_VALUE;
	}
}
void glUseProgram(GLuint program)
{
	if (program >= c->programs.size) {
		if (!c->error)
			c->error = GL_INVALID_VALUE;
		return;
	}
	// Cache the program's vertex-output configuration in the context and
	// make sure the output buffer can hold a full batch of vertices.
	glProgram* p = &c->programs.a[program];
	c->vs_output.size = p->vs_output_size;
	cvec_reserve_float(&c->vs_output.output_buf, c->vs_output.size * MAX_VERTICES);
	c->vs_output.interpolation = p->interpolation;
	c->fragdepth_or_discard = p->fragdepth_or_discard;
	c->cur_program = program;
}
// Points the current program at a caller-owned uniform block. The pointer
// is stored as-is (no copy), so it must stay valid for as long as the
// program is used.
void pglSetUniform(void* uniform)
{
	//TODO check for NULL? definitely if I ever switch to storing a local
	//copy in glProgram
	c->programs.a[c->cur_program].uniform = uniform;
}
void glBlendFunc(GLenum sfactor, GLenum dfactor)
{
	// Both factors must lie in the contiguous blend-factor enum range
	// [GL_ZERO, NUM_BLEND_FUNCS).
	int src_ok = sfactor >= GL_ZERO && sfactor < NUM_BLEND_FUNCS;
	int dst_ok = dfactor >= GL_ZERO && dfactor < NUM_BLEND_FUNCS;
	if (!src_ok || !dst_ok) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}
	c->blend_sfactor = sfactor;
	c->blend_dfactor = dfactor;
}
void glBlendEquation(GLenum mode)
{
	// Valid equations are the contiguous range [GL_FUNC_ADD, NUM_BLEND_EQUATIONS).
	if (mode >= GL_FUNC_ADD && mode < NUM_BLEND_EQUATIONS) {
		c->blend_equation = mode;
	} else if (!c->error) {
		c->error = GL_INVALID_ENUM;
	}
}
// Sets the constant blend color; each component is clamped to [0, 1]
// before being stored.
void glBlendColor(GLclampf red, GLclampf green, GLclampf blue, GLclampf alpha)
{
	SET_VEC4(c->blend_color, clampf_01(red), clampf_01(green), clampf_01(blue), clampf_01(alpha));
}
void glLogicOp(GLenum opcode)
{
	// Logic ops form the contiguous enum range [GL_CLEAR, GL_INVERT].
	if (opcode >= GL_CLEAR && opcode <= GL_INVERT) {
		c->logic_func = opcode;
	} else if (!c->error) {
		c->error = GL_INVALID_ENUM;
	}
}
// Stores the polygon depth-offset parameters (scale factor and constant
// units). No validation is needed: any float value is acceptable.
void glPolygonOffset(GLfloat factor, GLfloat units)
{
	c->poly_factor = factor;
	c->poly_units = units;
}
void glScissor(GLint x, GLint y, GLsizei width, GLsizei height)
{
	// GLsizei is signed, so negative dimensions have to be rejected by hand.
	if (width >= 0 && height >= 0) {
		// Stored as lower and upper corners rather than origin + size.
		c->scissor_lx = x;
		c->scissor_ly = y;
		c->scissor_ux = x + width;
		c->scissor_uy = y + height;
	} else if (!c->error) {
		c->error = GL_INVALID_VALUE;
	}
}
void glStencilFunc(GLenum func, GLint ref, GLuint mask)
{
	// Comparison funcs form a contiguous enum range in this implementation.
	if (func < GL_LESS || func > GL_NEVER) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}
	// The stencil buffer is 8 bits, so clamp ref into [0, 255].
	if (ref < 0)
		ref = 0;
	else if (ref > 255)
		ref = 255;
	// Sets front and back state together; use glStencilFuncSeparate for
	// per-face control.
	c->stencil_func = func;
	c->stencil_ref = ref;
	c->stencil_valuemask = mask;
	c->stencil_func_back = func;
	c->stencil_ref_back = ref;
	c->stencil_valuemask_back = mask;
}
void glStencilFuncSeparate(GLenum face, GLenum func, GLint ref, GLuint mask)
{
	if (face < GL_FRONT || face > GL_FRONT_AND_BACK) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}
	// Both faces at once is exactly the non-separate call.
	if (face == GL_FRONT_AND_BACK) {
		glStencilFunc(func, ref, mask);
		return;
	}
	if (func < GL_LESS || func > GL_NEVER) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}
	// 8-bit stencil buffer: clamp the reference value into [0, 255].
	if (ref < 0)
		ref = 0;
	else if (ref > 255)
		ref = 255;
	if (face == GL_FRONT) {
		c->stencil_func = func;
		c->stencil_ref = ref;
		c->stencil_valuemask = mask;
	} else {
		c->stencil_func_back = func;
		c->stencil_ref_back = ref;
		c->stencil_valuemask_back = mask;
	}
}
void glStencilOp(GLenum sfail, GLenum dpfail, GLenum dppass)
{
	// Validates all three operations before changing any state (no
	// partial success). Valid ops are the contiguous enum range
	// [GL_INVERT, GL_DECR_WRAP], plus GL_ZERO which lives outside it.
	//
	// Bug fix: the dpfail and dppass clauses previously re-tested
	// `sfail != GL_ZERO` (copy-paste), so GL_ZERO was only accepted for
	// sfail, and invalid dpfail/dppass values slipped through whenever
	// sfail == GL_ZERO. Each clause now tests its own parameter.
	if (((sfail < GL_INVERT || sfail > GL_DECR_WRAP) && sfail != GL_ZERO) ||
	    ((dpfail < GL_INVERT || dpfail > GL_DECR_WRAP) && dpfail != GL_ZERO) ||
	    ((dppass < GL_INVERT || dppass > GL_DECR_WRAP) && dppass != GL_ZERO)) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}
	// Sets front and back state together; use glStencilOpSeparate for
	// per-face control.
	c->stencil_sfail = sfail;
	c->stencil_dpfail = dpfail;
	c->stencil_dppass = dppass;
	c->stencil_sfail_back = sfail;
	c->stencil_dpfail_back = dpfail;
	c->stencil_dppass_back = dppass;
}
void glStencilOpSeparate(GLenum face, GLenum sfail, GLenum dpfail, GLenum dppass)
{
	if (face < GL_FRONT || face > GL_FRONT_AND_BACK) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}
	// Both faces at once is exactly the non-separate call.
	if (face == GL_FRONT_AND_BACK) {
		glStencilOp(sfail, dpfail, dppass);
		return;
	}
	// Bug fix: as in glStencilOp, the dpfail/dppass clauses previously
	// tested `sfail != GL_ZERO` instead of their own parameter; each
	// clause now validates the value it guards.
	if (((sfail < GL_INVERT || sfail > GL_DECR_WRAP) && sfail != GL_ZERO) ||
	    ((dpfail < GL_INVERT || dpfail > GL_DECR_WRAP) && dpfail != GL_ZERO) ||
	    ((dppass < GL_INVERT || dppass > GL_DECR_WRAP) && dppass != GL_ZERO)) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}
	if (face == GL_FRONT) {
		c->stencil_sfail = sfail;
		c->stencil_dpfail = dpfail;
		c->stencil_dppass = dppass;
	} else {
		c->stencil_sfail_back = sfail;
		c->stencil_dpfail_back = dpfail;
		c->stencil_dppass_back = dppass;
	}
}
// Sets the value used when clearing the stencil buffer; only the low
// 8 bits are kept since the stencil buffer is one byte per pixel.
void glClearStencil(GLint s)
{
	// stencil is 8 bit bytes so just hardcoding FF here
	c->clear_stencil = s & 0xFF;
}
// Sets the stencil write mask for both front and back faces; use
// glStencilMaskSeparate for per-face control.
void glStencilMask(GLuint mask)
{
	c->stencil_writemask = mask;
	c->stencil_writemask_back = mask;
}
void glStencilMaskSeparate(GLenum face, GLuint mask)
{
	// Per-face stencil write mask; GL_FRONT_AND_BACK delegates to the
	// non-separate call which sets both.
	if (face < GL_FRONT || face > GL_FRONT_AND_BACK) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}
	if (face == GL_FRONT_AND_BACK) {
		glStencilMask(mask);
	} else if (face == GL_FRONT) {
		c->stencil_writemask = mask;
	} else {
		c->stencil_writemask_back = mask;
	}
}
// Just wrap my pgl extension getter, unmap does nothing
void* glMapBuffer(GLenum target, GLenum access)
{
	// Only the two buffer binding points PortableGL supports.
	if (target != GL_ARRAY_BUFFER && target != GL_ELEMENT_ARRAY_BUFFER) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return NULL;
	}
	// access is validated but otherwise ignored: "mapping" just hands out
	// the internal pointer, so read/write distinctions don't matter.
	if (access != GL_READ_ONLY && access != GL_WRITE_ONLY && access != GL_READ_WRITE) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return NULL;
	}
	void* ptr = NULL;
	// bound_buffers[] is indexed relative to GL_ARRAY_BUFFER.
	pglGetBufferData(c->bound_buffers[target - GL_ARRAY_BUFFER], &ptr);
	return ptr;
}
void* glMapNamedBuffer(GLuint buffer, GLenum access)
{
	// Validate access up front; buffer validity is checked inside
	// pglGetBufferData, which sets the error and leaves ptr NULL.
	switch (access) {
	case GL_READ_ONLY:
	case GL_WRITE_ONLY:
	case GL_READ_WRITE:
		break;
	default:
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return NULL;
	}
	void* ptr = NULL;
	pglGetBufferData(buffer, &ptr);
	return ptr;
}
// Stubs to let real OpenGL libs compile with minimal modifications/ifdefs
// add what you need
void glGenerateMipmap(GLenum target)
{
	//TODO not implemented, not sure it's worth it.
	//For example mipmap generation code see
	//https://github.com/thebeast33/cro_lib/blob/master/cro_mipmap.h
}
// Unimplemented queries: params is left untouched.
void glGetDoublev(GLenum pname, GLdouble* params) { }
void glGetInteger64v(GLenum pname, GLint64* params) { }
// Framebuffers/Renderbuffers
// The FBO/renderbuffer API is unimplemented; the Is*() queries report
// GL_FALSE and everything else is a no-op.
void glGenFramebuffers(GLsizei n, GLuint* ids) {}
void glBindFramebuffer(GLenum target, GLuint framebuffer) {}
void glDeleteFramebuffers(GLsizei n, GLuint* framebuffers) {}
void glFramebufferTexture(GLenum target, GLenum attachment, GLuint texture, GLint level) {}
void glFramebufferTexture1D(GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level) {}
void glFramebufferTexture2D(GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level) {}
void glFramebufferTexture3D(GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLint layer) {}
GLboolean glIsFramebuffer(GLuint framebuffer) { return GL_FALSE; }
void glGenRenderbuffers(GLsizei n, GLuint* renderbuffers) {}
void glBindRenderbuffer(GLenum target, GLuint renderbuffer) {}
void glDeleteRenderbuffers(GLsizei n, const GLuint* renderbuffers) {}
void glRenderbufferStorage(GLenum target, GLenum internalformat, GLsizei width, GLsizei height) {}
GLboolean glIsRenderbuffer(GLuint renderbuffer) { return GL_FALSE; }
void glFramebufferRenderbuffer(GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer) {}
// Could also return GL_FRAMEBUFFER_UNDEFINED, but then I'd have to add all
// those enums and really 0 signaling an error makes more sense
GLenum glCheckFramebufferStatus(GLenum target) { return 0; }
// Shader-object API stubs: "shaders" here are C function pointers
// registered via pglCreateProgram, so the GLSL compile/link entry points
// have nothing to do and the create/locate functions return 0.
void glGetProgramiv(GLuint program, GLenum pname, GLint* params) { }
void glGetProgramInfoLog(GLuint program, GLsizei maxLength, GLsizei* length, GLchar* infoLog) { }
void glAttachShader(GLuint program, GLuint shader) { }
void glCompileShader(GLuint shader) { }
void glGetShaderInfoLog(GLuint shader, GLsizei maxLength, GLsizei* length, GLchar* infoLog) { }
void glLinkProgram(GLuint program) { }
void glShaderSource(GLuint shader, GLsizei count, const GLchar** string, const GLint* length) { }
void glGetShaderiv(GLuint shader, GLenum pname, GLint* params) { }
void glDeleteShader(GLuint shader) { }
void glDetachShader(GLuint program, GLuint shader) { }
GLuint glCreateProgram() { return 0; }
GLuint glCreateShader(GLenum shaderType) { return 0; }
GLint glGetUniformLocation(GLuint program, const GLchar* name) { return 0; }
GLint glGetAttribLocation(GLuint program, const GLchar* name) { return 0; }
// Mapping just hands out the internal buffer pointer (see glMapBuffer),
// so unmapping has nothing to undo and always "succeeds".
GLboolean glUnmapBuffer(GLenum target) { return GL_TRUE; }
GLboolean glUnmapNamedBuffer(GLuint buffer) { return GL_TRUE; }
// TODO
// Remaining source-compatibility stubs. Uniforms in this renderer are a
// raw user-owned struct passed via pglSetUniform, so the entire
// glUniform*/glUniformMatrix* family has nothing to do.
void glLineWidth(GLfloat width) { }
void glActiveTexture(GLenum texture) { }
void glTexParameterfv(GLenum target, GLenum pname, const GLfloat* params) { }
void glUniform1f(GLint location, GLfloat v0) { }
void glUniform2f(GLint location, GLfloat v0, GLfloat v1) { }
void glUniform3f(GLint location, GLfloat v0, GLfloat v1, GLfloat v2) { }
void glUniform4f(GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3) { }
void glUniform1i(GLint location, GLint v0) { }
void glUniform2i(GLint location, GLint v0, GLint v1) { }
void glUniform3i(GLint location, GLint v0, GLint v1, GLint v2) { }
void glUniform4i(GLint location, GLint v0, GLint v1, GLint v2, GLint v3) { }
void glUniform1ui(GLuint location, GLuint v0) { }
void glUniform2ui(GLuint location, GLuint v0, GLuint v1) { }
void glUniform3ui(GLuint location, GLuint v0, GLuint v1, GLuint v2) { }
void glUniform4ui(GLuint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3) { }
void glUniform1fv(GLint location, GLsizei count, const GLfloat* value) { }
void glUniform2fv(GLint location, GLsizei count, const GLfloat* value) { }
void glUniform3fv(GLint location, GLsizei count, const GLfloat* value) { }
void glUniform4fv(GLint location, GLsizei count, const GLfloat* value) { }
void glUniform1iv(GLint location, GLsizei count, const GLint* value) { }
void glUniform2iv(GLint location, GLsizei count, const GLint* value) { }
void glUniform3iv(GLint location, GLsizei count, const GLint* value) { }
void glUniform4iv(GLint location, GLsizei count, const GLint* value) { }
void glUniform1uiv(GLint location, GLsizei count, const GLuint* value) { }
void glUniform2uiv(GLint location, GLsizei count, const GLuint* value) { }
void glUniform3uiv(GLint location, GLsizei count, const GLuint* value) { }
void glUniform4uiv(GLint location, GLsizei count, const GLuint* value) { }
void glUniformMatrix2fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) { }
void glUniformMatrix3fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) { }
void glUniformMatrix4fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) { }
void glUniformMatrix2x3fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) { }
void glUniformMatrix3x2fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) { }
void glUniformMatrix2x4fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) { }
void glUniformMatrix4x2fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) { }
void glUniformMatrix3x4fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) { }
void glUniformMatrix4x3fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) { }
/*************************************
 * GLSL(ish) functions
 *************************************/
// Clamp f to the unit interval [0, 1].
float clampf_01(float f)
{
	if (f < 0.0f)
		return 0.0f;
	return (f > 1.0f) ? 1.0f : f;
}
// Clamp f into [min, max]; assumes min <= max.
float clampf(float f, float min, float max)
{
	return (f < min) ? min : (f > max) ? max : f;
}
// Clamp i into [min, max]; assumes min <= max.
int clampi(int i, int min, int max)
{
	if (i < min)
		return min;
	return (i > max) ? max : i;
}
#define imod(a, b) (a) - (b) * ((a)/(b))
// Maps a possibly out-of-range texel index i into [0, size) according to
// the texture wrap mode. Assumes size > 0; any unsupported mode asserts.
//
// Cleanup: removed the unused tmp2 = i/(2*size) (the author's own
// "TODO what was this for?") and the unreachable `return tmp;` that
// followed the if/else in the GL_MIRRORED_REPEAT case — both branches
// already return.
static int wrap(int i, int size, GLenum mode)
{
	int tmp;
	switch (mode)
	{
	case GL_REPEAT:
		// Euclidean-style modulo: shift negative remainders up into range.
		tmp = imod(i, size);
		if (tmp < 0) tmp = size + tmp;
		return tmp;
	// Border is too much of a pain to implement with render to
	// texture. Trade offs in poor performance or ugly extra code
	// for a feature that almost no one actually uses and even
	// when it is used (barring rare/odd uv coordinates) it's not
	// even noticable.
	//case GL_CLAMP_TO_BORDER:
		//return clampi(i, -1, size);
	case GL_CLAMP_TO_BORDER:  // just so stuff that uses it compiles
	case GL_CLAMP_TO_EDGE:
		return clampi(i, 0, size-1);
	case GL_MIRRORED_REPEAT:
		// Reflect around 0 first, then flip direction on every odd
		// `size`-wide span.
		if (i < 0) i = -i;
		tmp = i / size;
		if (tmp % 2)
			return (size-1) - (i - tmp * size);
		else
			return i - tmp * size;
	default:
		//should never happen, get rid of compile warning
		assert(0);
		return 0;
	}
}
#undef imod
// used in the following 4 texture access functions
// Not sure if it's actually necessary since wrap() clamps
#define EPSILON 0.000001
// Samples the 1D texture with handle `tex` at normalized coordinate x.
// Only mag_filter is consulted: GL_NEAREST picks the closest texel,
// anything else does linear filtering between the two nearest texels.
vec4 texture1D(GLuint tex, float x)
{
	int i0, i1;
	glTexture* t = &c->textures.a[tex];
	Color* texdata = (Color*)t->data;
	// Shrink the width slightly so x == 1.0 doesn't index one past the end.
	double w = t->w - EPSILON;
	double xw = x * w;
	if (t->mag_filter == GL_NEAREST) {
		i0 = wrap(floor(xw), t->w, t->wrap_s);
		return Color_to_vec4(texdata[i0]);
	} else {
		// LINEAR
		// This seems right to me since pixel centers are 0.5 but
		// this isn't exactly what's described in the spec or FoCG
		i0 = wrap(floor(xw - 0.5), t->w, t->wrap_s);
		i1 = wrap(floor(xw + 0.499999), t->w, t->wrap_s);
		double tmp2;
		// alpha = fractional position of the sample between texel centers.
		double alpha = modf(xw+0.5, &tmp2);
		if (alpha < 0) ++alpha;
		//hermite smoothing is optional
		//looks like my nvidia implementation doesn't do it
		//but it can look a little better
#ifdef HERMITE_SMOOTHING
		alpha = alpha*alpha * (3 - 2*alpha);
#endif
		// Weighted average of the two neighboring texels.
		vec4 ci = Color_to_vec4(texdata[i0]);
		vec4 ci1 = Color_to_vec4(texdata[i1]);
		ci = scale_vec4(ci, (1-alpha));
		ci1 = scale_vec4(ci1, alpha);
		ci = add_vec4s(ci, ci1);
		return ci;
	}
}
// Samples the 2D texture with handle `tex` at normalized (x, y). Only
// mag_filter is consulted: GL_NEAREST, or bilinear filtering otherwise.
vec4 texture2D(GLuint tex, float x, float y)
{
	int i0, j0, i1, j1;
	glTexture* t = &c->textures.a[tex];
	Color* texdata = (Color*)t->data;
	int w = t->w;
	int h = t->h;
	// Shrink dimensions slightly so coordinates of exactly 1.0 stay in range.
	double dw = w - EPSILON;
	double dh = h - EPSILON;
	double xw = x * dw;
	double yh = y * dh;
	//TODO don't just use mag_filter all the time?
	//is it worth bothering?
	if (t->mag_filter == GL_NEAREST) {
		i0 = wrap(floor(xw), w, t->wrap_s);
		j0 = wrap(floor(yh), h, t->wrap_t);
		return Color_to_vec4(texdata[j0*w + i0]);
	} else {
		// LINEAR
		// This seems right to me since pixel centers are 0.5 but
		// this isn't exactly what's described in the spec or FoCG
		i0 = wrap(floor(xw - 0.5), w, t->wrap_s);
		j0 = wrap(floor(yh - 0.5), h, t->wrap_t);
		i1 = wrap(floor(xw + 0.499999), w, t->wrap_s);
		j1 = wrap(floor(yh + 0.499999), h, t->wrap_t);
		double tmp2;
		// Fractional offsets of the sample point between texel centers.
		double alpha = modf(xw+0.5, &tmp2);
		double beta = modf(yh+0.5, &tmp2);
		if (alpha < 0) ++alpha;
		if (beta < 0) ++beta;
		//hermite smoothing is optional
		//looks like my nvidia implementation doesn't do it
		//but it can look a little better
#ifdef HERMITE_SMOOTHING
		alpha = alpha*alpha * (3 - 2*alpha);
		beta = beta*beta * (3 - 2*beta);
#endif
		// Weighted average of the 2x2 texel neighborhood.
		vec4 cij = Color_to_vec4(texdata[j0*w + i0]);
		vec4 ci1j = Color_to_vec4(texdata[j0*w + i1]);
		vec4 cij1 = Color_to_vec4(texdata[j1*w + i0]);
		vec4 ci1j1 = Color_to_vec4(texdata[j1*w + i1]);
		cij = scale_vec4(cij, (1-alpha)*(1-beta));
		ci1j = scale_vec4(ci1j, alpha*(1-beta));
		cij1 = scale_vec4(cij1, (1-alpha)*beta);
		ci1j1 = scale_vec4(ci1j1, alpha*beta);
		cij = add_vec4s(cij, ci1j);
		cij = add_vec4s(cij, cij1);
		cij = add_vec4s(cij, ci1j1);
		return cij;
	}
}
// Samples the 3D texture with handle `tex` at normalized (x, y, z). Only
// mag_filter is consulted: GL_NEAREST, or trilinear filtering otherwise.
vec4 texture3D(GLuint tex, float x, float y, float z)
{
	int i0, j0, i1, j1, k0, k1;
	glTexture* t = &c->textures.a[tex];
	Color* texdata = (Color*)t->data;
	// Shrink dimensions slightly so coordinates of exactly 1.0 stay in range.
	double dw = t->w - EPSILON;
	double dh = t->h - EPSILON;
	double dd = t->d - EPSILON;
	int w = t->w;
	int h = t->h;
	int d = t->d;
	// Texels per depth slice.
	int plane = w * t->h;
	double xw = x * dw;
	double yh = y * dh;
	double zd = z * dd;
	if (t->mag_filter == GL_NEAREST) {
		i0 = wrap(floor(xw), w, t->wrap_s);
		j0 = wrap(floor(yh), h, t->wrap_t);
		k0 = wrap(floor(zd), d, t->wrap_r);
		return Color_to_vec4(texdata[k0*plane + j0*w + i0]);
	} else {
		// LINEAR
		// This seems right to me since pixel centers are 0.5 but
		// this isn't exactly what's described in the spec or FoCG
		i0 = wrap(floor(xw - 0.5), w, t->wrap_s);
		j0 = wrap(floor(yh - 0.5), h, t->wrap_t);
		k0 = wrap(floor(zd - 0.5), d, t->wrap_r);
		i1 = wrap(floor(xw + 0.499999), w, t->wrap_s);
		j1 = wrap(floor(yh + 0.499999), h, t->wrap_t);
		k1 = wrap(floor(zd + 0.499999), d, t->wrap_r);
		double tmp2;
		// Fractional offsets of the sample point between texel centers.
		double alpha = modf(xw+0.5, &tmp2);
		double beta = modf(yh+0.5, &tmp2);
		double gamma = modf(zd+0.5, &tmp2);
		if (alpha < 0) ++alpha;
		if (beta < 0) ++beta;
		if (gamma < 0) ++gamma;
		//hermite smoothing is optional
		//looks like my nvidia implementation doesn't do it
		//but it can look a little better
#ifdef HERMITE_SMOOTHING
		alpha = alpha*alpha * (3 - 2*alpha);
		beta = beta*beta * (3 - 2*beta);
		gamma = gamma*gamma * (3 - 2*gamma);
#endif
		// Weighted average of the 2x2x2 texel neighborhood.
		vec4 cijk = Color_to_vec4(texdata[k0*plane + j0*w + i0]);
		vec4 ci1jk = Color_to_vec4(texdata[k0*plane + j0*w + i1]);
		vec4 cij1k = Color_to_vec4(texdata[k0*plane + j1*w + i0]);
		vec4 ci1j1k = Color_to_vec4(texdata[k0*plane + j1*w + i1]);
		vec4 cijk1 = Color_to_vec4(texdata[k1*plane + j0*w + i0]);
		vec4 ci1jk1 = Color_to_vec4(texdata[k1*plane + j0*w + i1]);
		vec4 cij1k1 = Color_to_vec4(texdata[k1*plane + j1*w + i0]);
		vec4 ci1j1k1 = Color_to_vec4(texdata[k1*plane + j1*w + i1]);
		cijk = scale_vec4(cijk, (1-alpha)*(1-beta)*(1-gamma));
		ci1jk = scale_vec4(ci1jk, alpha*(1-beta)*(1-gamma));
		cij1k = scale_vec4(cij1k, (1-alpha)*beta*(1-gamma));
		ci1j1k = scale_vec4(ci1j1k, alpha*beta*(1-gamma));
		cijk1 = scale_vec4(cijk1, (1-alpha)*(1-beta)*gamma);
		ci1jk1 = scale_vec4(ci1jk1, alpha*(1-beta)*gamma);
		cij1k1 = scale_vec4(cij1k1, (1-alpha)*beta*gamma);
		ci1j1k1 = scale_vec4(ci1j1k1, alpha*beta*gamma);
		cijk = add_vec4s(cijk, ci1jk);
		cijk = add_vec4s(cijk, cij1k);
		cijk = add_vec4s(cijk, ci1j1k);
		cijk = add_vec4s(cijk, cijk1);
		cijk = add_vec4s(cijk, ci1jk1);
		cijk = add_vec4s(cijk, cij1k1);
		cijk = add_vec4s(cijk, ci1j1k1);
		return cijk;
	}
}
// for now this should work
// Samples layer z of the 2D array texture `tex` at normalized (x, y).
// z is an integer layer index and is not filtered; within the layer the
// behavior matches texture2D (nearest or bilinear based on mag_filter).
vec4 texture2DArray(GLuint tex, float x, float y, int z)
{
	int i0, j0, i1, j1;
	glTexture* t = &c->textures.a[tex];
	Color* texdata = (Color*)t->data;
	int w = t->w;
	int h = t->h;
	// Shrink dimensions slightly so coordinates of exactly 1.0 stay in range.
	double dw = w - EPSILON;
	double dh = h - EPSILON;
	// Texels per layer.
	int plane = w * h;
	double xw = x * dw;
	double yh = y * dh;
	if (t->mag_filter == GL_NEAREST) {
		i0 = wrap(floor(xw), w, t->wrap_s);
		j0 = wrap(floor(yh), h, t->wrap_t);
		return Color_to_vec4(texdata[z*plane + j0*w + i0]);
	} else {
		// LINEAR
		// This seems right to me since pixel centers are 0.5 but
		// this isn't exactly what's described in the spec or FoCG
		i0 = wrap(floor(xw - 0.5), w, t->wrap_s);
		j0 = wrap(floor(yh - 0.5), h, t->wrap_t);
		i1 = wrap(floor(xw + 0.499999), w, t->wrap_s);
		j1 = wrap(floor(yh + 0.499999), h, t->wrap_t);
		double tmp2;
		// Fractional offsets of the sample point between texel centers.
		double alpha = modf(xw+0.5, &tmp2);
		double beta = modf(yh+0.5, &tmp2);
		if (alpha < 0) ++alpha;
		if (beta < 0) ++beta;
		//hermite smoothing is optional
		//looks like my nvidia implementation doesn't do it
		//but it can look a little better
#ifdef HERMITE_SMOOTHING
		alpha = alpha*alpha * (3 - 2*alpha);
		beta = beta*beta * (3 - 2*beta);
#endif
		// Weighted average of the 2x2 texel neighborhood within the layer.
		vec4 cij = Color_to_vec4(texdata[z*plane + j0*w + i0]);
		vec4 ci1j = Color_to_vec4(texdata[z*plane + j0*w + i1]);
		vec4 cij1 = Color_to_vec4(texdata[z*plane + j1*w + i0]);
		vec4 ci1j1 = Color_to_vec4(texdata[z*plane + j1*w + i1]);
		cij = scale_vec4(cij, (1-alpha)*(1-beta));
		ci1j = scale_vec4(ci1j, alpha*(1-beta));
		cij1 = scale_vec4(cij1, (1-alpha)*beta);
		ci1j1 = scale_vec4(ci1j1, alpha*beta);
		cij = add_vec4s(cij, ci1j);
		cij = add_vec4s(cij, cij1);
		cij = add_vec4s(cij, ci1j1);
		return cij;
	}
}
// Samples the rectangle texture `tex` at (x, y). Unlike texture2D the
// coordinates are unnormalized texel coordinates (note xw/yh are used
// directly, with no scaling by the texture dimensions).
vec4 texture_rect(GLuint tex, float x, float y)
{
	int i0, j0, i1, j1;
	glTexture* t = &c->textures.a[tex];
	Color* texdata = (Color*)t->data;
	int w = t->w;
	int h = t->h;
	double xw = x;
	double yh = y;
	//TODO don't just use mag_filter all the time?
	//is it worth bothering?
	if (t->mag_filter == GL_NEAREST) {
		i0 = wrap(floor(xw), w, t->wrap_s);
		j0 = wrap(floor(yh), h, t->wrap_t);
		return Color_to_vec4(texdata[j0*w + i0]);
	} else {
		// LINEAR
		// This seems right to me since pixel centers are 0.5 but
		// this isn't exactly what's described in the spec or FoCG
		i0 = wrap(floor(xw - 0.5), w, t->wrap_s);
		j0 = wrap(floor(yh - 0.5), h, t->wrap_t);
		i1 = wrap(floor(xw + 0.499999), w, t->wrap_s);
		j1 = wrap(floor(yh + 0.499999), h, t->wrap_t);
		double tmp2;
		// Fractional offsets of the sample point between texel centers.
		double alpha = modf(xw+0.5, &tmp2);
		double beta = modf(yh+0.5, &tmp2);
		if (alpha < 0) ++alpha;
		if (beta < 0) ++beta;
		//hermite smoothing is optional
		//looks like my nvidia implementation doesn't do it
		//but it can look a little better
#ifdef HERMITE_SMOOTHING
		alpha = alpha*alpha * (3 - 2*alpha);
		beta = beta*beta * (3 - 2*beta);
#endif
		// Weighted average of the 2x2 texel neighborhood.
		vec4 cij = Color_to_vec4(texdata[j0*w + i0]);
		vec4 ci1j = Color_to_vec4(texdata[j0*w + i1]);
		vec4 cij1 = Color_to_vec4(texdata[j1*w + i0]);
		vec4 ci1j1 = Color_to_vec4(texdata[j1*w + i1]);
		cij = scale_vec4(cij, (1-alpha)*(1-beta));
		ci1j = scale_vec4(ci1j, alpha*(1-beta));
		cij1 = scale_vec4(cij1, (1-alpha)*beta);
		ci1j1 = scale_vec4(ci1j1, alpha*beta);
		cij = add_vec4s(cij, ci1j);
		cij = add_vec4s(cij, cij1);
		cij = add_vec4s(cij, ci1j1);
		return cij;
	}
}
// Samples the cube map with handle `texture` along direction (x, y, z).
// The component with the largest magnitude selects the face (plane index
// p: 0..5 = +x, -x, +y, -y, +z, -z) and the other two components become
// the 2D face coordinates (s, t), normalized to [0, 1] below.
vec4 texture_cubemap(GLuint texture, float x, float y, float z)
{
	glTexture* tex = &c->textures.a[texture];
	Color* texdata = (Color*)tex->data;
	float x_mag = (x < 0) ? -x : x;
	float y_mag = (y < 0) ? -y : y;
	float z_mag = (z < 0) ? -z : z;
	float s, t, max;
	int p, i0, j0, i1, j1;
	//there should be a better/shorter way to do this ...
	// Pick the dominant axis; `x_mag == x` etc. distinguishes the
	// positive face from the negative one. Ties fall through to the
	// z-largest branches.
	if (x_mag > y_mag) {
		if (x_mag > z_mag) { //x largest
			max = x_mag;
			t = -y;
			if (x_mag == x) {
				p = 0;
				s = -z;
			} else {
				p = 1;
				s = z;
			}
		} else { //z largest
			max = z_mag;
			t = -y;
			if (z_mag == z) {
				p = 4;
				s = x;
			} else {
				p = 5;
				s = -x;
			}
		}
	} else {
		if (y_mag > z_mag) { //y largest
			max = y_mag;
			s = x;
			if (y_mag == y) {
				p = 2;
				t = z;
			} else {
				p = 3;
				t = -z;
			}
		} else { //z largest
			max = z_mag;
			t = -y;
			if (z_mag == z) {
				p = 4;
				s = x;
			} else {
				p = 5;
				s = -x;
			}
		}
	}
	// Map s/max and t/max from [-1, 1] to [0, 1] face coordinates.
	x = (s/max + 1.0f)/2.0f;
	y = (t/max + 1.0f)/2.0f;
	int w = tex->w;
	int h = tex->h;
	// Shrink dimensions slightly so coordinates of exactly 1.0 stay in range.
	double dw = w - EPSILON;
	double dh = h - EPSILON;
	// Texels per face; note plane = w*w assumes square faces.
	int plane = w*w;
	double xw = x * dw;
	double yh = y * dh;
	if (tex->mag_filter == GL_NEAREST) {
		i0 = wrap(floor(xw), w, tex->wrap_s);
		j0 = wrap(floor(yh), h, tex->wrap_t);
		vec4 tmpvec4 = Color_to_vec4(texdata[p*plane + j0*w + i0]);
		return tmpvec4;
	} else {
		// LINEAR
		// This seems right to me since pixel centers are 0.5 but
		// this isn't exactly what's described in the spec or FoCG
		i0 = wrap(floor(xw - 0.5), tex->w, tex->wrap_s);
		j0 = wrap(floor(yh - 0.5), tex->h, tex->wrap_t);
		i1 = wrap(floor(xw + 0.499999), tex->w, tex->wrap_s);
		j1 = wrap(floor(yh + 0.499999), tex->h, tex->wrap_t);
		double tmp2;
		// Fractional offsets of the sample point between texel centers.
		double alpha = modf(xw+0.5, &tmp2);
		double beta = modf(yh+0.5, &tmp2);
		if (alpha < 0) ++alpha;
		if (beta < 0) ++beta;
		//hermite smoothing is optional
		//looks like my nvidia implementation doesn't do it
		//but it can look a little better
#ifdef HERMITE_SMOOTHING
		alpha = alpha*alpha * (3 - 2*alpha);
		beta = beta*beta * (3 - 2*beta);
#endif
		// Weighted average of the 2x2 texel neighborhood within the face.
		vec4 cij = Color_to_vec4(texdata[p*plane + j0*w + i0]);
		vec4 ci1j = Color_to_vec4(texdata[p*plane + j0*w + i1]);
		vec4 cij1 = Color_to_vec4(texdata[p*plane + j1*w + i0]);
		vec4 ci1j1 = Color_to_vec4(texdata[p*plane + j1*w + i1]);
		cij = scale_vec4(cij, (1-alpha)*(1-beta));
		ci1j = scale_vec4(ci1j, alpha*(1-beta));
		cij1 = scale_vec4(cij1, (1-alpha)*beta);
		ci1j1 = scale_vec4(ci1j1, alpha*beta);
		cij = add_vec4s(cij, ci1j);
		cij = add_vec4s(cij, cij1);
		cij = add_vec4s(cij, ci1j1);
		return cij;
	}
}
#undef EPSILON
//Raw draw functions that bypass the OpenGL pipeline and draw
//points/lines/triangles directly to the framebuffer, modify as needed.
//
//Example modifications:
//add the blending part of OpenGL to put_pixel
//change them to take vec4's instead of Color's
//change put_triangle to draw all one color or have a separate path/function
//that draws a single color triangle faster (no need to blend)
//
//pass the framebuffer in instead of drawing to c->back_buffer so
//you can use it elsewhere, independently of a glContext
//etc.
//
// Fills the entire back buffer with 0xFF bytes (opaque white for 4-byte
// pixels); bypasses scissor, masking, and blending entirely.
void pglClearScreen()
{
	memset(c->back_buffer.buf, 255, c->back_buffer.w * c->back_buffer.h * 4);
}
// pgl extension: changes the number and interpolation qualifiers of the
// current program's user-defined vertex outputs without recreating it.
// NOTE(review): unlike pglCreateProgram there is no bounds check here —
// presumably n <= GL_MAX_VERTEX_OUTPUT_COMPONENTS is the caller's
// responsibility; confirm before relying on larger n.
void pglSetInterp(GLsizei n, GLenum* interpolation)
{
	c->programs.a[c->cur_program].vs_output_size = n;
	c->vs_output.size = n;
	memcpy(c->programs.a[c->cur_program].interpolation, interpolation, n*sizeof(GLenum));
	// Make sure the output buffer can hold n floats per vertex.
	cvec_reserve_float(&c->vs_output.output_buf, n * MAX_VERTICES);
	//vs_output.interpolation would be already pointing at current program's array
	//unless the programs array was realloced since the last glUseProgram because
	//they've created a bunch of programs. Unlikely they'd be changing a shader
	//before creating all their shaders but whatever.
	c->vs_output.interpolation = c->programs.a[c->cur_program].interpolation;
}
//TODO
//pglDrawRect(x, y, w, h)
//pglDrawPoint(x, y)
// pgl extension: runs the current fragment shader once per pixel over
// the whole back buffer — no vertex stage, no interpolated vertex
// outputs (the shader's vs_output argument is NULL). Rows are processed
// in parallel when compiled with OpenMP.
void pglDrawFrame()
{
	frag_func frag_shader = c->programs.a[c->cur_program].fragment_shader;
	Shader_Builtins builtins;
	#pragma omp parallel for private(builtins)
	for (int y=0; y<c->back_buffer.h; ++y) {
		for (int x=0; x<c->back_buffer.w; ++x) {
			//ignore z and w components
			// gl_FragCoord is the pixel center (x+0.5, y+0.5).
			builtins.gl_FragCoord.x = x + 0.5f;
			builtins.gl_FragCoord.y = y + 0.5f;
			builtins.discard = GL_FALSE;
			frag_shader(NULL, &builtins, c->programs.a[c->cur_program].uniform);
			if (!builtins.discard)
				draw_pixel(builtins.gl_FragColor, x, y, 0.0f); //depth isn't used for pglDrawFrame
		}
	}
}
void pglBufferData(GLenum target, GLsizei size, const GLvoid* data, GLenum usage)
{
	// pgl extension: like glBufferData but adopts the caller's pointer
	// directly (no copy). The buffer is flagged user_owned so PortableGL
	// never frees it; `data` must outlive its use. `usage` is currently
	// ignored.
	if (target != GL_ARRAY_BUFFER && target != GL_ELEMENT_ARRAY_BUFFER) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}
	//check for usage later
	// Rebase target so it indexes bound_buffers[].
	target -= GL_ARRAY_BUFFER;
	if (c->bound_buffers[target] == 0) {
		if (!c->error)
			c->error = GL_INVALID_OPERATION;
		return;
	}
	// data can't be null for user_owned data
	if (!data) {
		if (!c->error)
			c->error = GL_INVALID_VALUE;
		return;
	}
	// TODO Should I change this in spec functions too? Or just say don't mix them
	// otherwise bad things/undefined behavior??
	if (!c->buffers.a[c->bound_buffers[target]].user_owned) {
		free(c->buffers.a[c->bound_buffers[target]].data);
	}
	// user_owned buffer, just assign the pointer, will not free
	c->buffers.a[c->bound_buffers[target]].data = (u8*)data;
	c->buffers.a[c->bound_buffers[target]].user_owned = GL_TRUE;
	c->buffers.a[c->bound_buffers[target]].size = size;
	// Bug fix: target was rebased by GL_ARRAY_BUFFER above, so it must be
	// compared against the rebased element-buffer index — the original
	// compared against the raw enum, which could never match (unless
	// GL_ARRAY_BUFFER happened to be 0), so the VAO's element_buffer was
	// never updated here.
	if (target == GL_ELEMENT_ARRAY_BUFFER - GL_ARRAY_BUFFER) {
		c->vertex_arrays.a[c->cur_vertex_array].element_buffer = c->bound_buffers[target];
	}
}
void pglTexImage1D(GLenum target, GLint level, GLint internalFormat, GLsizei width, GLint border, GLenum format, GLenum type, const GLvoid* data)
{
	// pgl extension: like glTexImage1D but adopts the caller's pointer
	// (no copy; flagged user_owned so PortableGL never frees it). Only
	// GL_UNSIGNED_BYTE data is supported and it is assumed to be RGBA.
	if (target != GL_TEXTURE_1D) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}
	if (border) {
		if (!c->error)
			c->error = GL_INVALID_VALUE;
		return;
	}
	// data can't be null for user_owned data
	if (!data) {
		if (!c->error)
			c->error = GL_INVALID_VALUE;
		return;
	}
	//TODO support other types?
	// Fix: an unsupported type used to return silently — after already
	// modifying the texture's width. Now all validation happens before
	// any state is touched, and the failure is reported, matching
	// pglTexImage2D.
	if (type != GL_UNSIGNED_BYTE) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}
	// components is only computed to validate format for now; the data is
	// assumed to be RGBA regardless (see TODO at the bottom).
	int components;
	if (format == GL_RED) components = 1;
	else if (format == GL_RG) components = 2;
	else if (format == GL_RGB || format == GL_BGR) components = 3;
	else if (format == GL_RGBA || format == GL_BGRA) components = 4;
	else {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}
	(void)components;
	//ignore level for now
	int cur_tex = c->bound_textures[target-GL_TEXTURE_UNBOUND-1];
	c->textures.a[cur_tex].w = width;
	// TODO see pglBufferData
	if (!c->textures.a[cur_tex].user_owned)
		free(c->textures.a[cur_tex].data);
	//TODO support other internal formats? components should be of internalformat not format
	c->textures.a[cur_tex].data = (u8*)data;
	c->textures.a[cur_tex].user_owned = GL_TRUE;
	//TODO
	//assume for now always RGBA coming in and that's what I'm storing it as
}
// pgl extension: like glTexImage2D but adopts the caller's pointer
// (no copy; flagged user_owned so PortableGL never frees it). Only
// GL_UNSIGNED_BYTE data is supported and it is assumed to be RGBA.
// The cube-map path is currently disabled (see the commented block).
void pglTexImage2D(GLenum target, GLint level, GLint internalFormat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const GLvoid* data)
{
	//GL_TEXTURE_1D, GL_TEXTURE_2D, GL_TEXTURE_3D, GL_TEXTURE_1D_ARRAY, GL_TEXTURE_2D_ARRAY, GL_TEXTURE_RECTANGLE, or GL_TEXTURE_CUBE_MAP.
	//will add others as they're implemented
	if (target != GL_TEXTURE_2D &&
	    target != GL_TEXTURE_RECTANGLE &&
	    target != GL_TEXTURE_CUBE_MAP_POSITIVE_X &&
	    target != GL_TEXTURE_CUBE_MAP_NEGATIVE_X &&
	    target != GL_TEXTURE_CUBE_MAP_POSITIVE_Y &&
	    target != GL_TEXTURE_CUBE_MAP_NEGATIVE_Y &&
	    target != GL_TEXTURE_CUBE_MAP_POSITIVE_Z &&
	    target != GL_TEXTURE_CUBE_MAP_NEGATIVE_Z) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}
	if (border) {
		if (!c->error)
			c->error = GL_INVALID_VALUE;
		return;
	}
	// data can't be null for user_owned data
	if (!data) {
		if (!c->error)
			c->error = GL_INVALID_VALUE;
		return;
	}
	//ignore level for now
	//TODO support other types?
	if (type != GL_UNSIGNED_BYTE) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}
	// TODO I don't actually support anything other than GL_RGBA for input or
	// internal format ... so I should probably make the others errors and
	// I'm not even checking internalFormat currently..
	// components only validates format here; the data itself is assumed RGBA.
	int components;
	if (format == GL_RED) components = 1;
	else if (format == GL_RG) components = 2;
	else if (format == GL_RGB || format == GL_BGR) components = 3;
	else if (format == GL_RGBA || format == GL_BGRA) components = 4;
	else {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}
	int cur_tex;
	if (target == GL_TEXTURE_2D || target == GL_TEXTURE_RECTANGLE) {
		cur_tex = c->bound_textures[target-GL_TEXTURE_UNBOUND-1];
		c->textures.a[cur_tex].w = width;
		c->textures.a[cur_tex].h = height;
		// TODO see pglBufferData
		if (!c->textures.a[cur_tex].user_owned)
			free(c->textures.a[cur_tex].data);
		//TODO support other internal formats? components should be of internalformat not format
		// If you're using these pgl mapped functions, it assumes you are respecting
		// your own current unpack alignment settings already
		c->textures.a[cur_tex].data = (u8*)data;
		c->textures.a[cur_tex].user_owned = GL_TRUE;
	} else { //CUBE_MAP
		// Cube-map faces are accepted by the validation above but the
		// implementation below is disabled; this branch is currently a no-op.
		/*
		 * TODO, doesn't make sense to call this six times when mapping, you'd set
		 * them all up beforehand and set the pointer once...so change this or
		 * make a pglCubeMapData() function?
		 *
		cur_tex = c->bound_textures[GL_TEXTURE_CUBE_MAP-GL_TEXTURE_UNBOUND-1];
		// TODO see pglBufferData
		if (!c->textures.a[cur_tex].user_owned)
			free(c->textures.a[cur_tex].data);
		if (width != height) {
			//TODO spec says INVALID_VALUE, man pages say INVALID_ENUM ?
			if (!c->error)
				c->error = GL_INVALID_VALUE;
			return;
		}
		int mem_size = width*height*6 * components;
		if (c->textures.a[cur_tex].w == 0) {
			c->textures.a[cur_tex].w = width;
			c->textures.a[cur_tex].h = width; //same cause square
		} else if (c->textures.a[cur_tex].w != width) {
			//TODO spec doesn't say all sides must have same dimensions but it makes sense
			//and this site suggests it http://www.opengl.org/wiki/Cubemap_Texture
			if (!c->error)
				c->error = GL_INVALID_VALUE;
			return;
		}
		target -= GL_TEXTURE_CUBE_MAP_POSITIVE_X; //use target as plane index
		c->textures.a[cur_tex].data = (u8*)data;
		c->textures.a[cur_tex].user_owned = GL_TRUE;
		*/
	} //end CUBE_MAP
}
// Maps user-owned pixel data into the currently bound 3D/2D-array texture
// without copying. The caller retains ownership of `data` (user_owned is set),
// so it must outlive the texture. Only GL_UNSIGNED_BYTE data is supported.
//
// Fix: validate *all* arguments before touching the bound texture. The
// original wrote w/h/d first, so a call with a bad format (or unsupported
// type) returned early leaving the texture object half-updated.
void pglTexImage3D(GLenum target, GLint level, GLint internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const GLvoid* data)
{
	if (target != GL_TEXTURE_3D && target != GL_TEXTURE_2D_ARRAY) {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}

	if (border) {
		if (!c->error)
			c->error = GL_INVALID_VALUE;
		return;
	}

	// data can't be null for user_owned data
	if (!data) {
		if (!c->error)
			c->error = GL_INVALID_VALUE;
		return;
	}

	if (type != GL_UNSIGNED_BYTE) {
		// TODO support more types
		return;
	}

	// TODO add error? only support GL_RGBA for now
	int components;
	if (format == GL_RED) components = 1;
	else if (format == GL_RG) components = 2;
	else if (format == GL_RGB || format == GL_BGR) components = 3;
	else if (format == GL_RGBA || format == GL_BGRA) components = 4;
	else {
		if (!c->error)
			c->error = GL_INVALID_ENUM;
		return;
	}
	(void)components; // computed only to validate format; sizing is the caller's job

	//ignore level for now
	int cur_tex = c->bound_textures[target-GL_TEXTURE_UNBOUND-1];

	c->textures.a[cur_tex].w = width;
	c->textures.a[cur_tex].h = height;
	c->textures.a[cur_tex].d = depth;

	// TODO see pglBufferData
	if (!c->textures.a[cur_tex].user_owned)
		free(c->textures.a[cur_tex].data);

	//TODO support other internal formats? components should be of internalformat not format
	c->textures.a[cur_tex].data = (u8*)data;
	c->textures.a[cur_tex].user_owned = GL_TRUE;

	//TODO
	//assume for now always RGBA coming in and that's what I'm storing it as
}
// Hands back the raw pointer a buffer object wraps. Nothing is copied;
// `*data` aliases the buffer's storage.
void pglGetBufferData(GLuint buffer, GLvoid** data)
{
	// A null out-parameter means the call can't do anything useful.
	if (!data) {
		if (!c->error) {
			c->error = GL_INVALID_VALUE;
		}
		return;
	}

	// Buffer 0 is never a valid object; also reject out-of-range or deleted ids.
	int usable = buffer && buffer < c->buffers.size && !c->buffers.a[buffer].deleted;
	if (usable) {
		*data = c->buffers.a[buffer].data;
		return;
	}

	if (!c->error) {
		c->error = GL_INVALID_OPERATION; // matching error code of binding invalid buffer
	}
}
// Hands back the raw pixel pointer a texture object wraps. Nothing is
// copied; `*data` aliases the texture's storage. Unlike buffers, texture 0
// (the default texture) is a legal id here.
void pglGetTextureData(GLuint texture, GLvoid** data)
{
	// A null out-parameter means the call can't do anything useful.
	if (!data) {
		if (!c->error) {
			c->error = GL_INVALID_VALUE;
		}
		return;
	}

	int usable = texture < c->textures.size && !c->textures.a[texture].deleted;
	if (usable) {
		*data = c->textures.a[texture].data;
		return;
	}

	if (!c->error) {
		c->error = GL_INVALID_OPERATION; // matching error code of binding invalid buffer
	}
}
// Packs `color` into the framebuffer's channel layout and stores it at
// (x, y). `lastrow` points at the final row of the back buffer, so rows are
// addressed with a negative y offset (y grows upward).
void put_pixel(Color color, int x, int y)
{
	u32 packed = color.a << c->Ashift | color.r << c->Rshift | color.g << c->Gshift | color.b << c->Bshift;
	u32* row = (u32*)c->back_buffer.lastrow;
	row[-y*c->back_buffer.w + x] = packed;
}
//Should I have it take a glFramebuffer as paramater?
// Draws a 1-pixel-wide line from (x1,y1) to (x2,y2) using a midpoint-style
// test against the implicit line equation A*x + B*y + C = 0. Endpoints are
// clamped to the back buffer, so fully/partially off-screen lines are safe.
void put_line(Color the_color, float x1, float y1, float x2, float y2)
{
	float tmp;

	//always draw from left to right
	if (x2 < x1) {
		tmp = x1;
		x1 = x2;
		x2 = tmp;
		tmp = y1;
		y1 = y2;
		y2 = tmp;
	}

	//calculate slope and implicit line parameters once
	float m = (y2-y1)/(x2-x1);
	float A = y1 - y2;
	float B = x2 - x1;
	float C = x1*y2 -x2*y1;

	int x, y;

	// clamp the iteration range to the framebuffer
	float x_min = MAX(0, MIN(x1, x2));
	float x_max = MIN(c->back_buffer.w-1, MAX(x1, x2));
	float y_min = MAX(0, MIN(y1, y2));
	float y_max = MIN(c->back_buffer.h-1, MAX(y1, y2));

	//4 cases based on slope: steep lines step along y, shallow lines along x,
	//and the sign of the midpoint test decides when the minor axis advances
	if (m <= -1) {     //(-infinite, -1]
		x = x1;
		for (y=y_max; y>=y_min; --y) {
			put_pixel(the_color, x, y);
			if (A*(x+0.5f) + B*(y-1) + C < 0)
				x++;
		}
	} else if (m <= 0) {     //(-1, 0]
		y = y1;
		for (x=x_min; x<=x_max; ++x) {
			put_pixel(the_color, x, y);
			if (A*(x+1) + B*(y-0.5f) + C > 0)
				y--;
		}
	} else if (m <= 1) {     //(0, 1]
		y = y1;
		for (x=x_min; x<=x_max; ++x) {
			put_pixel(the_color, x, y);
			if (A*(x+1) + B*(y+0.5f) + C < 0)
				y++;
		}
	} else {                 //(1, +infinite)
		x = x1;
		for (y=y_min; y<=y_max; ++y) {
			put_pixel(the_color, x, y);
			if (A*(x+0.5f) + B*(y+1) + C > 0)
				x++;
		}
	}
}
// Rasterizes a triangle with per-vertex colors (c1/c2/c3 at p1/p2/p3),
// interpolating the color with barycentric coordinates. Pixels exactly on a
// shared edge are assigned deterministically via the (-1,-1) tie-break test.
//
// Fix: the interpolated color local was named `c`, shadowing the global
// context pointer `c` that this very function dereferences above it
// (back_buffer.w/h). Renamed to `col` to remove the shadowing hazard.
void put_triangle(Color c1, Color c2, Color c3, vec2 p1, vec2 p2, vec2 p3)
{
	//can't think of a better/cleaner way to do this than these 8 lines
	float x_min = MIN(floor(p1.x), floor(p2.x));
	float x_max = MAX(ceil(p1.x), ceil(p2.x));
	float y_min = MIN(floor(p1.y), floor(p2.y));
	float y_max = MAX(ceil(p1.y), ceil(p2.y));

	x_min = MIN(floor(p3.x), x_min);
	x_max = MAX(ceil(p3.x), x_max);
	y_min = MIN(floor(p3.y), y_min);
	y_max = MAX(ceil(p3.y), y_max);

	// clip the bounding box to the framebuffer
	x_min = MAX(0, x_min);
	x_max = MIN(c->back_buffer.w-1, x_max);
	y_min = MAX(0, y_min);
	y_max = MIN(c->back_buffer.h-1, y_max);

	//form implicit lines
	Line l12 = make_Line(p1.x, p1.y, p2.x, p2.y);
	Line l23 = make_Line(p2.x, p2.y, p3.x, p3.y);
	Line l31 = make_Line(p3.x, p3.y, p1.x, p1.y);

	float alpha, beta, gamma;
	Color col;

	float x, y;
	//y += 0.5f; //center of pixel

	// TODO(rswinkle): floor( + 0.5f) like draw_triangle?
	for (y=y_min; y<=y_max; ++y) {
		for (x=x_min; x<=x_max; ++x) {
			// barycentric coordinates: each is the pixel's signed distance to one
			// edge normalized by the opposite vertex's distance to that edge
			gamma = line_func(&l12, x, y)/line_func(&l12, p3.x, p3.y);
			beta = line_func(&l31, x, y)/line_func(&l31, p2.x, p2.y);
			alpha = 1 - beta - gamma;

			if (alpha >= 0 && beta >= 0 && gamma >= 0)
				//if it's on the edge (==0), draw if the opposite vertex is on the same side as arbitrary point -1, -1
				//this is a deterministic way of choosing which triangle gets a pixel for trinagles that share
				//edges
				if ((alpha > 0 || line_func(&l23, p1.x, p1.y) * line_func(&l23, -1, -1) > 0) &&
				    (beta > 0 || line_func(&l31, p2.x, p2.y) * line_func(&l31, -1, -1) > 0) &&
				    (gamma > 0 || line_func(&l12, p3.x, p3.y) * line_func(&l12, -1, -1) > 0)) {
					//interpolate the vertex colors with the barycentric weights
					col.r = alpha*c1.r + beta*c2.r + gamma*c3.r;
					col.g = alpha*c1.g + beta*c2.g + gamma*c3.g;
					col.b = alpha*c1.b + beta*c2.b + gamma*c3.b;
					put_pixel(col, x, y);
				}
		}
	}
}
#undef PORTABLEGL_IMPLEMENTATION
#undef CVECTOR_float_IMPLEMENTATION
#endif
#ifdef MANGLE_TYPES
#undef vec2
#undef vec3
#undef vec4
#undef dvec2
#undef dvec3
#undef dvec4
#undef ivec2
#undef ivec3
#undef ivec4
#undef uvec2
#undef uvec3
#undef uvec4
#undef mat2
#undef mat3
#undef mat4
#undef Color
#undef Line
#undef Plane
#endif
|
alignedattribute.c | #include <stdio.h>
// Compiler/runtime regression test: a local pointer declared with
// __attribute__((aligned(64))) must still work as the base of an OpenMP
// target map clause. The commented-out plain declaration is the known-good
// control case.
void write_index(int*a, int N){
  int*aptr __attribute__ ((aligned(64))) = a; // THIS FAILS
  // int*aptr = a; // THIS WORKS
  printf(" ===> Encounter target teams distribute par for map tofrom:aptr\n");
  #pragma omp target teams distribute parallel for map(tofrom: aptr[0:N])
  for(int i=0;i<N;i++) {
    // printing the device-side address helps diagnose a bad mapping
    printf("updating aptr[%d] addr:%p\n",i,&aptr[i]);
    aptr[i]=i;
  }
}
// Drives write_index() on the target device, then validates on the host that
// every element was written. Prints the first and last mismatching index.
// Returns 0 on success, 1 on any mismatch.
int main(){
  const int N = 10;
  int a[N],validate[N];

  for (int i = 0; i < N; ++i) {
    a[i] = 0;
    validate[i] = i;
  }

  write_index(a, N);

  int flag = -1; // Mark Success
  for (int i = 0; i < N; ++i) {
    if (a[i] == validate[i]) continue;
    // print 1st bad index
    if (flag == -1)
      printf("First fail: a[%d](%d) != validate[%d](%d)\n",i,a[i],i,validate[i]);
    flag = i; // remember the last bad index as well
  }

  if (flag != -1) {
    printf("Last fail: a[%d](%d) != validate[%d](%d)\n",flag,a[flag],flag,validate[flag]);
    printf("Fail\n");
    return 1;
  }

  printf("Success\n");
  return 0;
}
|
rnn_impl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file rnn_impl.h
* \brief
* \author Shu Zhang
*/
#ifndef MXNET_OPERATOR_RNN_IMPL_H_
#define MXNET_OPERATOR_RNN_IMPL_H_
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <algorithm>
#include <map>
#include <vector>
#include <string>
#include <utility>
#include "./math.h"
#include "./math_functions-inl.h"
#include "./operator_common.h"
#include "./mshadow_op.h"
#include "./linalg.h"
namespace mxnet {
namespace op {
// Logistic sigmoid: maps any real x into (0, 1) via 1 / (1 + e^{-x}).
// Kept as a single expression so float/double promotion behavior is
// unchanged for every DType the RNN kernels instantiate.
template<typename DType>
inline DType sigmoid(DType x) {
  return 1.0f / (1.0f + exp(-x));
}
// One direction of one LSTM layer, forward pass for training.
// ws is scratch workspace; rs is "reserved" space that additionally records
// the cell states (c) and gate activations (ifgo) needed by the backward
// pass. When bid is true the layer runs over time in reverse and writes its
// outputs at column offset H of y (the second half of a bidirectional y).
// T/N/I/H: seq length, batch, input size, hidden size.
template<typename DType>
void LstmForwardTrainingSingleLayer(DType* ws,
                                    DType* rs,
                                    bool state_outputs,
                                    bool bid,
                                    const int T,
                                    const int N,
                                    const int I,
                                    const int H,
                                    const Tensor<cpu, 2, DType> &x,
                                    const Tensor<cpu, 2, DType> &hx,
                                    const Tensor<cpu, 2, DType> &cx,
                                    const Tensor<cpu, 3, DType> &y,
                                    DType* w_ptr,
                                    DType* b_ptr,
                                    DType* hy_ptr,
                                    DType* cy_ptr) {
  using namespace mshadow;
  // Views over the packed parameter buffer: input weights [4H, I],
  // recurrent weights [4H, H], and the two bias blocks (4 gates x H each).
  const Tensor<cpu, 2, DType> wx(w_ptr, Shape2(H * 4, I));
  const Tensor<cpu, 2, DType> wh(w_ptr + I * H * 4, Shape2(H * 4, H));
  const Tensor<cpu, 2, DType> bx(b_ptr, Shape2(4, H));
  const Tensor<cpu, 2, DType> bh(b_ptr + H * 4, Shape2(4, H));
  // yx: precomputed x*wx^T for all timesteps; yh: h_{t-1}*wh^T for one step.
  const Tensor<cpu, 2, DType> yx_flat(ws, Shape2(T * N, 4 * H));
  const Tensor<cpu, 2, DType> yh_flat(ws + T * N * H * 4, Shape2(N, 4 * H));
  const Tensor<cpu, 4, DType> yx(yx_flat.dptr_, Shape4(T, N, 4, H));
  const Tensor<cpu, 3, DType> yh(yh_flat.dptr_, Shape3(N, 4, H));
  Tensor<cpu, 2, DType> h(yh_flat.dptr_ + N * H * 4, Shape2(N, H));
  // Reserved space: the reverse direction's records live after the forward
  // direction's 7*T*N*H block (presumably y(5) + c(1) + part of ifgo — TODO
  // confirm against the workspace-size calculation elsewhere in the file).
  DType *c_ptr = bid ? rs + T * N * H * 7 : rs;
  Tensor<cpu, 3, DType> c(c_ptr, Shape3(T, N, H));
  Tensor<cpu, 4, DType> ifgo(c_ptr + T * N * H, Shape4(T, N, H, 4));
  const int offset = bid ? H : 0;
  const DType alpha = 1.0;
  const DType beta = 0.0;
  const int cell_size = N * H;
  // All-timestep input projection in a single GEMM: yx = x * wx^T.
  linalg_gemm(x, wx, yx_flat, alpha, beta, false, true);
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  for (int i = 0; i < T; ++i) {
    // i counts computed steps; t is the actual time index (reversed if bid).
    int t = bid ? T - 1 - i : i;
    // Hidden projection for this step: yh = h_{t-1} * wh^T (hx on step 0).
    linalg_gemm(i ? h : hx, wh, yh_flat, alpha, beta, false, true);
    #pragma omp parallel for num_threads(omp_threads)
    for (int jk = 0; jk < cell_size; ++jk) {
      int j = jk / H;  // batch index
      int k = jk % H;  // hidden unit index
      // Standard LSTM gates: input, forget, candidate, output.
      DType it = sigmoid<DType>(yx[t][j][0][k] + yh[j][0][k] + bx[0][k] + bh[0][k]);
      DType ft = sigmoid<DType>(yx[t][j][1][k] + yh[j][1][k] + bx[1][k] + bh[1][k]);
      DType gt = tanh(yx[t][j][2][k] + yh[j][2][k] + bx[2][k] + bh[2][k]);
      DType ot = sigmoid<DType>(yx[t][j][3][k] + yh[j][3][k] + bx[3][k] + bh[3][k]);
      DType ct = (i ? c[i-1][j][k] : cx[j][k]) * ft + it * gt;
      DType ht = ot * tanh(ct);
      h[j][k] = ht;
      // reserve: y, c and the gate activations are kept for backward.
      y[t][j][k + offset] = ht;
      c[i][j][k] = ct;
      ifgo[i][j][k][0] = it;
      ifgo[i][j][k][1] = ft;
      ifgo[i][j][k][2] = gt;
      ifgo[i][j][k][3] = ot;
      // Final computed step also emits the output state/cell if requested.
      if (i == T - 1 && state_outputs) {
        hy_ptr[jk] = ht;
        cy_ptr[jk] = ct;
      }
    }
  }
}
// Multi-layer (optionally bidirectional) LSTM forward pass for training.
// Runs each layer via LstmForwardTrainingSingleLayer, advancing the packed
// weight/bias pointers and the reserved-space pointer between layers, then
// copies the top layer's output out of reserved space into y_ptr.
// L: layers, D: directions (1 or 2); other dims as in the single-layer kernel.
template <typename DType>
void LstmForwardTraining(DType* ws,
                         DType* rs,
                         bool state_outputs,
                         const int L,
                         const int D,
                         const int T,
                         const int N,
                         const int I,
                         const int H,
                         DType* x_ptr,
                         DType* hx_ptr,
                         DType* cx_ptr,
                         DType* w_ptr,
                         DType* b_ptr,
                         DType* y_ptr,
                         DType* hy_ptr,
                         DType* cy_ptr) {
  const int total_layers = D * L;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(total_layers, N, H));
  Tensor<cpu, 3, DType> cx(cx_ptr, Shape3(total_layers, N, H));
  const int b_size = 2 * H * 4;               // per-direction bias block
  const int r_size = D * T * N * H * 6;       // reserved space per layer
  const int y_offset = T * N * H * 5;         // layer output inside reserved space
  const int cell_size = N * H;
  int idx = 0;  // state & cell state's idx;
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  for (int i = 0; i < L; ++i) {
    // Layers above the first consume the previous layer's (possibly
    // bidirectional) output, hence input size H*D.
    const int input_size = i ? H * D : I;
    const int w_size = (input_size + H) * H * 4;
    Tensor<cpu, 2, DType> x(x_ptr, Shape2(T * N, input_size));
    Tensor<cpu, 3, DType> y(rs + y_offset, Shape3(T, N, H * D));
    LstmForwardTrainingSingleLayer<DType>(ws, rs, state_outputs, false, T, N, input_size, H, x,
                                          hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr);
    if (D == 2) {
      // Second (reverse) direction of the same layer reuses x and writes the
      // other half of y.
      w_ptr += w_size;
      b_ptr += b_size;
      ++idx;
      if (state_outputs) {
        hy_ptr += cell_size;
        cy_ptr += cell_size;
      }
      LstmForwardTrainingSingleLayer<DType>(ws, rs, state_outputs, true, T, N, input_size, H, x,
                                            hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr);
    }
    if (i != L - 1) {
      // Advance to the next layer: its input is this layer's output in rs.
      w_ptr += w_size;
      b_ptr += b_size;
      x_ptr = y.dptr_;
      rs += r_size;
      ++idx;
      if (state_outputs) {
        hy_ptr += cell_size;
        cy_ptr += cell_size;
      }
    }
  }
  // Copy the last layer's output from reserved space to the caller's buffer.
  #pragma omp parallel for num_threads(omp_threads)
  for (int i = 0; i < T * N * H * D; ++i) {
    y_ptr[i] = (rs + y_offset)[i];
  }
}
// One direction of one LSTM layer, inference-only forward pass.
// Unlike the training variant, nothing is recorded for backward: h and c are
// rolling (N, H) buffers inside the scratch workspace ws. When bid is true
// the layer runs over time in reverse and writes at column offset H of y.
template<typename DType>
void LstmForwardInferenceSingleLayer(DType* ws,
                                     bool state_outputs,
                                     bool bid,
                                     const int T,
                                     const int N,
                                     const int I,
                                     const int H,
                                     const Tensor<cpu, 2, DType> &x,
                                     const Tensor<cpu, 2, DType> &hx,
                                     const Tensor<cpu, 2, DType> &cx,
                                     const Tensor<cpu, 3, DType> &y,
                                     DType* w_ptr,
                                     DType* b_ptr,
                                     DType* hy_ptr,
                                     DType* cy_ptr) {
  using namespace mshadow;
  // Views over the packed parameter buffer (see the training variant).
  const Tensor<cpu, 2, DType> wx(w_ptr, Shape2(H * 4, I));
  const Tensor<cpu, 2, DType> wh(w_ptr + I * H * 4, Shape2(H * 4, H));
  const Tensor<cpu, 2, DType> bx(b_ptr, Shape2(4, H));
  const Tensor<cpu, 2, DType> bh(b_ptr + H * 4, Shape2(4, H));
  Tensor<cpu, 2, DType> yx_flat(ws, Shape2(T * N, H * 4));
  Tensor<cpu, 2, DType> yh_flat(ws + T * N * H * 4, Shape2(N, H * 4));
  const Tensor<cpu, 4, DType> yx(yx_flat.dptr_, Shape4(T, N, 4, H));
  const Tensor<cpu, 3, DType> yh(yh_flat.dptr_, Shape3(N, 4, H));
  // Rolling hidden/cell state buffers — only one timestep is kept.
  Tensor<cpu, 2, DType> h(yh_flat.dptr_ + N * H * 4, Shape2(N, H));
  Tensor<cpu, 2, DType> c(h.dptr_ + N * H, Shape2(N, H));
  const int offset = bid ? H : 0;
  const DType alpha = 1.0;
  const DType beta = 0.0;
  const int cell_size = N * H;
  // All-timestep input projection in one GEMM.
  linalg_gemm(x, wx, yx_flat, alpha, beta, false, true);
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  for (int i = 0; i < T; ++i) {
    int t = bid ? T - 1 - i : i;  // actual time index (reversed if bid)
    linalg_gemm(i ? h : hx, wh, yh_flat, alpha, beta, false, true);
    #pragma omp parallel for num_threads(omp_threads)
    for (int jk = 0; jk < cell_size; ++jk) {
      int j = jk / H;
      int k = jk % H;
      DType it = sigmoid<DType>(yx[t][j][0][k] + yh[j][0][k] + bx[0][k] + bh[0][k]);
      DType ft = sigmoid<DType>(yx[t][j][1][k] + yh[j][1][k] + bx[1][k] + bh[1][k]);
      DType gt = tanh(yx[t][j][2][k] + yh[j][2][k] + bx[2][k] + bh[2][k]);
      DType ot = sigmoid<DType>(yx[t][j][3][k] + yh[j][3][k] + bx[3][k] + bh[3][k]);
      DType ct = (i ? c[j][k] : cx[j][k]) * ft + it * gt;
      DType ht = ot * tanh(ct);
      y[t][j][k + offset] = ht;
      if (i == T - 1 && state_outputs) {
        // Last step: emit the final state instead of updating the rolling
        // buffers (they are not needed after this step).
        hy_ptr[jk] = ht;
        cy_ptr[jk] = ct;
      } else {
        h[j][k] = ht;
        c[j][k] = ct;
      }
    }
  }
}
// Multi-layer (optionally bidirectional) LSTM inference.
// Layers ping-pong between the caller's y buffer and a temporary buffer in
// ws so that the final layer always lands in y_ptr; the `flag` parity is
// chosen from L so the last write hits y_ptr.
template <typename DType>
void LstmForwardInference(DType* ws,
                          bool state_outputs,
                          const int L,
                          const int D,
                          const int T,
                          const int N,
                          const int I,
                          const int H,
                          DType* x_ptr,
                          DType* hx_ptr,
                          DType* cx_ptr,
                          DType* w_ptr,
                          DType* b_ptr,
                          DType* y_ptr,
                          DType* hy_ptr,
                          DType* cy_ptr) {
  const int total_layers = D * L;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(total_layers, N, H));
  Tensor<cpu, 3, DType> cx(cx_ptr, Shape3(total_layers, N, H));
  const int b_size = 2 * H * 4;
  const int cell_size = N * H;
  // Temp output area placed after the single-layer kernel's scratch usage.
  DType* y_tmp_ptr = ws + (T + 1) * cell_size * 4 + cell_size * 2;
  DType* y_cur_ptr = y_ptr;
  int idx = 0;  // state & cell state's idx;
  // Parity trick: with an even layer count start in the temp buffer so the
  // final layer writes directly into y_ptr.
  bool flag = L % 2 ? false : true;
  for (int i = 0; i < L; ++i) {
    const int input_size = i ? H * D : I;
    const int w_size = (input_size + H) * H * 4;
    // If bidirectional, need space to save current layer output y.
    if (D == 2) {
      y_cur_ptr = flag ? y_tmp_ptr : y_ptr;
      flag = !flag;
    }
    Tensor<cpu, 2, DType> x(x_ptr, Shape2(T * N, input_size));
    Tensor<cpu, 3, DType> y(y_cur_ptr, Shape3(T, N, H * D));
    LstmForwardInferenceSingleLayer<DType>(ws, state_outputs, false, T, N, input_size, H,
                                           x, hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr);
    // If bidirectional, then calculate the reverse direction's forward result.
    if (D == 2) {
      w_ptr += w_size;
      b_ptr += b_size;
      ++idx;
      if (state_outputs) {
        hy_ptr += cell_size;
        cy_ptr += cell_size;
      }
      LstmForwardInferenceSingleLayer<DType>(ws, state_outputs, true, T, N, input_size, H,
                                             x, hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr);
    }
    // Don't need to move pointer in the last layer.
    if (i != L - 1) {
      w_ptr += w_size;
      b_ptr += b_size;
      x_ptr = y_cur_ptr;  // next layer reads this layer's output
      ++idx;
      if (state_outputs) {
        hy_ptr += cell_size;
        cy_ptr += cell_size;
      }
    }
  }
}
// Backward pass for one direction of one LSTM layer.
// Consumes the cell states (c) and gate activations (ifgo) recorded in
// reserved space rs by the training forward pass, and produces gradients for
// the data (dx), parameters (dwx/dwh/dbx/dbh) and initial states (dhx/dcx).
// req_* are OpRequestType values (kNullOp skips, kAddTo accumulates).
// Iteration runs from the last computed step back to the first; for the
// reverse direction (bid) the time index mapping mirrors the forward pass.
template <typename DType>
void LstmBackwardSingleLayer(DType* ws,
                             DType* rs,
                             DType* tmp_buf,
                             bool bid,
                             const int T,
                             const int N,
                             const int I,
                             const int H,
                             const Tensor<cpu, 2, DType> &x,
                             const Tensor<cpu, 2, DType> &hx,
                             const Tensor<cpu, 2, DType> &cx,
                             const Tensor<cpu, 3, DType> &y,
                             const Tensor<cpu, 3, DType> &dy,
                             const Tensor<cpu, 2, DType> &dx,
                             const Tensor<cpu, 2, DType> &dhx,
                             const Tensor<cpu, 2, DType> &dcx,
                             DType* dhy_ptr,
                             DType* dcy_ptr,
                             DType* w_ptr,
                             DType* dw_ptr,
                             DType* db_ptr,
                             int req_data,
                             int req_params,
                             int req_state,
                             int req_statecell) {
  using namespace mshadow;
  const Tensor<cpu, 2, DType> wx(w_ptr, Shape2(H * 4, I));
  const Tensor<cpu, 2, DType> wh(w_ptr + I * H * 4, Shape2(H * 4, H));
  Tensor<cpu, 2, DType> dwx(dw_ptr, Shape2(H * 4, I));
  Tensor<cpu, 2, DType> dwh(dw_ptr + I * H * 4, Shape2(H * 4, H));
  Tensor<cpu, 1, DType> dbx(db_ptr, Shape1(H * 4));
  Tensor<cpu, 1, DType> dbh(dbx.dptr_ + H * 4, Shape1(H * 4));
  // Forward-pass records: cell states then gate activations (see the
  // training forward kernel for the reserved-space layout).
  DType *c_ptr = bid ? rs + T * N * H * 7 : rs;
  const Tensor<cpu, 3, DType> c(c_ptr, Shape3(T, N, H));
  const Tensor<cpu, 4, DType> ifgo(c_ptr + T * N * H, Shape4(T, N, H, 4));
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  // For write/overwrite requests, clear the accumulators that the time loop
  // below accumulates into (dwx is written in one shot at the end instead).
  if (req_params != kNullOp && req_params != kAddTo) {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < H * 4 * H; ++i) {
      dwh.dptr_[i] = 0;
    }
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < 4 * H; ++i) {
      dbx.dptr_[i] = 0;
      dbh.dptr_[i] = 0;
    }
  }
  // Scratch: per-gate pre-activation gradients, plus rolling dh/dc/htmp.
  Tensor<cpu, 4, DType> difgo(ws, Shape4(T, N, 4, H));
  Tensor<cpu, 2, DType> dh(ws + T * N * H * 4, Shape2(N, H));
  Tensor<cpu, 2, DType> dc(dh.dptr_ + N * H, Shape2(N, H));
  Tensor<cpu, 2, DType> htmp(dc.dptr_ + N * H, Shape2(N, H));
  const int offset = bid ? H : 0;
  const DType alpha = 1.0;
  const DType beta0 = 0.0;
  const DType beta1 = 1.0;
  const DType beta2 = 2.0;
  const int cell_size = N * H;
  // Seed dh/dc from the incoming state gradients (zero if absent).
  if (dhy_ptr != NULL) {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < cell_size; ++i) {
      dh.dptr_[i] = dhy_ptr[i];
    }
  } else {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < cell_size; ++i) {
      dh.dptr_[i] = 0;
    }
  }
  if (dcy_ptr != NULL) {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < cell_size; ++i) {
      dc.dptr_[i] = dcy_ptr[i];
    }
  } else {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < cell_size; ++i) {
      dc.dptr_[i] = 0;
    }
  }
  for (int i = T - 1; i >= 0; --i) {
    // i indexes the forward-computation order; t is the real time index and
    // tnext is the previous step's time index in that order.
    int t = bid ? T - 1 - i : i;
    int tnext = bid ? t + 1 : t - 1;
    // On the first computed step (i == 0), gradients flow into the initial
    // states dhx/dcx and the "previous" h/c come from hx/cx.
    const Tensor<cpu, 2, DType>& dhnext = i ? dh : dhx;
    const Tensor<cpu, 2, DType>& dcnext = i ? dc : dcx;
    const Tensor<cpu, 2, DType>& hnext = i ? htmp : hx;
    const Tensor<cpu, 2, DType>& cnext = i ? c[i - 1] : cx;
    #pragma omp parallel for num_threads(omp_threads)
    for (int jk = 0; jk < cell_size; ++jk) {
      int j = jk / H;
      int k = jk % H;
      DType tc = tanh(c[i][j][k]);
      DType it = ifgo[i][j][k][0];
      DType ft = ifgo[i][j][k][1];
      DType gt = ifgo[i][j][k][2];
      DType ot = ifgo[i][j][k][3];
      dh[j][k] += dy[t][j][k + offset];
      dc[j][k] += dh[j][k] * ot * (1 - tc * tc);
      // Pre-activation gate gradients (chain rule through sigmoid/tanh).
      difgo[t][j][0][k] = dc[j][k] * gt * it * (1 - it);
      difgo[t][j][1][k] = dc[j][k] * cnext[j][k] * ft * (1 - ft);
      difgo[t][j][2][k] = dc[j][k] * it * (1 - gt * gt);
      difgo[t][j][3][k] = dh[j][k] * tc * ot * (1 - ot);
      if (req_statecell != kNullOp || i > 0) {
        dcnext[j][k] = dc[j][k] * ft;
      }
      if (i) {
        // Stage the previous step's hidden state for the dwh GEMM below.
        htmp[j][k] = y[tnext][j][k + offset];
      }
    }
    Tensor<cpu, 2, DType> dyh(difgo[t].dptr_, Shape2(N, H * 4));
    if (req_state != kNullOp || i > 0) {
      // dh_{t-1} = dyh * wh
      linalg_gemm(dyh, wh, dhnext, alpha, beta0, false, false);
    }
    if (req_params != kNullOp) {
      if (req_params != kAddTo) {
        linalg_gemm(dyh, hnext, dwh, alpha, beta1, true, false);
      } else {
        // NOTE(review): beta2 == 2.0 scales the existing accumulator by two
        // on every timestep — confirm this matches the intended kAddTo
        // semantics before relying on this path.
        linalg_gemm(dyh, hnext, dwh, alpha, beta2, true, false);
        // generate dwx every time step for AddTo
        Tensor<cpu, 2, DType> x_t(x.dptr_ + i * N * I, Shape2(N, I));
        Tensor<cpu, 2, DType> dyx_t(difgo.dptr_ + i * N * H * 4, Shape2(N, H * 4));
        linalg_gemm(dyx_t, x_t, dwx, alpha, beta2, true, false);
      }
    }
  }
  Tensor<cpu, 2, DType> dyx(difgo.dptr_, Shape2(T * N, H * 4));
  if (req_data != kNullOp) {
    // For the reverse direction accumulate (beta1) into the dx already
    // produced by the forward direction.
    linalg_gemm(dyx, wx, dx, alpha, bid ? beta1 : beta0, false, false);
  }
  if (req_params != kNullOp && req_params != kAddTo) {
    // Non-AddTo: dwx over all timesteps in a single GEMM.
    linalg_gemm(dyx, x, dwx, alpha, beta0, true, false);
  }
  const int row = T * N;
  const int col = H * 4;
  if (req_params != kNullOp) {
    if (req_params != kAddTo) {
      // Bias gradients are column sums of dyx; dbh mirrors dbx since both
      // biases enter the gates additively.
      for (int i = 0; i < row; ++i) {
        #pragma omp parallel for num_threads(omp_threads)
        for (int j = 0; j < col; ++j) {
          dbx[j] += dyx[i][j];
          dbh[j] = dbx[j];
        }
      }
    } else {
      // kAddTo: accumulate per-timestep partial sums in tmp_buf first.
      const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf, Shape2(col, T));
      const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + col * T, Shape2(col, T));
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < col * T; ++i) {
        tmp_dbx.dptr_[i] = 0;
        tmp_dbh.dptr_[i] = 0;
      }
      for (int t = T - 1; t >= 0; --t) {
        #pragma omp parallel for num_threads(omp_threads)
        for (int j = 0; j < col; ++j) {
          for (int i = 0; i < N; ++i) {
            tmp_dbx[j][t] += dyx[t * N + i][j];
            tmp_dbh[j][t] = tmp_dbx[j][t];
          }
        }
        // NOTE(review): `dbx[j] += tmp + dbx[j]` adds dbx to itself as well
        // as the per-step sum — verify against the kAddTo contract.
        #pragma omp parallel for num_threads(omp_threads)
        for (int j = 0; j < col; ++j) {
          dbx[j] += tmp_dbx[j][t] + dbx[j];
          dbh[j] += tmp_dbh[j][t] + dbh[j];
        }
      }
    }
  }
}
// Multi-layer (optionally bidirectional) LSTM backward pass.
// Walks layers top-down, invoking LstmBackwardSingleLayer once per
// direction. Each layer's dx becomes the dy of the layer below (via
// dy_tmp_ptr); pointer offsets into the packed weight/bias/reserved buffers
// mirror the forward pass layout.
template <typename DType>
void LstmBackward(DType* ws,
                  DType* rs,
                  const int L,
                  const int D,
                  const int T,
                  const int N,
                  const int I,
                  const int H,
                  DType* x_ptr,
                  DType* hx_ptr,
                  DType* cx_ptr,
                  DType* w_ptr,
                  DType* y_ptr,
                  DType* dy_ptr,
                  DType* dhy_ptr,
                  DType* dcy_ptr,
                  DType* dx_ptr,
                  DType* dhx_ptr,
                  DType* dcx_ptr,
                  DType* dw_ptr,
                  DType* db_ptr,
                  int req_data,
                  int req_params,
                  int req_state,
                  int req_statecell) {
  // Head of ws is the bias-accumulation temp area; the rest is the
  // single-layer kernel's workspace.
  DType* tmp_buf = ws;
  DType* ws2 = tmp_buf + 8 * T * H;
  const int total_layers = D * L;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(total_layers, N, H));
  Tensor<cpu, 3, DType> cx(cx_ptr, Shape3(total_layers, N, H));
  Tensor<cpu, 3, DType> dhx(dhx_ptr, Shape3(total_layers, N, H));
  Tensor<cpu, 3, DType> dcx(dcx_ptr, Shape3(total_layers, N, H));
  const int b_size = 2 * H * 4;
  const int r_size = D * T * N * H * 6;   // reserved space per layer
  const int y_offset = T * N * H * 5;     // layer output inside reserved space
  const int w_size1 = (I + H) * H * 4;      // first layer
  const int w_size2 = (D * H + H) * H * 4;  // other layers
  const int cell_size = N * H;
  DType* dy_tmp_ptr = ws2 + T * cell_size * 4 + cell_size * 3;
  for (int i = L - 1; i >= 0; --i) {
    const int input_size = i ? H * D : I;
    const int w_size = i ? w_size2 : w_size1;
    int idx = i * D;  // state/cell-state row for this layer's first direction
    DType* w_cur_ptr = i ? w_ptr + (w_size1 + (i - 1) * w_size2) * D : w_ptr;
    DType* dw_cur_ptr = i ? dw_ptr + (w_size1 + (i - 1) * w_size2) * D : dw_ptr;
    DType* db_cur_ptr = db_ptr + i * b_size * D;
    DType* rs_cur_ptr = rs + i * r_size;
    DType* dhy_cur_ptr = dhy_ptr ? dhy_ptr + i * cell_size * D : NULL;
    DType* dcy_cur_ptr = dcy_ptr ? dcy_ptr + i * cell_size * D : NULL;
    Tensor<cpu, 3, DType> y(rs_cur_ptr + y_offset, Shape3(T, N, H * D));
    Tensor<cpu, 3, DType> dy(dy_ptr, Shape3(T, N, H * D));
    // Layer input: the previous layer's recorded output (or x for layer 0);
    // dx goes to a temp buffer except at the bottom layer.
    Tensor<cpu, 2, DType> x(i ? y.dptr_ - r_size : x_ptr, Shape2(T * N, input_size));
    Tensor<cpu, 2, DType> dx(i ? dy_tmp_ptr : dx_ptr, Shape2(T * N, input_size));
    LstmBackwardSingleLayer<DType>(ws2, rs_cur_ptr, tmp_buf, false, T, N, input_size, H,
                                   x, hx[idx], cx[idx], y, dy, dx, dhx[idx], dcx[idx],
                                   dhy_cur_ptr, dcy_cur_ptr, w_cur_ptr, dw_cur_ptr, db_cur_ptr,
                                   req_data, req_params, req_state, req_statecell);
    if (D == 2) {
      // Reverse direction of the same layer; its dx GEMM accumulates into dx.
      w_cur_ptr += w_size;
      dw_cur_ptr += w_size;
      db_cur_ptr += b_size;
      ++idx;
      dhy_cur_ptr = dhy_ptr ? dhy_cur_ptr + cell_size : NULL;
      dcy_cur_ptr = dcy_ptr ? dcy_cur_ptr + cell_size : NULL;
      LstmBackwardSingleLayer<DType>(ws2, rs_cur_ptr, tmp_buf, true, T, N, input_size, H,
                                     x, hx[idx], cx[idx], y, dy, dx, dhx[idx], dcx[idx],
                                     dhy_cur_ptr, dcy_cur_ptr, w_cur_ptr, dw_cur_ptr, db_cur_ptr,
                                     req_data, req_params, req_state, req_statecell);
    }
    // This layer's input gradient is the next (lower) layer's output gradient.
    dy_ptr = dx.dptr_;
  }
}
// One (optionally bidirectional) GRU layer, inference-only forward pass.
// Forward-direction outputs are interleaved into y at stride D*H; for D == 2
// the reverse direction runs inside the same time loop, writing at column
// offset H and walking backward from the last timestep (back_ht_1/back_ht).
// ws is scratch workspace, tmp_buf holds a transposed copy of the hidden
// state for the strided-GEMM workaround.
template<typename DType>
void GruForwardInferenceSingleLayer(DType* ws,
                                    DType* tmp_buf,
                                    bool state_outputs,
                                    const int D,
                                    const int T,
                                    const int N,
                                    const int I,
                                    const int H,
                                    const Tensor<cpu, 2, DType> &x,
                                    const Tensor<cpu, 2, DType> &hx,
                                    DType* wx_ptr,
                                    DType* wh_ptr,
                                    DType* bx_ptr,
                                    DType* bh_ptr,
                                    DType* y_ptr,
                                    DType* hy_ptr) {
  // ht/ht_1 walk forward through y; back_ht/back_ht_1 walk backward from the
  // last timestep's reverse-direction slot.
  DType* ht = y_ptr;
  DType* ht_1 = y_ptr;
  DType* back_ht_1 = y_ptr + (T-1) * N * H * D + H;
  DType* back_ht = back_ht_1;
  DType* gemmC1 = ws;              // [D, T, N, 3 * H]
  DType* gemmC2 = gemmC1 + D * T * N * 3 * H;  // N * 3 * H
  DType* rt = gemmC2 + N * 3 * H;  // reset gate scratch
  DType* zt = rt + N * H;          // update gate scratch
  DType* nt = zt + N * H;          // candidate state scratch
  // Reverse-direction parameters follow the forward ones in the packed buffer.
  DType* back_wx_ptr = wx_ptr + I * 3 * H + H * 3 * H;
  DType* back_wh_ptr = wh_ptr + I * 3 * H + H * 3 * H;
  DType* back_bx_ptr = (bx_ptr != NULL)? bx_ptr + 3 * H * 2 : NULL;
  DType* back_bh_ptr = (bh_ptr != NULL)? bh_ptr + 3 * H * 2: NULL;
  DType* back_gemmC1 = gemmC1 + T * N * 3 * H;
  DType* gemmC1_t = gemmC1;
  const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H * 3, I));
  const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H * 3, H));
  const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(3, H));
  const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(3, H));
  const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 3, I));
  const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 3, H));
  const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(3, H));
  const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(3, H));
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  // Seed y's step-0 slots (and the reverse direction's step-(T-1) slots)
  // with the initial hidden state so ht_1 reads are valid.
  if (D == 1) {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * H + j] = hx[i][j];
      }
  } else {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * D * H + j] = hx[i][j];
        back_ht_1[i * D * H + j] = hx[N + i][j];
      }
  }
  Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, 3 * H));
  Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, 3 * H));
  Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, 3 * H));

  // x * wx.T : [T * N, I] * [I, 3 * H] — all-timestep input projection.
  DType alpha = 1.0;
  DType beta = 0.0;
  linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true);
  if (D == 2) {
    linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true);
  }

  for (int t = 0; t < T; t++) {
    //  perform the first direction, X * wx and H * wh for each step
    //  ht-1 * wh, ht-1:[N, H] wh:[3 * H, H]
    Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H));
    if (D == 1) {
      linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true);
    } else {
      // Bidirectional y stores h at stride D*H; transpose into tmp_buf so the
      // GEMM sees a contiguous [N, H] operand.
      Tensor<cpu, 3, DType> dht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf),
                                                              Shape3(D, H, N));
      dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N));
      linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true);
    }
    gemmC1_t = gemmC1 + t * N * 3 * H;
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        // Per-sample offsets into the [r|z|n] gate blocks.
        int rtb = i * 3 * H;
        int ztb = i * 3 * H + H;
        int ntb = i * 3 * H + 2 * H;
        rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] + gemmC2[rtb + j]
            + bx[0][j] + bh[0][j]);
        zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] + gemmC2[ztb + j]
            + bx[1][j] + bh[1][j]);
        // Candidate uses the reset gate on the *hidden* contribution only.
        nt[i * H + j] = tanh(gemmC1_t[ntb + j] + bx[2][j] +
            rt[i * H + j] * (gemmC2[ntb + j] + bh[2][j]));
        ht[i * D * H + j] = (1-zt[i * H + j]) * nt[i * H + j] +
            zt[i * H + j] * ht_1[i * D * H + j];
      }
    }
    ht_1 = ht;
    ht = ht + D * H * N;
    //  perform the second direction
    if (D == 2) {
      gemmC1_t = back_gemmC1 + (T - 1 - t) * N * 3 * H;
      Tensor<cpu, 2, DType> dback_ht_1(back_ht_1 - H, Shape2(N, D * H));
      Tensor<cpu, 3, DType> dback_ht_1_tmp = Tensor<cpu, 3, DType>
          (reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
      dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N));
      linalg_gemm(dback_ht_1_tmp[1], back_wh, dgemmC2, alpha, beta, true, true);
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          int rtb = i * 3 * H;
          int ztb = i * 3 * H + H;
          int ntb = i * 3 * H + 2 * H;
          rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] +
              gemmC2[rtb + j] + back_bx[0][j] + back_bh[0][j]);
          zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] +
              gemmC2[ztb + j] + back_bx[1][j]+ back_bh[1][j]);
          nt[i * H + j] = tanh(gemmC1_t[ntb + j] + back_bx[2][j]
              + rt[i * H + j] * (gemmC2[ntb + j] + back_bh[2][j]));
          back_ht[i * D * H + j] = (1 - zt[i * H + j]) * nt[i * H + j]
              + zt[i * H + j] * back_ht_1[i * D * H + j];
        }
      }
      back_ht_1 = back_ht;
      back_ht = back_ht - D * H * N;  // reverse direction moves backward in time
    }
  }
  //  copy last state to hy, from(N, H * D) to (D, N, H)
  if (state_outputs) {
    if (D == 1) {
      DType* y_start = y_ptr + (T - 1) * N * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * H + j];
        }
    } else {
      // Reverse direction's "last" state lives at timestep 0, column offset H.
      DType* y_start = y_ptr + (T - 1) * N * H * D;
      DType* y_back_start = y_ptr + H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * D * H + j];
          hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j];
        }
    }
  }
}
// Multi-layer GRU inference. Layers ping-pong between y_ptr and a temp
// buffer in ws, with the (L + l) parity chosen so the final layer's output
// lands in y_ptr. Note I is taken by value and rebound to D*H after layer 0.
template <typename DType>
void GruForwardInference(DType* ws,
                         bool state_outputs,
                         const int L,
                         const int D,
                         const int T,
                         const int N,
                         int I,
                         const int H,
                         DType* x_ptr,
                         DType* hx_ptr,
                         DType* w_ptr,
                         DType* y_ptr,
                         DType* hy_ptr) {
  // Packed parameter layout: all wx/wh blocks first, then all bias blocks.
  // The bx offset skips every layer's weights (presumably matching the
  // packing code elsewhere — TODO confirm against the parameter writer).
  DType* wx = w_ptr;
  DType* wh = wx + I * H * 3;
  DType* bx = wh + H * H * 3 + (D - 1) * (H * H * 3 + I * H * 3)
      + (L - 1) * ((D + 1) * H) * H * 3 * D;
  DType* bh = bx + H * 3;

  DType* y_tmp = ws;
  DType* y_l = x_ptr;
  DType* tmp_buf = y_tmp + D * T * N * H;
  DType* ws2 = y_tmp + D * T * N * H + D * H * N;

  DType* wx_l = wx;
  DType* wh_l = wh;
  DType* bx_l = bx;
  DType* bh_l = bh;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H));
  DType* hy_l = hy_ptr;
  for (int l = 0; l < L; l++) {
    Tensor<cpu, 2, DType> x_l(y_l, Shape2(T * N, I));
    // Parity pick: ensures layer L-1 writes into y_ptr.
    if ((L + l) % 2) {
      y_l = y_ptr;
    } else {
      y_l = y_tmp;
    }
    Tensor<cpu, 2, DType> hx_l = hx[D * l];
    GruForwardInferenceSingleLayer<DType>(ws2, tmp_buf, state_outputs, D, T, N, I, H,
                                          x_l, hx_l, wx_l, wh_l, bx_l, bh_l, y_l, hy_l);
    // Advance per-layer parameter/state pointers.
    hy_l = hy_l + D * N * H;
    bx_l = bx_l + 3 * H * D * 2;
    bh_l = bh_l + 3 * H * D * 2;
    wx_l = wx_l + I * H * 3 * D + H * H * 3 * D;
    if (l == 0) {
      I = D * H;  // upper layers consume the (possibly bidirectional) output
    }
    wh_l = wx_l + I * 3 * H;
  }
}
template<typename DType>
void GruForwardTrainingSingleLayer(DType* ws,
DType* tmp_buf,
bool state_outputs,
const int D,
const int T,
const int N,
const int I,
const int H,
const Tensor<cpu, 2, DType> &x,
const Tensor<cpu, 2, DType> &hx,
DType* wx_ptr,
DType* wh_ptr,
DType* bx_ptr,
DType* bh_ptr,
DType* gateR,
DType* gateZ,
DType* gateN,
DType* Mnh,
DType* y_ptr,
DType* hy_ptr) {
DType* ht = y_ptr;
DType* ht_1 = y_ptr;
DType* back_ht_1 = y_ptr + (T - 1)* N * H * D + H;
DType* back_ht = back_ht_1;
DType* gemmC1 = ws; // [D, T, N, 3 * H]
DType* gemmC2 = gemmC1 + D * T * N * 3 * H; // N * 3 * H
DType* rt = gateR;
DType* zt = gateZ;
DType* nt = gateN;
DType* back_wx_ptr = wx_ptr + I * 3 * H + H * 3 * H;
DType* back_wh_ptr = wh_ptr + I * 3 * H + H * 3 * H;
DType* back_bx_ptr = (bx_ptr != NULL)? bx_ptr + 3 * H * 2 : NULL;
DType* back_bh_ptr = (bh_ptr != NULL)? bh_ptr + 3 * H * 2 : NULL;
DType* back_gateR = gateR + T * N * H;
DType* back_gateZ = gateZ + T * N * H;
DType* back_gateN = gateN + T * N * H;
DType* back_Mnh = Mnh + T * N * H;
DType* back_gemmC1 = gemmC1 + T * N * 3 * H;
DType* gemmC1_t = gemmC1;
const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H * 3, I));
const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H * 3, H));
const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(3, H));
const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(3, H));
const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 3, I));
const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 3, H));
const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(3, H));
const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(3, H));
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (D == 1) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
y_ptr[i * H + j] = hx[i][j];
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
y_ptr[i * D * H + j] = hx[i][j];
back_ht_1[i * D * H + j] = hx[N + i][j];
}
}
Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, 3 * H));
Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, 3 * H));
Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, 3 * H));
// x * wx.T : [T * N, I] * [I, 3 * H]
DType alpha = 1.0;
DType beta = 0.0;
linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true);
if (D == 2) {
linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true);
}
for (int t = 0; t < T; t++) {
// perform the first direction, X * wx and H * wh for each step
// ht-1 * wh, ht-1:[N, H] wh:[3 * H, H]
Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H));
if (D == 1) {
linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true);
} else {
Tensor<cpu, 3, DType> dht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf),
Shape3(D, H, N));
dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N));
linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true);
}
rt = gateR + t * N * H;
zt = gateZ + t * N * H;
nt = gateN + t * N * H;
gemmC1_t = gemmC1 + t * N * 3 * H;
DType* Mnht = Mnh + t * N * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
int rtb = i * 3 * H;
int ztb = i * 3 * H + H;
int ntb = i * 3 * H + 2 * H;
Mnht[i * H + j] = gemmC2[ntb + j] + bh[2][j];
rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] + gemmC2[rtb + j]
+ bx[0][j] + bh[0][j]);
zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] + gemmC2[ztb + j]
+ bx[1][j] + bh[1][j]);
nt[i * H + j] = tanh(gemmC1_t[ntb + j] + bx[2][j] +
rt[i * H + j] * (gemmC2[ntb + j] + bh[2][j]));
ht[i * D * H + j] = (1-zt[i * H + j]) * nt[i * H + j] +
zt[i * H + j] * ht_1[i * D * H + j];
}
}
ht_1 = ht;
ht = ht + D * H * N;
// perform the second direction
if (D == 2) {
rt = back_gateR + (T - 1 - t) * N * H;
zt = back_gateZ + (T - 1 - t) * N * H;
nt = back_gateN + (T - 1 - t) * N * H;
gemmC1_t = back_gemmC1 + (T - 1 - t) * N * 3 * H;
Tensor<cpu, 2, DType> dback_ht_1(back_ht_1 - H, Shape2(N, D * H));
Tensor<cpu, 3, DType> dback_ht_1_tmp = Tensor<cpu, 3, DType>
(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N));
linalg_gemm(dback_ht_1_tmp[1], back_wh, dgemmC2, alpha, beta, true, true);
DType* back_Mnht = back_Mnh + (T - 1 - t) * N * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
int rtb = i * 3 * H;
int ztb = i * 3 * H + H;
int ntb = i * 3 * H + 2 * H;
back_Mnht[i * H + j] = gemmC2[ntb + j] + back_bh[2][j];
rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] +
gemmC2[rtb + j] + back_bx[0][j] + back_bh[0][j]);
zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] +
gemmC2[ztb + j] + back_bx[1][j] + back_bh[1][j]);
nt[i * H + j] = tanh(gemmC1_t[ntb + j] + back_bx[2][j]
+ rt[i * H + j] * (gemmC2[ntb + j] + back_bh[2][j]));
back_ht[i * D * H + j] = (1 - zt[i * H + j]) * nt[i * H + j]
+ zt[i * H + j] * back_ht_1[i * D * H + j];
}
}
back_ht_1 = back_ht;
back_ht = back_ht - D * H * N;
}
}
// copy last state to hy, from(N, H * D) to (D, N, H)
if (state_outputs) {
if (D == 1) {
DType* y_start = y_ptr + (T - 1) * N * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
hy_ptr[i * H + j] = y_start[i * H + j];
}
} else {
DType* y_start = y_ptr + (T - 1) * N * H * D;
DType* y_back_start = y_ptr + H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
hy_ptr[i * H + j] = y_start[i * D * H + j];
hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j];
}
}
}
}
// Forward pass (training mode) of a stacked, optionally bidirectional GRU.
//
// ws : scratch workspace (per-layer gemm buffers).
// rs : reserved space written here and consumed later by GruBackward:
//      layout is [gateR | gateZ | gateN | y | Mnh | tmp], each sized for
//      all L layers (see pointer arithmetic below).
// state_outputs : when true the final hidden states are written to hy_ptr.
// L = #layers, D = #directions (1 or 2), T = sequence length, N = batch,
// I = input size of layer 0 (mutated below to D*H for deeper layers),
// H = hidden size.
// x_ptr/hx_ptr : input sequence and initial hidden states.
// w_ptr : packed weights [wx | wh | ... | bx | bh] for all layers/directions.
// y_ptr/hy_ptr : output sequence and (optional) final hidden states.
template <typename DType>
void GruForwardTraining(DType* ws,
                        DType* rs,
                        bool state_outputs,
                        const int L,
                        const int D,
                        const int T,
                        const int N,
                        int I,
                        const int H,
                        DType* x_ptr,
                        DType* hx_ptr,
                        DType* w_ptr,
                        DType* y_ptr,
                        DType* hy_ptr) {
  // Locate the packed weight sections. The bx offset skips all wx/wh blocks
  // of every layer/direction (layer 0 uses input size I, layers 1.. use D*H).
  DType* wx = w_ptr;
  DType* wh = wx + I * H * 3;
  DType* bx = wh + H * H * 3 + (D - 1) * (H * H * 3 + I * H * 3)
      + (L - 1) * ((D + 1) * H) * H * 3 * D;
  DType* bh = bx + H * 3;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H));
  DType* hy_l = hy_ptr;
  // Partition the reserved space: per-layer gate activations, outputs and
  // pre-reset candidate terms (Mnh), then scratch for the single-layer call.
  DType* gateR_l = rs;
  DType* gateZ_l = gateR_l + L * T * D * N * H;
  DType* gateN_l = gateZ_l + L * T * D * N * H;
  DType* y_l = gateN_l + L * T * D * N * H;
  DType* Mnh_l = y_l + L * T * N * H * D;
  DType* tmp_buf = Mnh_l + L * D * T * N * H;
  DType* ws2 = Mnh_l + L * D * T * N * H + D * H * N;
  DType* wx_l = wx;
  DType* wh_l = wh;
  DType* bx_l = bx;
  DType* bh_l = bh;
  DType* y_tmp = x_ptr;  // layer 0 consumes the network input
  for (int l = 0; l < L; l++) {
    if (l != 0) {
      // Deeper layers consume the previous layer's output sequence.
      y_tmp = y_l;
      y_l = y_l + T * N * H * D;
    }
    Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I));
    Tensor<cpu, 2, DType> hx_l = hx[D * l];
    GruForwardTrainingSingleLayer<DType>(ws2, tmp_buf, state_outputs, D, T, N, I, H,
                                         x_l, hx_l, wx_l, wh_l, bx_l, bh_l,
                                         gateR_l, gateZ_l, gateN_l, Mnh_l, y_l, hy_l);
    // Advance all per-layer cursors into the reserved space and weights.
    gateR_l = gateR_l + T * D * N * H;
    gateZ_l = gateZ_l + T * D * N * H;
    gateN_l = gateN_l + T * D * N * H;
    Mnh_l = Mnh_l + T * D * N * H;
    hy_l = hy_l + D * N * H;
    bx_l = bx_l + 3 * H * D * 2;
    bh_l = bh_l + 3 * H * D * 2;
    wx_l = wx_l + I * H * 3 * D + H * H * 3 * D;
    if (l == 0) {
      // From layer 1 on, the "input" is the (possibly bidirectional
      // concatenated) hidden output of the previous layer.
      I = D * H;
    }
    wh_l = wx_l + I * 3 * H;
  }
  // After the loop y_l points at the last layer's output sequence;
  // copy it into the user-provided output buffer.
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  #pragma omp parallel for num_threads(omp_threads)
  for (int i = 0; i < T * N * H * D; ++i) {
    y_ptr[i] = y_l[i];
  }
}
// Backward pass for ONE GRU layer (both directions when D == 2).
//
// Consumes the activations saved by the forward training pass (gateR/Z/N,
// Mnh = pre-reset candidate term, y = output sequence) and produces:
//   dx  : gradient w.r.t. the layer input   (written if req_data != kNullOp)
//   dhx : gradient w.r.t. the initial state (written if req_state != kNullOp)
//   dwx/dwh/dbx/dbh : weight/bias gradients (per req_params; kAddTo
//                     accumulates instead of overwriting)
// ws is scratch holding dar/da (gate-gradient buffers) and dht1/hx_;
// tmp_buf holds transposed hidden states and, for kAddTo, per-step bias sums.
template <typename DType>
void GruBackwardSingleLayer(DType* ws,
                            DType* tmp_buf,
                            const int D,
                            const int T,
                            const int N,
                            const int I,
                            const int H,
                            const Tensor<cpu, 2, DType> &x,
                            const Tensor<cpu, 2, DType> &hx,
                            DType* wx_ptr,
                            DType* wh_ptr,
                            DType* y_ptr,
                            DType* dy_ptr,
                            DType* dhy_ptr,
                            DType* gateR,
                            DType* gateZ,
                            DType* gateN,
                            DType* Mnh,
                            DType* dx,
                            DType* dhx,
                            DType* dwx,
                            DType* dwh,
                            DType* dbx,
                            DType* dbh,
                            int req_data,
                            int req_params,
                            int req_state) {
  DType* dyt;
  DType* ht1;  // [N, D, H]
  DType* rt;
  DType* zt;
  DType* nt;
  DType* dat;
  DType* dart;
  // Carve up the scratch workspace.
  DType* dar = ws;  // [T, N, 3 * H]
  DType* da = dar + T * N * 3 * H;  // [T, N, 3 * H]
  DType* dht1 = da + T * N * 3 * H;  // [D, N, H]
  DType* hx_ = dht1 + D * N * H;  // [N, D, H]
  DType* Mnht = Mnh;
  DType* back_ht1;
  DType* back_dht1 = dht1 + N * H;  // [N, H]
  DType* back_Mnht = Mnh + T * N * H;
  // Reverse-direction slices follow the forward-direction data.
  DType* back_gateR = gateR + T * N * H;
  DType* back_gateZ = gateZ + T * N * H;
  DType* back_gateN = gateN + T * N * H;
  DType* back_wx_ptr = wx_ptr + I * 3 * H + H * 3 * H;
  DType* back_wh_ptr = wh_ptr + I * 3 * H + H * 3 * H;
  DType* back_dwx = dwx + I * 3 * H + H * 3 * H;
  DType* back_dwh = dwh + I * 3 * H + H * 3 * H;
  DType* back_dbx = dbx + 3 * H * 2;
  DType* back_dbh = dbh + 3 * H * 2;
  DType alpha = 1.0;
  DType beta = 0.0;
  const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H * 3, I));
  const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H * 3, H));
  const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 3, I));
  const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 3, H));
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  // Zero-initialize gradients that are accumulated over time steps, unless
  // the caller asked for kAddTo (then existing values must be kept).
  if (req_params != kNullOp && req_params != kAddTo) {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < D * H * 3 * H; ++i) {
      dwh[i] = 0;
    }
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < D * 3 * H; ++i) {
      dbx[i] = 0;
      dbh[i] = 0;
    }
  }
  // Seed the running hidden-state gradient with dhy (or zero).
  #pragma omp parallel for num_threads(omp_threads)
  for (int i = 0; i < N * H; ++i) {
    if (dhy_ptr) {
      dht1[i] = dhy_ptr[i];
    } else {
      dht1[i] = 0;
    }
  }
  // Pack the initial states into the [N, D, H] layout used by y_ptr.
  #pragma omp parallel for num_threads(omp_threads)
  for (int i = 0; i < N; ++i) {
    for (int j = 0; j < H; ++j) {
      hx_[i * D * H + j] = hx[i][j];
    }
  }
  if (D == 2) {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N * H; ++i) {
      if (dhy_ptr) {
        back_dht1[i] = dhy_ptr[N * H + i];
      } else {
        back_dht1[i] = 0;
      }
    }
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        hx_[i * D * H + H + j] = hx[N + i][j];
      }
    }
  }
  // ---- forward direction, walked backwards in time ----
  for (int t = T - 1; t >= 0; --t) {
    // ht1 = h_{t-1}: previous output, or the initial state at t == 0.
    if (t) {
      ht1 = y_ptr + (t - 1) * N * D * H;
    } else {
      ht1 = hx_;
    }
    // add dy[T, N, D, H] to dhy[D, N, H]
    dyt = dy_ptr + t * N * D * H;
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        dht1[i * H + j] += dyt[i * D * H + j];
      }
    }
    rt = gateR + t * N * H;
    zt = gateZ + t * N * H;
    nt = gateN + t * N * H;
    Mnht = Mnh + t * N * H;
    dat = da + t * N * 3 * H;
    dart = dar + t * N * 3 * H;
    // Elementwise gate gradients. da holds d(pre-activation of x-side),
    // dar holds d(pre-activation of h-side); they differ only in the
    // candidate slot (n), where the h-side is scaled by the reset gate.
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        int nid = i * 3 * H + 2 * H + j;
        int zid = i * 3 * H + H + j;
        int rid = i * 3 * H + j;
        int id = i * H + j;
        dat[nid] = dht1[id] * (1 - zt[id]) * (1 - nt[id] * nt[id]);
        dart[zid] = dat[zid] = dht1[id] * (ht1[i * D * H + j] - nt[id]) *
                    zt[id] * (1 - zt[id]);
        dart[rid] = dat[rid] = dat[nid] * Mnht[id] * rt[id] *
                    (1 - rt[id]);
        dart[nid] = dat[nid] * rt[id];
        // Carry the z-weighted part of the gradient to the previous step.
        dht1[id] = dht1[id] * zt[id];
      }
    }
    // NOTE(review): the recurrent contribution dht1 += dart * wh below is
    // gated on req_params, yet dht1 also feeds dx/dhx — confirm upstream
    // intent for the req_params == kNullOp case.
    if (req_params != kNullOp) {
      alpha = 1.0;
      beta = 1.0;
      // dht1 = dart * wh [N, H] = [N, 3 * H] * [3 * H, H]
      Tensor<cpu, 2, DType> d_dht1(dht1, Shape2(N, H));
      Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, 3 * H));
      linalg_gemm(d_dart, wh, d_dht1, alpha, beta, false, false);
      if (req_params == kAddTo) {
        beta = 2.0;
        // dwx = da.T * x [3 * H, I] = [3 * H, N] * [N, I] for AddTo
        // NOTE(review): beta = 2.0 doubles the running dwx each step —
        // unusual for a plain accumulate; verify against upstream tests.
        Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I));
        Tensor<cpu, 2, DType> d_dat(dat, Shape2(N, 3 * H));
        Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(3 * H, I));
        linalg_gemm(d_dat, d_xt, d_dwx, alpha, beta, true, false);
      }
      // dwh = dart.T * ht1 [3 * H, H] = [3 * H, N] * [N, H]
      Tensor<cpu, 2, DType> d_ht1(ht1, Shape2(N, D * H));
      Tensor<cpu, 2, DType> d_dwh(dwh, Shape2(3 * H, H));
      Tensor<cpu, 3, DType> d_ht1_tmp = Tensor<cpu, 3, DType>
          (reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
      d_ht1_tmp = reshape(d_ht1.T(), Shape3(D, H, N));
      linalg_gemm(d_dart, d_ht1_tmp[0], d_dwh, alpha, beta, true, true);
    }
  }
  // ---- bias gradients for the forward direction ----
  if (req_params != kNullOp) {
    // dbx = e * da [1, 3 * H] = [1, N] * [N, 3 * H]
    if (req_params != kAddTo) {
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < 3 * H; ++i) {
        for (int j = 0; j < N * T; ++j) {
          dbx[i] += da[j * 3 * H + i];
          dbh[i] += dar[j * 3 * H + i];
        }
      }
    } else {
      // kAddTo: reduce per time step into tmp buffers first.
      const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H * 3, T));
      const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + 3 * H * T, Shape2(H * 3, T));
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < H * T * 3; ++i) {
        tmp_dbx.dptr_[i] = 0;
        tmp_dbh.dptr_[i] = 0;
      }
      for (int t = T - 1; t >= 0; --t) {
        #pragma omp parallel for num_threads(omp_threads)
        for (int i = 0; i < 3 * H; ++i) {
          for (int j = 0; j < N; ++j) {
            tmp_dbx[i][t] += da[t * N * 3 * H + j * 3 * H + i];
            tmp_dbh[i][t] += dar[t * N * 3 * H + j * 3 * H + i];
          }
        }
        // NOTE(review): this evaluates to dbx = 2*dbx + column sum — the
        // self-addition looks unusual; confirm against upstream intent.
        #pragma omp parallel for num_threads(omp_threads)
        for (int i = 0; i < 3 * H; ++i) {
          dbx[i] += tmp_dbx[i][t] + dbx[i];
          dbh[i] += tmp_dbh[i][t] + dbh[i];
        }
      }
    }
  }
  alpha = 1.0;
  beta = 0.0;
  // dx = da * wx [T * N, I] = [T * N, 3 * H] * [3 * H, I]
  Tensor<cpu, 2, DType> d_da(da, Shape2(T * N, 3 * H));
  if (req_data != kNullOp) {
    Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I));
    linalg_gemm(d_da, wx, d_dx, alpha, beta, false, false);
  }
  // dwx = da.T * x [3 * H, I] = [3 * H, T * N] * [T * N, I]
  if (req_params != kNullOp && req_params != kAddTo) {
    Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(3 * H, I));
    linalg_gemm(d_da, x, d_dwx, alpha, beta, true, false);
  }
  // ---- reverse direction (walked forward in time) ----
  if (D == 2) {
    for (int t = 0; t < T; ++t) {
      // For the reverse direction the "previous" state lives one step
      // later in y, and the initial state applies at t == T-1.
      if (t == T-1) {
        back_ht1 = hx_;
      } else {
        back_ht1 = y_ptr + (t + 1) * N * D * H;
      }
      // add dy[T, N, D, H] to dhy[D, N, H]
      dyt = dy_ptr + t * N * D * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          back_dht1[i * H + j] += dyt[i * D * H + H + j];
        }
      }
      rt = back_gateR + t * N * H;
      zt = back_gateZ + t * N * H;
      nt = back_gateN + t * N * H;
      back_Mnht = Mnh + (T + t) * N * H;
      dat = da + t * N * 3 * H;
      dart = dar + t * N * 3 * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          int nid = i * 3 * H + 2 * H + j;
          int zid = i * 3 * H + H + j;
          int rid = i * 3 * H + j;
          int id = i * H + j;
          dat[nid] = back_dht1[id] * (1 - zt[id]) * (1 - nt[id] * nt[id]);
          dart[zid] = dat[zid] = back_dht1[id] * (back_ht1[i * D * H + H + j] -
                      nt[id]) * zt[id] * (1 - zt[id]);
          dart[rid] = dat[rid] = dat[nid] * back_Mnht[id] * rt[id] *
                      (1 - rt[id]);
          dart[nid] = dat[nid] * rt[id];
          back_dht1[id] = back_dht1[id] * zt[id];
        }
      }
      if (req_params != kNullOp) {
        alpha = 1.0;
        beta = 1.0;
        // dht1 = da * wh [N, H] = [N, 3 * H] * [3 * H, H]
        Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, 3 * H));
        Tensor<cpu, 2, DType> d_back_dht1(back_dht1, Shape2(N, H));
        linalg_gemm(d_dart, back_wh, d_back_dht1, alpha, beta, false, false);
        // dwh = da.T * ht1 [3 * H, H] = [3 * H, N] * [N, H]
        Tensor<cpu, 2, DType> d_back_dwh(back_dwh, Shape2(3 * H, H));
        Tensor<cpu, 2, DType> d_back_ht1(back_ht1 + H, Shape2(N, D * H));
        Tensor<cpu, 3, DType> d_back_ht1_tmp = Tensor<cpu, 3, DType>
            (reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
        d_back_ht1_tmp = reshape(d_back_ht1.T(), Shape3(D, H, N));
        if (req_params == kAddTo) {
          beta = 2.0;
          // dwx = da.T * x [3 * H, I] = [3 * H, N] * [N, I] for AddTo
          Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I));
          Tensor<cpu, 2, DType> d_dat(dat, Shape2(N, 3 * H));
          Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(3 * H, I));
          linalg_gemm(d_dat, d_xt, d_back_dwx, alpha, beta, true, false);
        }
        linalg_gemm(d_dart, d_back_ht1_tmp[0], d_back_dwh, alpha, beta, true, true);
      }
    }
    // ---- bias gradients for the reverse direction ----
    if (req_params != kNullOp) {
      // dbx = e * da [1, 3 * H] = [1, N] * [N, 3 * H]
      if (req_params != kAddTo) {
        #pragma omp parallel for num_threads(omp_threads)
        for (int i = 0; i < 3 * H; ++i) {
          for (int j = 0; j < N * T; ++j) {
            back_dbx[i] += da[j * 3 * H + i];
            back_dbh[i] += dar[j * 3 * H + i];
          }
        }
      } else {
        const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H * 3, T));
        const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + 3 * H * T, Shape2(H * 3, T));
        #pragma omp parallel for num_threads(omp_threads)
        for (int i = 0; i < H * T * 3; ++i) {
          tmp_dbx.dptr_[i] = 0;
          tmp_dbh.dptr_[i] = 0;
        }
        for (int t = T - 1; t >= 0; --t) {
          #pragma omp parallel for num_threads(omp_threads)
          for (int i = 0; i < 3 * H; ++i) {
            for (int j = 0; j < N; ++j) {
              tmp_dbx[i][t] += da[t * N * 3 * H + j * 3 * H + i];
              tmp_dbh[i][t] += dar[t * N * 3 * H + j * 3 * H + i];
            }
          }
          // NOTE(review): same self-addition pattern as the forward
          // direction — back_dbx = 2*back_dbx + column sum; verify intent.
          #pragma omp parallel for num_threads(omp_threads)
          for (int i = 0; i < 3 * H; ++i) {
            back_dbx[i] += tmp_dbx[i][t] + back_dbx[i];
            back_dbh[i] += tmp_dbh[i][t] + back_dbh[i];
          }
        }
      }
    }
    alpha = 1.0;
    beta = 1.0;  // accumulate onto the forward-direction dx
    // dxt = da * wx [T * N, I] = [T * N, 3 * H] * [3 * H, I]
    Tensor<cpu, 2, DType> d_da2(da, Shape2(T * N, 3 * H));
    if (req_data != kNullOp) {
      Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I));
      linalg_gemm(d_da2, back_wx, d_dx, alpha, beta, false, false);
    }
    alpha = 1.0;
    beta = 0.0;
    // dwx = da.T * x [3 * H, I] = [3 * H, T * N] * [T * N, I]
    if (req_params != kNullOp && req_params != kAddTo) {
      Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(3 * H, I));
      linalg_gemm(d_da2, x, d_back_dwx, alpha, beta, true, false);
    }
  }
  // Gradient w.r.t. the initial hidden state(s).
  if (req_state != kNullOp) {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N * H * D; ++i) {
      dhx[i] = dht1[i];
    }
  }
}
// Backward pass of the stacked (optionally bidirectional) GRU.
//
// Walks the layers from top (L-1) down to 0, calling
// GruBackwardSingleLayer for each; the dx of layer l becomes the dy of
// layer l-1. rs is the reserved space written by GruForwardTraining
// (same [gateR | gateZ | gateN | y | Mnh | tmp] layout). req_* use the
// kNullOp/kAddTo/... request semantics.
template <typename DType>
void GruBackward(DType* ws,
                 DType* rs,
                 const int L,
                 const int D,
                 const int T,
                 const int N,
                 int I,
                 const int H,
                 DType* x_ptr,
                 DType* hx_ptr,
                 DType* w_ptr,
                 DType* dy_ptr,
                 DType* dhy_ptr,
                 DType* dx_ptr,
                 DType* dhx_ptr,
                 DType* dw_ptr,
                 int req_data,
                 int req_params,
                 int req_state) {
  DType* wx = w_ptr;
  DType* dwx = dw_ptr;
  DType* dwh = dwx + I * H * 3;
  // Bias gradients live after all wx/wh gradient blocks (same packing as
  // the forward weights).
  DType* dbx = dwh + H * H * 3 + (D - 1) * (H * H * 3 + I * H * 3)
      + (L - 1) * ((D + 1) * H) * H * 3 * D;
  // Start the reserved-space cursors at the LAST layer's slices.
  DType* gateR_l = rs + (L - 1) * T * D * N * H;
  DType* gateZ_l = gateR_l + L * T * D * N * H;
  DType* gateN_l = gateZ_l + L * T * D * N * H;
  DType* y_l = gateN_l + L * T * D * N * H;
  DType* Mnh_l = y_l + L * T * N * H * D;
  DType* tmp_buf = Mnh_l + L * D * T * N * H;
  DType* dx_l = tmp_buf + T * N * D * H + 3 * H * T * 2;
  DType* ws2 = dx_l + T * N * D * H;
  // Weight cursors for the top layer; layers above 0 have input size D*H.
  DType* wx_l = (L == 1)? wx : wx + (L - 2) * D * (D + 1) * H * 3 * H
      + D * I * 3 * H + D * H * 3 * H;
  DType* wh_l = wx_l;
  if (L == 1) {
    wh_l = wh_l + I * H * 3;
  } else {
    wh_l = wh_l + (D * H) * H * 3;
  }
  DType* dhy_l = NULL;
  if (dhy_ptr)
    dhy_l = dhy_ptr + (L - 1) * D * N * H;
  DType* dwx_l = (L == 1)? dwx : dwx + (L - 2) * D * (D + 1) * H * 3 * H
      + D * I * 3 * H + D * H * 3 * H;
  DType* dwh_l = NULL;
  if (L == 1) {
    dwh_l = dwx_l + I * H * 3;
  } else {
    dwh_l = dwx_l + (D * H) * H * 3;
  }
  DType* dbx_l = dbx + (L - 1) * D * 3 * H * 2;
  DType* dbh_l = dbx_l + 3 * H;
  DType* dhx_l = dhx_ptr + (L - 1) * D * N * H;
  DType* dy_l = dy_ptr;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(L, D * N, H));
  int inputsize = I;
  // y_tmp is the INPUT of the layer being processed: the output of the
  // layer below (or x_ptr for layer 0).
  DType* y_tmp = y_l - T * N * H * D;
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  for (int l = L - 1; l >= 0; --l) {
    if (l == 0) {
      I = inputsize;
      y_tmp = x_ptr;
      dx_l = dx_ptr;  // bottom layer writes the caller's dx buffer
    } else {
      I = D * H;
    }
    Tensor<cpu, 2, DType> hx_l = hx[l];
    Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I));
    GruBackwardSingleLayer<DType>(ws2, tmp_buf, D, T, N, I, H, x_l, hx_l, wx_l, wh_l, y_l, dy_l,
                                  dhy_l, gateR_l, gateZ_l, gateN_l, Mnh_l, dx_l, dhx_l,
                                  dwx_l, dwh_l, dbx_l, dbh_l, req_data, req_params, req_state);
    if (l > 0) {
      // The input gradient of this layer is the output gradient of the
      // layer below.
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < T * N * H * D; ++i) {
        dy_l[i] = dx_l[i];
      }
      // Step every cursor down one layer.
      gateR_l = gateR_l - T * D * N * H;
      gateZ_l = gateZ_l - T * D * N * H;
      gateN_l = gateN_l - T * D * N * H;
      Mnh_l = Mnh_l - T * D * N * H;
      dhx_l = dhx_l - D * N * H;
      if (dhy_l)
        dhy_l = dhy_l - D * N * H;
      y_l = y_l - T * N * H * D;
      y_tmp = y_l;
      if (l == 1) {
        // Layer 0 uses the original input size.
        wx_l = wx_l - (inputsize + H) * H * 3 * D;
        wh_l = wx_l + inputsize * 3 * H;
        dwx_l = dwx_l - (inputsize + H) * H * 3 * D;
        dwh_l = dwx_l + inputsize * 3 * H;
      } else {
        wx_l = wx_l - (I + H) * H * 3 * D;
        wh_l = wx_l + I * 3 * H;
        dwx_l = dwx_l - (I + H) * H * 3 * D;
        dwh_l = dwx_l + I * 3 * H;
      }
      dbx_l = dbx_l - D * 3 * H * 2;
      dbh_l = dbx_l + 3 * H;
    }
  }
}
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_RNN_IMPL_H_
|
GB_binop__bset_int64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bset_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__bset_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__bset_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__bset_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bset_int64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bset_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__bset_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bset_int64)
// C=scalar+B GB (_bind1st__bset_int64)
// C=scalar+B' GB (_bind1st_tran__bset_int64)
// C=A+scalar GB (_bind2nd__bset_int64)
// C=A'+scalar GB (_bind2nd_tran__bset_int64)
// C type: int64_t
// A type: int64_t
// A pattern? 0
// B type: int64_t
// B pattern? 0
// BinaryOp: cij = GB_BITSET (aij, bij, int64_t, 64)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_BITSET (x, y, int64_t, 64) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSET || GxB_NO_INT64 || GxB_NO_BSET_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Disabled here: the BSET operator is not in that list, so the code
// generator emitted this C += A+B dense kernel under "#if 0".
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; no accumulator, no mask.
// All work is done by the included template, specialized by the GB_* macros.
void GB (_Cdense_ewise3_noaccum__bset_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate sparse B into dense C, sliced by B_ek_slicing.
// Returns GrB_NO_VALUE when this operator/type combo is disabled.
GrB_Info GB (_Cdense_accumB__bset_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a single scalar b into every entry of dense C.
GrB_Info GB (_Cdense_accumb__bset_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE: unreachable — the scope above already returned; duplicate
    // return kept exactly as emitted by the code generator.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// C = A*D (column scale by diagonal D). Disabled: no colscale kernel is
// generated for the BSET operator (see "(none)" in the file banner).
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// C = D*B (row scale by diagonal D). Disabled: no rowscale kernel is
// generated for the BSET operator (see "(none)" in the file banner).
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B (also eWiseUnion, in which
// case alpha/beta scalars replace missing entries of A/B respectively).
GrB_Info GB (_AaddB__bset_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    int64_t alpha_scalar ;
    int64_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // typecast-free unpack of the caller-supplied scalars
        alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int64_t *) beta_scalar_in)) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C = A.*B (optionally masked) where C is
// sparse/hypersparse; work is partitioned by the precomputed TaskList.
GrB_Info GB (_AemultB_08__bset_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B where A is sparse/hyper and B is
// bitmap/full. BSET is non-commutative, so GB_BINOP_FLIP selects a
// flipped instantiation of the template when flipxy is true.
GrB_Info GB (_AemultB_02__bset_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B where M is sparse/hyper and both A
// and B are bitmap/full; parallelized over slices of M.
GrB_Info GB (_AemultB_04__bset_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B (optionally masked) where C is held as a bitmap.
GrB_Info GB (_AemultB_bitmap__bset_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the operator with the scalar bound as the FIRST
// argument, over all bnz entries of B (Bb is B's bitmap, or NULL if full).
GrB_Info GB (_bind1st__bset_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip positions not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        int64_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_BITSET (x, bij, int64_t, 64) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the operator with the scalar bound as the SECOND
// argument, over all anz entries of A (Ab is A's bitmap, or NULL if full).
GrB_Info GB (_bind2nd__bset_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t y = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip positions not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        int64_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_BITSET (aij, y, int64_t, 64) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is the per-entry kernel consumed by GB_unop_transpose.c.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                           \
{                                                   \
    int64_t aij = GBX (Ax, pA, false) ;             \
    Cx [pC] = GB_BITSET (x, aij, int64_t, 64) ;     \
}

// C = op (x, A'): transpose A while applying the operator with the
// scalar bound as the first argument.
GrB_Info GB (_bind1st_tran__bset_int64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for the remainder of the file
    #undef GB_ATYPE
    #define GB_ATYPE \
    int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is the per-entry kernel consumed by GB_unop_transpose.c.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                           \
{                                                   \
    int64_t aij = GBX (Ax, pA, false) ;             \
    Cx [pC] = GB_BITSET (aij, y, int64_t, 64) ;     \
}

// C = op (A', y): transpose A while applying the operator with the
// scalar bound as the second argument.
GrB_Info GB (_bind2nd_tran__bset_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for two struct timeval values.
 *
 * Side effect: *y is normalized in place (whole seconds are moved between
 * tv_sec and tv_usec to resolve borrows/carries), exactly as in the GNU C
 * Library manual's elapsed-time example.
 *
 * Returns 1 when the difference is negative, 0 otherwise. After the call,
 * result->tv_usec is guaranteed non-negative. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y's microsecond field until
     y->tv_usec <= x->tv_usec. */
  if (x->tv_usec < y->tv_usec) {
    int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * carry;
    y->tv_sec += carry;
  }
  /* Opposite adjustment: push excess microseconds into y's seconds. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* With y normalized, the fieldwise difference is the answer and the
     microsecond part cannot be negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
/*
 * Driver: allocates the stencil grids, times TESTS runs of the order-1
 * 3D 7-point variable-coefficient stencil, reports the best time, and
 * frees all memory.
 *
 * Usage: ./3d7pt_var [Nx Ny Nz [Nt]]
 *   Nx,Ny,Nz  interior grid size per axis (2 halo points are added)
 *   Nt        number of time steps
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  /* Defaults keep the benchmark runnable with no arguments; the
   * original read these uninitialized when too few were given. */
  int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* allocate the two time buffers A[0..1][Nz][Ny][Nx] */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2; m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0; j<Ny; j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  /* one coefficient grid per stencil point */
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7; m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0; j<Ny; j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  /* tile size information, including extra element to decide the list length */
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  /* The list is modified here before source-to-source transformations */
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 16;
  tile_size[3] = 128;
  tile_size[4] = -1;

  /* for timekeeping */
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;

  const int BASE = 1024;

  /* Initialize BOTH time buffers over the FULL index range (halo
   * included): the stencil reads the boundary planes of A[t%2], and
   * A[1] is read at t==1 before any boundary value is written to it,
   * so the original partial initialization left uninitialized reads. */
  srand(42);
  for (m = 0; m < 2; m++) {
    for (i = 0; i < Nz; i++) {
      for (j = 0; j < Ny; j++) {
        for (k = 0; k < Nx; k++) {
          A[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
  for (m = 0; m < 7; m++) {
    for (i = 0; i < Nz; i++) {
      for (j = 0; j < Ny; j++) {
        for (k = 0; k < Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for (test = 0; test < TESTS; test++) {
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i  ][j  ][k  ] +
                                  coef[1][i][j][k] * A[t%2][i-1][j  ][k  ] +
                                  coef[2][i][j][k] * A[t%2][i  ][j-1][k  ] +
                                  coef[3][i][j][k] * A[t%2][i  ][j  ][k-1] +
                                  coef[4][i][j][k] * A[t%2][i+1][j  ][k  ] +
                                  coef[5][i][j][k] * A[t%2][i  ][j+1][k  ] +
                                  coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    (void) ts_return;
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* was lowercase `min(...)`, which is not defined -- the macro is MIN */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  /* Free everything, including the top-level pointer arrays and the
   * tile-size list that the original leaked. */
  for (m = 0; m < 2; m++) {
    for (i = 0; i < Nz; i++) {
      for (j = 0; j < Ny; j++) {
        free(A[m][i][j]);
      }
      free(A[m][i]);
    }
    free(A[m]);
  }
  free(A);
  for (m = 0; m < 7; m++) {
    for (i = 0; i < Nz; i++) {
      for (j = 0; j < Ny; j++) {
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);

  return 0;
}
|
convolution_3x3_pack1to4_bf16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv3x3s1_pack1to4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
#if __ARM_NEON && __aarch64__
Mat top_blob_fp32(outw, outh, opt.num_threads, (size_t)4u * 4 * 2, 4 * 2, opt.workspace_allocator);
#else
Mat top_blob_fp32(outw, outh, opt.num_threads, (size_t)4u * 4, 4, opt.workspace_allocator);
#endif
const float* bias = _bias;
int nn_outch = 0;
int remain_outch_start = 0;
#if __ARM_NEON && __aarch64__
nn_outch = outch >> 1;
remain_outch_start = nn_outch << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 2;
Mat out0 = top_blob_fp32.channel(get_omp_thread_num());
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f);
float32x4_t _bias1 = bias ? vld1q_f32((const float*)bias + (p + 1) * 4) : vdupq_n_f32(0.f);
{
float* ptr = (float*)out0;
for (int i = 0; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
vst1q_f32(ptr, _bias0);
vst1q_f32(ptr + 4, _bias0);
vst1q_f32(ptr + 8, _bias0);
vst1q_f32(ptr + 12, _bias0);
vst1q_f32(ptr + 16, _bias1);
vst1q_f32(ptr + 20, _bias1);
vst1q_f32(ptr + 24, _bias1);
vst1q_f32(ptr + 28, _bias1);
ptr += 32;
}
for (; j + 1 < outw; j += 2)
{
vst1q_f32(ptr, _bias0);
vst1q_f32(ptr + 4, _bias0);
vst1q_f32(ptr + 8, _bias1);
vst1q_f32(ptr + 12, _bias1);
ptr += 16;
}
for (; j < outw; j++)
{
vst1q_f32(ptr, _bias0);
vst1q_f32(ptr + 4, _bias1);
ptr += 8;
}
}
}
const unsigned short* k0 = kernel.channel(p);
const unsigned short* k1 = kernel.channel(p + 1);
int q = 0;
for (; q < inch - 1; q++)
{
float* outptr0 = out0;
const Mat img0 = bottom_blob.channel(q);
const unsigned short* r0 = img0.row<const unsigned short>(0);
const unsigned short* r1 = img0.row<const unsigned short>(1);
const unsigned short* r2 = img0.row<const unsigned short>(2);
float32x4_t _k00_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0), 16));
float32x4_t _k01_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 4), 16));
float32x4_t _k02_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 8), 16));
float32x4_t _k10_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 12), 16));
float32x4_t _k11_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 16), 16));
float32x4_t _k12_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 20), 16));
float32x4_t _k20_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 24), 16));
float32x4_t _k21_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 28), 16));
float32x4_t _k22_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 32), 16));
float32x4_t _k00_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1), 16));
float32x4_t _k01_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 4), 16));
float32x4_t _k02_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 8), 16));
float32x4_t _k10_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 12), 16));
float32x4_t _k11_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 16), 16));
float32x4_t _k12_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 20), 16));
float32x4_t _k20_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 24), 16));
float32x4_t _k21_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 28), 16));
float32x4_t _k22_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 32), 16));
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
asm volatile(
"prfm pldl1keep, [%1, #64] \n"
"ld1 {v0.4h}, [%1], #8 \n"
"ld1 {v1.s}[0], [%1] \n"
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%0] \n"
"fmla v24.4s, %8.4s, v0.s[0] \n"
"fmla v25.4s, %8.4s, v0.s[1] \n"
"fmla v26.4s, %8.4s, v0.s[2] \n"
"fmla v27.4s, %8.4s, v0.s[3] \n"
"fmla v28.4s, %17.4s, v0.s[0] \n"
"fmla v29.4s, %17.4s, v0.s[1] \n"
"fmla v30.4s, %17.4s, v0.s[2] \n"
"fmla v31.4s, %17.4s, v0.s[3] \n"
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v2.4h}, [%2], #8 \n"
"ld1 {v3.s}[0], [%2] \n"
"fmla v24.4s, %9.4s, v0.s[1] \n"
"fmla v25.4s, %9.4s, v0.s[2] \n"
"fmla v26.4s, %9.4s, v0.s[3] \n"
"fmla v27.4s, %9.4s, v1.s[0] \n"
"fmla v28.4s, %18.4s, v0.s[1] \n"
"fmla v29.4s, %18.4s, v0.s[2] \n"
"fmla v30.4s, %18.4s, v0.s[3] \n"
"fmla v31.4s, %18.4s, v1.s[0] \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v24.4s, %10.4s, v0.s[2] \n"
"fmla v25.4s, %10.4s, v0.s[3] \n"
"fmla v26.4s, %10.4s, v1.s[0] \n"
"fmla v27.4s, %10.4s, v1.s[1] \n"
"fmla v28.4s, %19.4s, v0.s[2] \n"
"fmla v29.4s, %19.4s, v0.s[3] \n"
"fmla v30.4s, %19.4s, v1.s[0] \n"
"fmla v31.4s, %19.4s, v1.s[1] \n"
"fmla v24.4s, %11.4s, v2.s[0] \n"
"fmla v25.4s, %11.4s, v2.s[1] \n"
"fmla v26.4s, %11.4s, v2.s[2] \n"
"fmla v27.4s, %11.4s, v2.s[3] \n"
"fmla v28.4s, %20.4s, v2.s[0] \n"
"fmla v29.4s, %20.4s, v2.s[1] \n"
"fmla v30.4s, %20.4s, v2.s[2] \n"
"fmla v31.4s, %20.4s, v2.s[3] \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v0.4h}, [%3], #8 \n"
"ld1 {v1.s}[0], [%3] \n"
"fmla v24.4s, %12.4s, v2.s[1] \n"
"fmla v25.4s, %12.4s, v2.s[2] \n"
"fmla v26.4s, %12.4s, v2.s[3] \n"
"fmla v27.4s, %12.4s, v3.s[0] \n"
"fmla v28.4s, %21.4s, v2.s[1] \n"
"fmla v29.4s, %21.4s, v2.s[2] \n"
"fmla v30.4s, %21.4s, v2.s[3] \n"
"fmla v31.4s, %21.4s, v3.s[0] \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v24.4s, %13.4s, v2.s[2] \n"
"fmla v25.4s, %13.4s, v2.s[3] \n"
"fmla v26.4s, %13.4s, v3.s[0] \n"
"fmla v27.4s, %13.4s, v3.s[1] \n"
"fmla v28.4s, %22.4s, v2.s[2] \n"
"fmla v29.4s, %22.4s, v2.s[3] \n"
"fmla v30.4s, %22.4s, v3.s[0] \n"
"fmla v31.4s, %22.4s, v3.s[1] \n"
"fmla v24.4s, %14.4s, v0.s[0] \n"
"fmla v25.4s, %14.4s, v0.s[1] \n"
"fmla v26.4s, %14.4s, v0.s[2] \n"
"fmla v27.4s, %14.4s, v0.s[3] \n"
"fmla v28.4s, %23.4s, v0.s[0] \n"
"fmla v29.4s, %23.4s, v0.s[1] \n"
"fmla v30.4s, %23.4s, v0.s[2] \n"
"fmla v31.4s, %23.4s, v0.s[3] \n"
"fmla v24.4s, %15.4s, v0.s[1] \n"
"fmla v25.4s, %15.4s, v0.s[2] \n"
"fmla v26.4s, %15.4s, v0.s[3] \n"
"fmla v27.4s, %15.4s, v1.s[0] \n"
"fmla v28.4s, %24.4s, v0.s[1] \n"
"fmla v29.4s, %24.4s, v0.s[2] \n"
"fmla v30.4s, %24.4s, v0.s[3] \n"
"fmla v31.4s, %24.4s, v1.s[0] \n"
"sub %0, %0, #64 \n"
"fmla v24.4s, %16.4s, v0.s[2] \n"
"fmla v25.4s, %16.4s, v0.s[3] \n"
"fmla v26.4s, %16.4s, v1.s[0] \n"
"fmla v27.4s, %16.4s, v1.s[1] \n"
"fmla v28.4s, %25.4s, v0.s[2] \n"
"fmla v29.4s, %25.4s, v0.s[3] \n"
"fmla v30.4s, %25.4s, v1.s[0] \n"
"fmla v31.4s, %25.4s, v1.s[1] \n"
"st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n"
"st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00_0), // %8
"w"(_k01_0), // %9
"w"(_k02_0), // %10
"w"(_k10_0), // %11
"w"(_k11_0), // %12
"w"(_k12_0), // %13
"w"(_k20_0), // %14
"w"(_k21_0), // %15
"w"(_k22_0), // %16
"w"(_k00_1), // %17
"w"(_k01_1), // %18
"w"(_k02_1), // %19
"w"(_k10_1), // %20
"w"(_k11_1), // %21
"w"(_k12_1), // %22
"w"(_k20_1), // %23
"w"(_k21_1), // %24
"w"(_k22_1) // %25
: "memory", "v0", "v1", "v2", "v3", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; j + 1 < outw; j += 2)
{
asm volatile(
"prfm pldl1keep, [%1, #64] \n"
"ld1 {v0.4h}, [%1] \n"
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v24.4s, %8.4s, v0.s[0] \n"
"fmla v25.4s, %8.4s, v0.s[1] \n"
"fmla v26.4s, %17.4s, v0.s[0] \n"
"fmla v27.4s, %17.4s, v0.s[1] \n"
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v1.4h}, [%2] \n"
"fmla v24.4s, %9.4s, v0.s[1] \n"
"fmla v25.4s, %9.4s, v0.s[2] \n"
"fmla v26.4s, %18.4s, v0.s[1] \n"
"fmla v27.4s, %18.4s, v0.s[2] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v24.4s, %10.4s, v0.s[2] \n"
"fmla v25.4s, %10.4s, v0.s[3] \n"
"fmla v26.4s, %19.4s, v0.s[2] \n"
"fmla v27.4s, %19.4s, v0.s[3] \n"
"fmla v24.4s, %11.4s, v1.s[0] \n"
"fmla v25.4s, %11.4s, v1.s[1] \n"
"fmla v26.4s, %20.4s, v1.s[0] \n"
"fmla v27.4s, %20.4s, v1.s[1] \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v0.4h}, [%3] \n"
"fmla v24.4s, %12.4s, v1.s[1] \n"
"fmla v25.4s, %12.4s, v1.s[2] \n"
"fmla v26.4s, %21.4s, v1.s[1] \n"
"fmla v27.4s, %21.4s, v1.s[2] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v24.4s, %13.4s, v1.s[2] \n"
"fmla v25.4s, %13.4s, v1.s[3] \n"
"fmla v26.4s, %22.4s, v1.s[2] \n"
"fmla v27.4s, %22.4s, v1.s[3] \n"
"fmla v24.4s, %14.4s, v0.s[0] \n"
"fmla v25.4s, %14.4s, v0.s[1] \n"
"fmla v26.4s, %23.4s, v0.s[0] \n"
"fmla v27.4s, %23.4s, v0.s[1] \n"
"add %1, %1, #4 \n"
"fmla v24.4s, %15.4s, v0.s[1] \n"
"fmla v25.4s, %15.4s, v0.s[2] \n"
"fmla v26.4s, %24.4s, v0.s[1] \n"
"fmla v27.4s, %24.4s, v0.s[2] \n"
"add %2, %2, #4 \n"
"fmla v24.4s, %16.4s, v0.s[2] \n"
"fmla v25.4s, %16.4s, v0.s[3] \n"
"fmla v26.4s, %25.4s, v0.s[2] \n"
"fmla v27.4s, %25.4s, v0.s[3] \n"
"add %3, %3, #4 \n"
"st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00_0), // %8
"w"(_k01_0), // %9
"w"(_k02_0), // %10
"w"(_k10_0), // %11
"w"(_k11_0), // %12
"w"(_k12_0), // %13
"w"(_k20_0), // %14
"w"(_k21_0), // %15
"w"(_k22_0), // %16
"w"(_k00_1), // %17
"w"(_k01_1), // %18
"w"(_k02_1), // %19
"w"(_k10_1), // %20
"w"(_k11_1), // %21
"w"(_k12_1), // %22
"w"(_k20_1), // %23
"w"(_k21_1), // %24
"w"(_k22_1) // %25
: "memory", "v0", "v1", "v24", "v25", "v26", "v27");
}
for (; j < outw; j++)
{
float32x4_t _sum00 = vld1q_f32(outptr0);
float32x4_t _sum10 = vld1q_f32(outptr0 + 4);
float32x4_t _r0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r0), 16));
float32x4_t _r1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r1), 16));
float32x4_t _r2 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r2), 16));
_sum00 = vfmaq_laneq_f32(_sum00, _k00_0, _r0, 0);
_sum00 = vfmaq_laneq_f32(_sum00, _k01_0, _r0, 1);
_sum00 = vfmaq_laneq_f32(_sum00, _k02_0, _r0, 2);
_sum00 = vfmaq_laneq_f32(_sum00, _k10_0, _r1, 0);
_sum00 = vfmaq_laneq_f32(_sum00, _k11_0, _r1, 1);
_sum00 = vfmaq_laneq_f32(_sum00, _k12_0, _r1, 2);
_sum00 = vfmaq_laneq_f32(_sum00, _k20_0, _r2, 0);
_sum00 = vfmaq_laneq_f32(_sum00, _k21_0, _r2, 1);
_sum00 = vfmaq_laneq_f32(_sum00, _k22_0, _r2, 2);
_sum10 = vfmaq_laneq_f32(_sum10, _k00_1, _r0, 0);
_sum10 = vfmaq_laneq_f32(_sum10, _k01_1, _r0, 1);
_sum10 = vfmaq_laneq_f32(_sum10, _k02_1, _r0, 2);
_sum10 = vfmaq_laneq_f32(_sum10, _k10_1, _r1, 0);
_sum10 = vfmaq_laneq_f32(_sum10, _k11_1, _r1, 1);
_sum10 = vfmaq_laneq_f32(_sum10, _k12_1, _r1, 2);
_sum10 = vfmaq_laneq_f32(_sum10, _k20_1, _r2, 0);
_sum10 = vfmaq_laneq_f32(_sum10, _k21_1, _r2, 1);
_sum10 = vfmaq_laneq_f32(_sum10, _k22_1, _r2, 2);
vst1q_f32(outptr0, _sum00);
vst1q_f32(outptr0 + 4, _sum10);
r0 += 1;
r1 += 1;
r2 += 1;
outptr0 += 8;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
k0 += 9 * 4;
k1 += 9 * 4;
}
for (; q < inch; q++)
{
unsigned short* outptr0_bf16 = top_blob.channel(p);
unsigned short* outptr1_bf16 = top_blob.channel(p + 1);
const float* outptr0 = out0;
const Mat img0 = bottom_blob.channel(q);
const unsigned short* r0 = img0.row<const unsigned short>(0);
const unsigned short* r1 = img0.row<const unsigned short>(1);
const unsigned short* r2 = img0.row<const unsigned short>(2);
float32x4_t _k00_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0), 16));
float32x4_t _k01_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 4), 16));
float32x4_t _k02_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 8), 16));
float32x4_t _k10_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 12), 16));
float32x4_t _k11_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 16), 16));
float32x4_t _k12_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 20), 16));
float32x4_t _k20_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 24), 16));
float32x4_t _k21_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 28), 16));
float32x4_t _k22_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 32), 16));
float32x4_t _k00_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1), 16));
float32x4_t _k01_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 4), 16));
float32x4_t _k02_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 8), 16));
float32x4_t _k10_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 12), 16));
float32x4_t _k11_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 16), 16));
float32x4_t _k12_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 20), 16));
float32x4_t _k20_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 24), 16));
float32x4_t _k21_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 28), 16));
float32x4_t _k22_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 32), 16));
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
asm volatile(
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v0.4h}, [%3], #8 \n"
"ld1 {v1.s}[0], [%3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n"
"fmla v24.4s, %12.4s, v0.s[0] \n"
"fmla v25.4s, %12.4s, v0.s[1] \n"
"fmla v26.4s, %12.4s, v0.s[2] \n"
"fmla v27.4s, %12.4s, v0.s[3] \n"
"fmla v28.4s, %21.4s, v0.s[0] \n"
"fmla v29.4s, %21.4s, v0.s[1] \n"
"fmla v30.4s, %21.4s, v0.s[2] \n"
"fmla v31.4s, %21.4s, v0.s[3] \n"
"fmla v24.4s, %13.4s, v0.s[1] \n"
"fmla v25.4s, %13.4s, v0.s[2] \n"
"fmla v26.4s, %13.4s, v0.s[3] \n"
"fmla v27.4s, %13.4s, v1.s[0] \n"
"fmla v28.4s, %22.4s, v0.s[1] \n"
"fmla v29.4s, %22.4s, v0.s[2] \n"
"fmla v30.4s, %22.4s, v0.s[3] \n"
"fmla v31.4s, %22.4s, v1.s[0] \n"
"prfm pldl1keep, [%4, #64] \n"
"ld1 {v2.4h}, [%4], #8 \n"
"ld1 {v3.s}[0], [%4] \n"
"fmla v24.4s, %14.4s, v0.s[2] \n"
"fmla v25.4s, %14.4s, v0.s[3] \n"
"fmla v26.4s, %14.4s, v1.s[0] \n"
"fmla v27.4s, %14.4s, v1.s[1] \n"
"fmla v28.4s, %23.4s, v0.s[2] \n"
"fmla v29.4s, %23.4s, v0.s[3] \n"
"fmla v30.4s, %23.4s, v1.s[0] \n"
"fmla v31.4s, %23.4s, v1.s[1] \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v24.4s, %15.4s, v2.s[0] \n"
"fmla v25.4s, %15.4s, v2.s[1] \n"
"fmla v26.4s, %15.4s, v2.s[2] \n"
"fmla v27.4s, %15.4s, v2.s[3] \n"
"fmla v28.4s, %24.4s, v2.s[0] \n"
"fmla v29.4s, %24.4s, v2.s[1] \n"
"fmla v30.4s, %24.4s, v2.s[2] \n"
"fmla v31.4s, %24.4s, v2.s[3] \n"
"fmla v24.4s, %16.4s, v2.s[1] \n"
"fmla v25.4s, %16.4s, v2.s[2] \n"
"fmla v26.4s, %16.4s, v2.s[3] \n"
"fmla v27.4s, %16.4s, v3.s[0] \n"
"fmla v28.4s, %25.4s, v2.s[1] \n"
"fmla v29.4s, %25.4s, v2.s[2] \n"
"fmla v30.4s, %25.4s, v2.s[3] \n"
"fmla v31.4s, %25.4s, v3.s[0] \n"
"prfm pldl1keep, [%5, #64] \n"
"ld1 {v0.4h}, [%5], #8 \n"
"ld1 {v1.s}[0], [%5] \n"
"fmla v24.4s, %17.4s, v2.s[2] \n"
"fmla v25.4s, %17.4s, v2.s[3] \n"
"fmla v26.4s, %17.4s, v3.s[0] \n"
"fmla v27.4s, %17.4s, v3.s[1] \n"
"fmla v28.4s, %26.4s, v2.s[2] \n"
"fmla v29.4s, %26.4s, v2.s[3] \n"
"fmla v30.4s, %26.4s, v3.s[0] \n"
"fmla v31.4s, %26.4s, v3.s[1] \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v24.4s, %18.4s, v0.s[0] \n"
"fmla v25.4s, %18.4s, v0.s[1] \n"
"fmla v26.4s, %18.4s, v0.s[2] \n"
"fmla v27.4s, %18.4s, v0.s[3] \n"
"fmla v28.4s, %27.4s, v0.s[0] \n"
"fmla v29.4s, %27.4s, v0.s[1] \n"
"fmla v30.4s, %27.4s, v0.s[2] \n"
"fmla v31.4s, %27.4s, v0.s[3] \n"
"fmla v24.4s, %19.4s, v0.s[1] \n"
"fmla v25.4s, %19.4s, v0.s[2] \n"
"fmla v26.4s, %19.4s, v0.s[3] \n"
"fmla v27.4s, %19.4s, v1.s[0] \n"
"fmla v28.4s, %28.4s, v0.s[1] \n"
"fmla v29.4s, %28.4s, v0.s[2] \n"
"fmla v30.4s, %28.4s, v0.s[3] \n"
"fmla v31.4s, %28.4s, v1.s[0] \n"
"fmla v24.4s, %20.4s, v0.s[2] \n"
"fmla v25.4s, %20.4s, v0.s[3] \n"
"fmla v26.4s, %20.4s, v1.s[0] \n"
"fmla v27.4s, %20.4s, v1.s[1] \n"
"fmla v28.4s, %29.4s, v0.s[2] \n"
"fmla v29.4s, %29.4s, v0.s[3] \n"
"fmla v30.4s, %29.4s, v1.s[0] \n"
"fmla v31.4s, %29.4s, v1.s[1] \n"
"shrn v24.4h, v24.4s, #16 \n"
"shrn v25.4h, v25.4s, #16 \n"
"shrn v26.4h, v26.4s, #16 \n"
"shrn v27.4h, v27.4s, #16 \n"
"shrn v28.4h, v28.4s, #16 \n"
"shrn v29.4h, v29.4s, #16 \n"
"shrn v30.4h, v30.4s, #16 \n"
"shrn v31.4h, v31.4s, #16 \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%0], #32 \n"
"st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr1_bf16), // %1
"=r"(outptr0), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2) // %5
: "0"(outptr0_bf16),
"1"(outptr1_bf16),
"2"(outptr0),
"3"(r0),
"4"(r1),
"5"(r2),
"w"(_k00_0), // %12
"w"(_k01_0), // %13
"w"(_k02_0), // %14
"w"(_k10_0), // %15
"w"(_k11_0), // %16
"w"(_k12_0), // %17
"w"(_k20_0), // %18
"w"(_k21_0), // %19
"w"(_k22_0), // %20
"w"(_k00_1), // %21
"w"(_k01_1), // %22
"w"(_k02_1), // %23
"w"(_k10_1), // %24
"w"(_k11_1), // %25
"w"(_k12_1), // %26
"w"(_k20_1), // %27
"w"(_k21_1), // %28
"w"(_k22_1) // %29
: "memory", "v0", "v1", "v2", "v3", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; j + 1 < outw; j += 2)
{
asm volatile(
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v0.4h}, [%3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v24.4s, %12.4s, v0.s[0] \n"
"fmla v25.4s, %12.4s, v0.s[1] \n"
"fmla v26.4s, %21.4s, v0.s[0] \n"
"fmla v27.4s, %21.4s, v0.s[1] \n"
"prfm pldl1keep, [%4, #64] \n"
"ld1 {v1.4h}, [%4] \n"
"fmla v24.4s, %13.4s, v0.s[1] \n"
"fmla v25.4s, %13.4s, v0.s[2] \n"
"fmla v26.4s, %22.4s, v0.s[1] \n"
"fmla v27.4s, %22.4s, v0.s[2] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v24.4s, %14.4s, v0.s[2] \n"
"fmla v25.4s, %14.4s, v0.s[3] \n"
"fmla v26.4s, %23.4s, v0.s[2] \n"
"fmla v27.4s, %23.4s, v0.s[3] \n"
"fmla v24.4s, %15.4s, v1.s[0] \n"
"fmla v25.4s, %15.4s, v1.s[1] \n"
"fmla v26.4s, %24.4s, v1.s[0] \n"
"fmla v27.4s, %24.4s, v1.s[1] \n"
"prfm pldl1keep, [%5, #64] \n"
"ld1 {v0.4h}, [%5] \n"
"fmla v24.4s, %16.4s, v1.s[1] \n"
"fmla v25.4s, %16.4s, v1.s[2] \n"
"fmla v26.4s, %25.4s, v1.s[1] \n"
"fmla v27.4s, %25.4s, v1.s[2] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v24.4s, %17.4s, v1.s[2] \n"
"fmla v25.4s, %17.4s, v1.s[3] \n"
"fmla v26.4s, %26.4s, v1.s[2] \n"
"fmla v27.4s, %26.4s, v1.s[3] \n"
"fmla v24.4s, %18.4s, v0.s[0] \n"
"fmla v25.4s, %18.4s, v0.s[1] \n"
"fmla v26.4s, %27.4s, v0.s[0] \n"
"fmla v27.4s, %27.4s, v0.s[1] \n"
"fmla v24.4s, %19.4s, v0.s[1] \n"
"fmla v25.4s, %19.4s, v0.s[2] \n"
"fmla v26.4s, %28.4s, v0.s[1] \n"
"fmla v27.4s, %28.4s, v0.s[2] \n"
"add %3, %3, #4 \n"
"fmla v24.4s, %20.4s, v0.s[2] \n"
"fmla v25.4s, %20.4s, v0.s[3] \n"
"fmla v26.4s, %29.4s, v0.s[2] \n"
"fmla v27.4s, %29.4s, v0.s[3] \n"
"add %4, %4, #4 \n"
"shrn v24.4h, v24.4s, #16 \n"
"shrn v25.4h, v25.4s, #16 \n"
"shrn v26.4h, v26.4s, #16 \n"
"shrn v27.4h, v27.4s, #16 \n"
"add %5, %5, #4 \n"
"st1 {v24.4h, v25.4h}, [%0], #16 \n"
"st1 {v26.4h, v27.4h}, [%1], #16 \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr1_bf16), // %1
"=r"(outptr0), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2) // %5
: "0"(outptr0_bf16),
"1"(outptr1_bf16),
"2"(outptr0),
"3"(r0),
"4"(r1),
"5"(r2),
"w"(_k00_0), // %12
"w"(_k01_0), // %13
"w"(_k02_0), // %14
"w"(_k10_0), // %15
"w"(_k11_0), // %16
"w"(_k12_0), // %17
"w"(_k20_0), // %18
"w"(_k21_0), // %19
"w"(_k22_0), // %20
"w"(_k00_1), // %21
"w"(_k01_1), // %22
"w"(_k02_1), // %23
"w"(_k10_1), // %24
"w"(_k11_1), // %25
"w"(_k12_1), // %26
"w"(_k20_1), // %27
"w"(_k21_1), // %28
"w"(_k22_1) // %29
: "memory", "v0", "v1", "v24", "v25", "v26", "v27");
}
for (; j < outw; j++)
{
float32x4_t _sum00 = vld1q_f32(outptr0);
float32x4_t _sum10 = vld1q_f32(outptr0 + 4);
float32x4_t _r0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r0), 16));
float32x4_t _r1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r1), 16));
float32x4_t _r2 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r2), 16));
_sum00 = vfmaq_laneq_f32(_sum00, _k00_0, _r0, 0);
_sum00 = vfmaq_laneq_f32(_sum00, _k01_0, _r0, 1);
_sum00 = vfmaq_laneq_f32(_sum00, _k02_0, _r0, 2);
_sum00 = vfmaq_laneq_f32(_sum00, _k10_0, _r1, 0);
_sum00 = vfmaq_laneq_f32(_sum00, _k11_0, _r1, 1);
_sum00 = vfmaq_laneq_f32(_sum00, _k12_0, _r1, 2);
_sum00 = vfmaq_laneq_f32(_sum00, _k20_0, _r2, 0);
_sum00 = vfmaq_laneq_f32(_sum00, _k21_0, _r2, 1);
_sum00 = vfmaq_laneq_f32(_sum00, _k22_0, _r2, 2);
_sum10 = vfmaq_laneq_f32(_sum10, _k00_1, _r0, 0);
_sum10 = vfmaq_laneq_f32(_sum10, _k01_1, _r0, 1);
_sum10 = vfmaq_laneq_f32(_sum10, _k02_1, _r0, 2);
_sum10 = vfmaq_laneq_f32(_sum10, _k10_1, _r1, 0);
_sum10 = vfmaq_laneq_f32(_sum10, _k11_1, _r1, 1);
_sum10 = vfmaq_laneq_f32(_sum10, _k12_1, _r1, 2);
_sum10 = vfmaq_laneq_f32(_sum10, _k20_1, _r2, 0);
_sum10 = vfmaq_laneq_f32(_sum10, _k21_1, _r2, 1);
_sum10 = vfmaq_laneq_f32(_sum10, _k22_1, _r2, 2);
vst1_u16(outptr0_bf16, vshrn_n_u32(vreinterpretq_u32_f32(_sum00), 16));
vst1_u16(outptr1_bf16, vshrn_n_u32(vreinterpretq_u32_f32(_sum10), 16));
r0 += 1;
r1 += 1;
r2 += 1;
outptr0 += 8;
outptr0_bf16 += 4;
outptr1_bf16 += 4;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
k0 += 9 * 4;
k1 += 9 * 4;
}
}
#endif // __ARM_NEON && __aarch64__
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
Mat out0 = top_blob_fp32.channel(get_omp_thread_num());
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f);
out0.fill(_bias0);
const unsigned short* k0 = kernel.channel(p);
int q = 0;
for (; q < inch - 1; q++)
{
float* outptr0 = out0.row(0);
const Mat img0 = bottom_blob.channel(q);
const unsigned short* r0 = img0.row<unsigned short>(0);
const unsigned short* r1 = img0.row<unsigned short>(1);
const unsigned short* r2 = img0.row<unsigned short>(2);
float32x4_t _k00 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0), 16));
float32x4_t _k01 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 4), 16));
float32x4_t _k02 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 8), 16));
float32x4_t _k10 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 12), 16));
float32x4_t _k11 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 16), 16));
float32x4_t _k12 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 20), 16));
float32x4_t _k20 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 24), 16));
float32x4_t _k21 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 28), 16));
float32x4_t _k22 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 32), 16));
int i = 0;
for (; i < outh; i++)
{
int j = 0;
#if __aarch64__
for (; j + 7 < outw; j += 8)
{
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n"
// "prfm pldl1keep, [%0, #512] \n"
"ld1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%0] \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.4h, v1.4h}, [%1], #16 \n"
"ld1 {v2.s}[0], [%1] \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v24.4s, %8.4s, v0.s[0] \n"
"fmla v25.4s, %8.4s, v0.s[1] \n"
"fmla v26.4s, %8.4s, v0.s[2] \n"
"fmla v27.4s, %8.4s, v0.s[3] \n"
"fmla v28.4s, %8.4s, v1.s[0] \n"
"fmla v29.4s, %8.4s, v1.s[1] \n"
"fmla v30.4s, %8.4s, v1.s[2] \n"
"fmla v31.4s, %8.4s, v1.s[3] \n"
"fmla v24.4s, %9.4s, v0.s[1] \n"
"fmla v25.4s, %9.4s, v0.s[2] \n"
"fmla v26.4s, %9.4s, v0.s[3] \n"
"fmla v27.4s, %9.4s, v1.s[0] \n"
"fmla v28.4s, %9.4s, v1.s[1] \n"
"fmla v29.4s, %9.4s, v1.s[2] \n"
"fmla v30.4s, %9.4s, v1.s[3] \n"
"fmla v31.4s, %9.4s, v2.s[0] \n"
"fmla v24.4s, %10.4s, v0.s[2] \n"
"fmla v25.4s, %10.4s, v0.s[3] \n"
"fmla v26.4s, %10.4s, v1.s[0] \n"
"fmla v27.4s, %10.4s, v1.s[1] \n"
"fmla v28.4s, %10.4s, v1.s[2] \n"
"fmla v29.4s, %10.4s, v1.s[3] \n"
"fmla v30.4s, %10.4s, v2.s[0] \n"
"fmla v31.4s, %10.4s, v2.s[1] \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v4.4h, v5.4h}, [%2], #16 \n"
"ld1 {v2.s}[0], [%2] \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v24.4s, %11.4s, v4.s[0] \n"
"fmla v25.4s, %11.4s, v4.s[1] \n"
"fmla v26.4s, %11.4s, v4.s[2] \n"
"fmla v27.4s, %11.4s, v4.s[3] \n"
"fmla v28.4s, %11.4s, v5.s[0] \n"
"fmla v29.4s, %11.4s, v5.s[1] \n"
"fmla v30.4s, %11.4s, v5.s[2] \n"
"fmla v31.4s, %11.4s, v5.s[3] \n"
"fmla v24.4s, %12.4s, v4.s[1] \n"
"fmla v25.4s, %12.4s, v4.s[2] \n"
"fmla v26.4s, %12.4s, v4.s[3] \n"
"fmla v27.4s, %12.4s, v5.s[0] \n"
"fmla v28.4s, %12.4s, v5.s[1] \n"
"fmla v29.4s, %12.4s, v5.s[2] \n"
"fmla v30.4s, %12.4s, v5.s[3] \n"
"fmla v31.4s, %12.4s, v2.s[0] \n"
"fmla v24.4s, %13.4s, v4.s[2] \n"
"fmla v25.4s, %13.4s, v4.s[3] \n"
"fmla v26.4s, %13.4s, v5.s[0] \n"
"fmla v27.4s, %13.4s, v5.s[1] \n"
"fmla v28.4s, %13.4s, v5.s[2] \n"
"fmla v29.4s, %13.4s, v5.s[3] \n"
"fmla v30.4s, %13.4s, v2.s[0] \n"
"fmla v31.4s, %13.4s, v2.s[1] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.4h, v1.4h}, [%3], #16 \n"
"ld1 {v2.s}[0], [%3] \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v24.4s, %14.4s, v0.s[0] \n"
"fmla v25.4s, %14.4s, v0.s[1] \n"
"fmla v26.4s, %14.4s, v0.s[2] \n"
"fmla v27.4s, %14.4s, v0.s[3] \n"
"fmla v28.4s, %14.4s, v1.s[0] \n"
"fmla v29.4s, %14.4s, v1.s[1] \n"
"fmla v30.4s, %14.4s, v1.s[2] \n"
"fmla v31.4s, %14.4s, v1.s[3] \n"
"fmla v24.4s, %15.4s, v0.s[1] \n"
"fmla v25.4s, %15.4s, v0.s[2] \n"
"fmla v26.4s, %15.4s, v0.s[3] \n"
"fmla v27.4s, %15.4s, v1.s[0] \n"
"fmla v28.4s, %15.4s, v1.s[1] \n"
"fmla v29.4s, %15.4s, v1.s[2] \n"
"fmla v30.4s, %15.4s, v1.s[3] \n"
"fmla v31.4s, %15.4s, v2.s[0] \n"
"sub %0, %0, #64 \n"
"fmla v24.4s, %16.4s, v0.s[2] \n"
"fmla v25.4s, %16.4s, v0.s[3] \n"
"fmla v26.4s, %16.4s, v1.s[0] \n"
"fmla v27.4s, %16.4s, v1.s[1] \n"
"fmla v28.4s, %16.4s, v1.s[2] \n"
"fmla v29.4s, %16.4s, v1.s[3] \n"
"fmla v30.4s, %16.4s, v2.s[0] \n"
"fmla v31.4s, %16.4s, v2.s[1] \n"
"st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n"
"st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22) // %16
: "memory", "v0", "v1", "v2", "v4", "v5", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
#endif // __aarch64__
for (; j + 3 < outw; j += 4)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #64] \n"
"ld1 {v0.4h}, [%1], #8 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0] \n"
"shll v0.4s, v0.4h, #16 \n"
"ld1 {v1.s}[0], [%1] \n"
"fmla v24.4s, %8.4s, v0.s[0] \n"
"fmla v25.4s, %8.4s, v0.s[1] \n"
"fmla v26.4s, %8.4s, v0.s[2] \n"
"fmla v27.4s, %8.4s, v0.s[3] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v24.4s, %9.4s, v0.s[1] \n"
"fmla v25.4s, %9.4s, v0.s[2] \n"
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v2.4h}, [%2], #8 \n"
"fmla v26.4s, %9.4s, v0.s[3] \n"
"fmla v27.4s, %9.4s, v1.s[0] \n"
"ld1 {v3.s}[0], [%2] \n"
"fmla v24.4s, %10.4s, v0.s[2] \n"
"fmla v25.4s, %10.4s, v0.s[3] \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v26.4s, %10.4s, v1.s[0] \n"
"fmla v27.4s, %10.4s, v1.s[1] \n"
"fmla v24.4s, %11.4s, v2.s[0] \n"
"fmla v25.4s, %11.4s, v2.s[1] \n"
"fmla v26.4s, %11.4s, v2.s[2] \n"
"fmla v27.4s, %11.4s, v2.s[3] \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v24.4s, %12.4s, v2.s[1] \n"
"fmla v25.4s, %12.4s, v2.s[2] \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v0.4h}, [%3], #8 \n"
"fmla v26.4s, %12.4s, v2.s[3] \n"
"fmla v27.4s, %12.4s, v3.s[0] \n"
"ld1 {v1.s}[0], [%3] \n"
"fmla v24.4s, %13.4s, v2.s[2] \n"
"fmla v25.4s, %13.4s, v2.s[3] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v26.4s, %13.4s, v3.s[0] \n"
"fmla v27.4s, %13.4s, v3.s[1] \n"
"fmla v24.4s, %14.4s, v0.s[0] \n"
"fmla v25.4s, %14.4s, v0.s[1] \n"
"fmla v26.4s, %14.4s, v0.s[2] \n"
"fmla v27.4s, %14.4s, v0.s[3] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v24.4s, %15.4s, v0.s[1] \n"
"fmla v25.4s, %15.4s, v0.s[2] \n"
"fmla v26.4s, %15.4s, v0.s[3] \n"
"fmla v27.4s, %15.4s, v1.s[0] \n"
"fmla v24.4s, %16.4s, v0.s[2] \n"
"fmla v25.4s, %16.4s, v0.s[3] \n"
"fmla v26.4s, %16.4s, v1.s[0] \n"
"fmla v27.4s, %16.4s, v1.s[1] \n"
"st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22) // %16
: "memory", "v0", "v1", "v2", "v3", "v24", "v25", "v26", "v27");
#else // __aarch64__
asm volatile(
"pld [%0, #512] \n"
"vldm %0, {d24-d31} \n"
"pld [%1, #64] \n"
"vld1.u16 {d1}, [%1]! \n"
"vld1.u32 {d2[0]}, [%1] \n"
"vshll.u16 q0, d1, #16 \n"
"vshll.u16 q1, d2, #16 \n"
"vmla.f32 q12, %q8, d0[0] \n"
"vmla.f32 q13, %q8, d0[1] \n"
"vmla.f32 q14, %q8, d1[0] \n"
"vmla.f32 q15, %q8, d1[1] \n"
"vmla.f32 q12, %q9, d0[1] \n"
"vmla.f32 q13, %q9, d1[0] \n"
"vmla.f32 q14, %q9, d1[1] \n"
"vmla.f32 q15, %q9, d2[0] \n"
"vmla.f32 q12, %q10, d1[0] \n"
"vmla.f32 q13, %q10, d1[1] \n"
"vmla.f32 q14, %q10, d2[0] \n"
"vmla.f32 q15, %q10, d2[1] \n"
"pld [%2, #64] \n"
"vld1.u16 {d5}, [%2]! \n"
"vld1.u32 {d3[0]}, [%2] \n"
"vshll.u16 q2, d5, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q12, %q11, d4[0] \n"
"vmla.f32 q13, %q11, d4[1] \n"
"vmla.f32 q14, %q11, d5[0] \n"
"vmla.f32 q15, %q11, d5[1] \n"
"vmla.f32 q12, %q12, d4[1] \n"
"vmla.f32 q13, %q12, d5[0] \n"
"vmla.f32 q14, %q12, d5[1] \n"
"vmla.f32 q15, %q12, d2[0] \n"
"vmla.f32 q12, %q13, d5[0] \n"
"vmla.f32 q13, %q13, d5[1] \n"
"vmla.f32 q14, %q13, d2[0] \n"
"vmla.f32 q15, %q13, d2[1] \n"
"pld [%3, #64] \n"
"vld1.u16 {d1}, [%3]! \n"
"vld1.u32 {d2[0]}, [%3] \n"
"vshll.u16 q0, d1, #16 \n"
"vshll.u16 q1, d2, #16 \n"
"vmla.f32 q12, %q14, d0[0] \n"
"vmla.f32 q13, %q14, d0[1] \n"
"vmla.f32 q14, %q14, d1[0] \n"
"vmla.f32 q15, %q14, d1[1] \n"
"vmla.f32 q12, %q15, d0[1] \n"
"vmla.f32 q13, %q15, d1[0] \n"
"vmla.f32 q14, %q15, d1[1] \n"
"vmla.f32 q15, %q15, d2[0] \n"
"vmla.f32 q12, %q16, d1[0] \n"
"vmla.f32 q13, %q16, d1[1] \n"
"vmla.f32 q14, %q16, d2[0] \n"
"vmla.f32 q15, %q16, d2[1] \n"
"vstm %0!, {d24-d31} \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22) // %16
: "memory", "q0", "q1", "q2", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; j + 1 < outw; j += 2)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #64] \n"
"ld1 {v0.4h}, [%1] \n"
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v28.4s, v29.4s}, [%0] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmul v24.4s, %8.4s, v0.s[0] \n"
"fmul v25.4s, %8.4s, v0.s[1] \n"
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v1.4h}, [%2] \n"
"fmul v26.4s, %9.4s, v0.s[1] \n"
"fmul v27.4s, %9.4s, v0.s[2] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v28.4s, %10.4s, v0.s[2] \n"
"fmla v29.4s, %10.4s, v0.s[3] \n"
"fmla v24.4s, %11.4s, v1.s[0] \n"
"fmla v25.4s, %11.4s, v1.s[1] \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v0.4h}, [%3] \n"
"fmla v26.4s, %12.4s, v1.s[1] \n"
"fmla v27.4s, %12.4s, v1.s[2] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v28.4s, %13.4s, v1.s[2] \n"
"fmla v29.4s, %13.4s, v1.s[3] \n"
"fmla v24.4s, %14.4s, v0.s[0] \n"
"fmla v25.4s, %14.4s, v0.s[1] \n"
"fmla v26.4s, %15.4s, v0.s[1] \n"
"fmla v27.4s, %15.4s, v0.s[2] \n"
"fmla v28.4s, %16.4s, v0.s[2] \n"
"fmla v29.4s, %16.4s, v0.s[3] \n"
"add %1, %1, #4 \n"
"fadd v24.4s, v24.4s, v26.4s \n"
"fadd v25.4s, v25.4s, v27.4s \n"
"add %2, %2, #4 \n"
"fadd v28.4s, v28.4s, v24.4s \n"
"fadd v29.4s, v29.4s, v25.4s \n"
"add %3, %3, #4 \n"
"st1 {v28.4s, v29.4s}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22) // %16
: "memory", "v0", "v1", "v24", "v25", "v26", "v27", "v28", "v29");
#else // __aarch64__
asm volatile(
"pld [%1, #64] \n"
"vld1.u16 {d1}, [%1] \n"
"pld [%0, #256] \n"
"vld1.f32 {d24-d27}, [%0 :128] \n"
"vshll.u16 q0, d1, #16 \n"
"vmul.f32 q14, %q8, d0[0] \n"
"vmul.f32 q15, %q8, d0[1] \n"
"vmla.f32 q12, %q9, d0[1] \n"
"vmla.f32 q13, %q9, d1[0] \n"
"pld [%2, #64] \n"
"vld1.u16 {d3}, [%2] \n"
"vmla.f32 q14, %q10, d1[0] \n"
"vmla.f32 q15, %q10, d1[1] \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q12, %q11, d2[0] \n"
"vmla.f32 q13, %q11, d2[1] \n"
"vmla.f32 q14, %q12, d2[1] \n"
"vmla.f32 q15, %q12, d3[0] \n"
"pld [%3, #64] \n"
"vld1.u16 {d1}, [%3] \n"
"vmla.f32 q12, %q13, d3[0] \n"
"vmla.f32 q13, %q13, d3[1] \n"
"vshll.u16 q0, d1, #16 \n"
"vmla.f32 q14, %q14, d0[0] \n"
"vmla.f32 q15, %q14, d0[1] \n"
"vmla.f32 q12, %q15, d0[1] \n"
"vmla.f32 q13, %q15, d1[0] \n"
"add %1, %1, #4 \n"
"vmla.f32 q14, %q16, d1[0] \n"
"vmla.f32 q15, %q16, d1[1] \n"
"add %2, %2, #4 \n"
"vadd.f32 q12, q12, q14 \n"
"vadd.f32 q13, q13, q15 \n"
"add %3, %3, #4 \n"
"vst1.f32 {d24-d27}, [%0 :128]! \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22) // %16
: "memory", "q0", "q1", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; j < outw; j++)
{
float32x4_t _sum0 = vld1q_f32(outptr0);
float32x4_t _r0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r0), 16));
float32x4_t _r1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r1), 16));
float32x4_t _r2 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r2), 16));
#if __aarch64__
_sum0 = vfmaq_laneq_f32(_sum0, _k00, _r0, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k01, _r0, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k02, _r0, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _k10, _r1, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k11, _r1, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k12, _r1, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _k20, _r2, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k21, _r2, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k22, _r2, 2);
#else
_sum0 = vmlaq_lane_f32(_sum0, _k00, vget_low_f32(_r0), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k01, vget_low_f32(_r0), 1);
_sum0 = vmlaq_lane_f32(_sum0, _k02, vget_high_f32(_r0), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k10, vget_low_f32(_r1), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k11, vget_low_f32(_r1), 1);
_sum0 = vmlaq_lane_f32(_sum0, _k12, vget_high_f32(_r1), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k20, vget_low_f32(_r2), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k21, vget_low_f32(_r2), 1);
_sum0 = vmlaq_lane_f32(_sum0, _k22, vget_high_f32(_r2), 0);
#endif
vst1q_f32(outptr0, _sum0);
r0 += 1;
r1 += 1;
r2 += 1;
outptr0 += 4;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
k0 += 9 * 4;
}
for (; q < inch; q++)
{
unsigned short* outptr0_bf16 = top_blob.channel(p);
const float* outptr0 = out0.row(0);
const Mat img0 = bottom_blob.channel(q);
const unsigned short* r0 = img0.row<unsigned short>(0);
const unsigned short* r1 = img0.row<unsigned short>(1);
const unsigned short* r2 = img0.row<unsigned short>(2);
float32x4_t _k00 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0), 16));
float32x4_t _k01 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 4), 16));
float32x4_t _k02 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 8), 16));
float32x4_t _k10 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 12), 16));
float32x4_t _k11 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 16), 16));
float32x4_t _k12 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 20), 16));
float32x4_t _k20 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 24), 16));
float32x4_t _k21 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 28), 16));
float32x4_t _k22 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 32), 16));
int i = 0;
for (; i < outh; i++)
{
int j = 0;
#if __aarch64__
for (; j + 7 < outw; j += 8)
{
asm volatile(
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%1], #64 \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%1], #64 \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.4h, v1.4h}, [%2], #16 \n"
"ld1 {v2.s}[0], [%2] \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v24.4s, %10.4s, v0.s[0] \n"
"fmla v25.4s, %10.4s, v0.s[1] \n"
"fmla v26.4s, %10.4s, v0.s[2] \n"
"fmla v27.4s, %10.4s, v0.s[3] \n"
"fmla v28.4s, %10.4s, v1.s[0] \n"
"fmla v29.4s, %10.4s, v1.s[1] \n"
"fmla v30.4s, %10.4s, v1.s[2] \n"
"fmla v31.4s, %10.4s, v1.s[3] \n"
"fmla v24.4s, %11.4s, v0.s[1] \n"
"fmla v25.4s, %11.4s, v0.s[2] \n"
"fmla v26.4s, %11.4s, v0.s[3] \n"
"fmla v27.4s, %11.4s, v1.s[0] \n"
"fmla v28.4s, %11.4s, v1.s[1] \n"
"fmla v29.4s, %11.4s, v1.s[2] \n"
"fmla v30.4s, %11.4s, v1.s[3] \n"
"fmla v31.4s, %11.4s, v2.s[0] \n"
"fmla v24.4s, %12.4s, v0.s[2] \n"
"fmla v25.4s, %12.4s, v0.s[3] \n"
"fmla v26.4s, %12.4s, v1.s[0] \n"
"fmla v27.4s, %12.4s, v1.s[1] \n"
"fmla v28.4s, %12.4s, v1.s[2] \n"
"fmla v29.4s, %12.4s, v1.s[3] \n"
"fmla v30.4s, %12.4s, v2.s[0] \n"
"fmla v31.4s, %12.4s, v2.s[1] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v4.4h, v5.4h}, [%3], #16 \n"
"ld1 {v2.s}[0], [%3] \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v24.4s, %13.4s, v4.s[0] \n"
"fmla v25.4s, %13.4s, v4.s[1] \n"
"fmla v26.4s, %13.4s, v4.s[2] \n"
"fmla v27.4s, %13.4s, v4.s[3] \n"
"fmla v28.4s, %13.4s, v5.s[0] \n"
"fmla v29.4s, %13.4s, v5.s[1] \n"
"fmla v30.4s, %13.4s, v5.s[2] \n"
"fmla v31.4s, %13.4s, v5.s[3] \n"
"fmla v24.4s, %14.4s, v4.s[1] \n"
"fmla v25.4s, %14.4s, v4.s[2] \n"
"fmla v26.4s, %14.4s, v4.s[3] \n"
"fmla v27.4s, %14.4s, v5.s[0] \n"
"fmla v28.4s, %14.4s, v5.s[1] \n"
"fmla v29.4s, %14.4s, v5.s[2] \n"
"fmla v30.4s, %14.4s, v5.s[3] \n"
"fmla v31.4s, %14.4s, v2.s[0] \n"
"fmla v24.4s, %15.4s, v4.s[2] \n"
"fmla v25.4s, %15.4s, v4.s[3] \n"
"fmla v26.4s, %15.4s, v5.s[0] \n"
"fmla v27.4s, %15.4s, v5.s[1] \n"
"fmla v28.4s, %15.4s, v5.s[2] \n"
"fmla v29.4s, %15.4s, v5.s[3] \n"
"fmla v30.4s, %15.4s, v2.s[0] \n"
"fmla v31.4s, %15.4s, v2.s[1] \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v0.4h, v1.4h}, [%4], #16 \n"
"ld1 {v2.s}[0], [%4] \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v24.4s, %16.4s, v0.s[0] \n"
"fmla v25.4s, %16.4s, v0.s[1] \n"
"fmla v26.4s, %16.4s, v0.s[2] \n"
"fmla v27.4s, %16.4s, v0.s[3] \n"
"fmla v28.4s, %16.4s, v1.s[0] \n"
"fmla v29.4s, %16.4s, v1.s[1] \n"
"fmla v30.4s, %16.4s, v1.s[2] \n"
"fmla v31.4s, %16.4s, v1.s[3] \n"
"fmla v24.4s, %17.4s, v0.s[1] \n"
"fmla v25.4s, %17.4s, v0.s[2] \n"
"fmla v26.4s, %17.4s, v0.s[3] \n"
"fmla v27.4s, %17.4s, v1.s[0] \n"
"fmla v28.4s, %17.4s, v1.s[1] \n"
"fmla v29.4s, %17.4s, v1.s[2] \n"
"fmla v30.4s, %17.4s, v1.s[3] \n"
"fmla v31.4s, %17.4s, v2.s[0] \n"
"fmla v24.4s, %18.4s, v0.s[2] \n"
"fmla v25.4s, %18.4s, v0.s[3] \n"
"fmla v26.4s, %18.4s, v1.s[0] \n"
"fmla v27.4s, %18.4s, v1.s[1] \n"
"fmla v28.4s, %18.4s, v1.s[2] \n"
"fmla v29.4s, %18.4s, v1.s[3] \n"
"fmla v30.4s, %18.4s, v2.s[0] \n"
"fmla v31.4s, %18.4s, v2.s[1] \n"
"shrn v24.4h, v24.4s, #16 \n"
"shrn v25.4h, v25.4s, #16 \n"
"shrn v26.4h, v26.4s, #16 \n"
"shrn v27.4h, v27.4s, #16 \n"
"shrn v28.4h, v28.4s, #16 \n"
"shrn v29.4h, v29.4s, #16 \n"
"shrn v30.4h, v30.4s, #16 \n"
"shrn v31.4h, v31.4s, #16 \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%0], #32 \n"
"st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%0], #32 \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k00), // %10
"w"(_k01), // %11
"w"(_k02), // %12
"w"(_k10), // %13
"w"(_k11), // %14
"w"(_k12), // %15
"w"(_k20), // %16
"w"(_k21), // %17
"w"(_k22) // %18
: "memory", "v0", "v1", "v2", "v4", "v5", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
#endif // __aarch64__
for (; j + 3 < outw; j += 4)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v0.4h}, [%2], #8 \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%1], #64 \n"
"shll v0.4s, v0.4h, #16 \n"
"ld1 {v1.s}[0], [%2] \n"
"fmla v24.4s, %10.4s, v0.s[0] \n"
"fmla v25.4s, %10.4s, v0.s[1] \n"
"fmla v26.4s, %10.4s, v0.s[2] \n"
"fmla v27.4s, %10.4s, v0.s[3] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v24.4s, %11.4s, v0.s[1] \n"
"fmla v25.4s, %11.4s, v0.s[2] \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v2.4h}, [%3], #8 \n"
"fmla v26.4s, %11.4s, v0.s[3] \n"
"fmla v27.4s, %11.4s, v1.s[0] \n"
"ld1 {v3.s}[0], [%3] \n"
"fmla v24.4s, %12.4s, v0.s[2] \n"
"fmla v25.4s, %12.4s, v0.s[3] \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v26.4s, %12.4s, v1.s[0] \n"
"fmla v27.4s, %12.4s, v1.s[1] \n"
"fmla v24.4s, %13.4s, v2.s[0] \n"
"fmla v25.4s, %13.4s, v2.s[1] \n"
"fmla v26.4s, %13.4s, v2.s[2] \n"
"fmla v27.4s, %13.4s, v2.s[3] \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v24.4s, %14.4s, v2.s[1] \n"
"fmla v25.4s, %14.4s, v2.s[2] \n"
"prfm pldl1keep, [%4, #64] \n"
"ld1 {v0.4h}, [%4], #8 \n"
"fmla v26.4s, %14.4s, v2.s[3] \n"
"fmla v27.4s, %14.4s, v3.s[0] \n"
"ld1 {v1.s}[0], [%4] \n"
"fmla v24.4s, %15.4s, v2.s[2] \n"
"fmla v25.4s, %15.4s, v2.s[3] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v26.4s, %15.4s, v3.s[0] \n"
"fmla v27.4s, %15.4s, v3.s[1] \n"
"fmla v24.4s, %16.4s, v0.s[0] \n"
"fmla v25.4s, %16.4s, v0.s[1] \n"
"fmla v26.4s, %16.4s, v0.s[2] \n"
"fmla v27.4s, %16.4s, v0.s[3] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v24.4s, %17.4s, v0.s[1] \n"
"fmla v25.4s, %17.4s, v0.s[2] \n"
"fmla v26.4s, %17.4s, v0.s[3] \n"
"fmla v27.4s, %17.4s, v1.s[0] \n"
"fmla v24.4s, %18.4s, v0.s[2] \n"
"fmla v25.4s, %18.4s, v0.s[3] \n"
"fmla v26.4s, %18.4s, v1.s[0] \n"
"fmla v27.4s, %18.4s, v1.s[1] \n"
"shrn v24.4h, v24.4s, #16 \n"
"shrn v25.4h, v25.4s, #16 \n"
"shrn v26.4h, v26.4s, #16 \n"
"shrn v27.4h, v27.4s, #16 \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%0], #32 \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k00), // %10
"w"(_k01), // %11
"w"(_k02), // %12
"w"(_k10), // %13
"w"(_k11), // %14
"w"(_k12), // %15
"w"(_k20), // %16
"w"(_k21), // %17
"w"(_k22) // %18
: "memory", "v0", "v1", "v2", "v3", "v24", "v25", "v26", "v27");
#else // __aarch64__
asm volatile(
"pld [%1, #512] \n"
"vldm %1!, {d24-d31} \n"
"pld [%2, #64] \n"
"vld1.u16 {d1}, [%2]! \n"
"vld1.u32 {d2[0]}, [%2] \n"
"vshll.u16 q0, d1, #16 \n"
"vshll.u16 q1, d2, #16 \n"
"vmla.f32 q12, %q10, d0[0] \n"
"vmla.f32 q13, %q10, d0[1] \n"
"vmla.f32 q14, %q10, d1[0] \n"
"vmla.f32 q15, %q10, d1[1] \n"
"vmla.f32 q12, %q11, d0[1] \n"
"vmla.f32 q13, %q11, d1[0] \n"
"vmla.f32 q14, %q11, d1[1] \n"
"vmla.f32 q15, %q11, d2[0] \n"
"vmla.f32 q12, %q12, d1[0] \n"
"vmla.f32 q13, %q12, d1[1] \n"
"vmla.f32 q14, %q12, d2[0] \n"
"vmla.f32 q15, %q12, d2[1] \n"
"pld [%3, #64] \n"
"vld1.u16 {d5}, [%3]! \n"
"vld1.u32 {d3[0]}, [%3] \n"
"vshll.u16 q2, d5, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q12, %q13, d4[0] \n"
"vmla.f32 q13, %q13, d4[1] \n"
"vmla.f32 q14, %q13, d5[0] \n"
"vmla.f32 q15, %q13, d5[1] \n"
"vmla.f32 q12, %q14, d4[1] \n"
"vmla.f32 q13, %q14, d5[0] \n"
"vmla.f32 q14, %q14, d5[1] \n"
"vmla.f32 q15, %q14, d2[0] \n"
"vmla.f32 q12, %q15, d5[0] \n"
"vmla.f32 q13, %q15, d5[1] \n"
"vmla.f32 q14, %q15, d2[0] \n"
"vmla.f32 q15, %q15, d2[1] \n"
"pld [%4, #64] \n"
"vld1.u16 {d1}, [%4]! \n"
"vld1.u32 {d2[0]}, [%4] \n"
"vshll.u16 q0, d1, #16 \n"
"vshll.u16 q1, d2, #16 \n"
"vmla.f32 q12, %q16, d0[0] \n"
"vmla.f32 q13, %q16, d0[1] \n"
"vmla.f32 q14, %q16, d1[0] \n"
"vmla.f32 q15, %q16, d1[1] \n"
"vmla.f32 q12, %q17, d0[1] \n"
"vmla.f32 q13, %q17, d1[0] \n"
"vmla.f32 q14, %q17, d1[1] \n"
"vmla.f32 q15, %q17, d2[0] \n"
"vmla.f32 q12, %q18, d1[0] \n"
"vmla.f32 q13, %q18, d1[1] \n"
"vmla.f32 q14, %q18, d2[0] \n"
"vmla.f32 q15, %q18, d2[1] \n"
"vshrn.s32 d24, q12, #16 \n"
"vshrn.s32 d25, q13, #16 \n"
"vshrn.s32 d26, q14, #16 \n"
"vshrn.s32 d27, q15, #16 \n"
"vst1.u16 {d24-d27}, [%0 :64]! \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k00), // %10
"w"(_k01), // %11
"w"(_k02), // %12
"w"(_k10), // %13
"w"(_k11), // %14
"w"(_k12), // %15
"w"(_k20), // %16
"w"(_k21), // %17
"w"(_k22) // %18
: "memory", "q0", "q1", "q2", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; j + 1 < outw; j += 2)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v0.4h}, [%2] \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v28.4s, v29.4s}, [%1], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"fmul v24.4s, %10.4s, v0.s[0] \n"
"fmul v25.4s, %10.4s, v0.s[1] \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v1.4h}, [%3] \n"
"fmul v26.4s, %11.4s, v0.s[1] \n"
"fmul v27.4s, %11.4s, v0.s[2] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v28.4s, %12.4s, v0.s[2] \n"
"fmla v29.4s, %12.4s, v0.s[3] \n"
"fmla v24.4s, %13.4s, v1.s[0] \n"
"fmla v25.4s, %13.4s, v1.s[1] \n"
"prfm pldl1keep, [%4, #64] \n"
"ld1 {v0.4h}, [%4] \n"
"fmla v26.4s, %14.4s, v1.s[1] \n"
"fmla v27.4s, %14.4s, v1.s[2] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v28.4s, %15.4s, v1.s[2] \n"
"fmla v29.4s, %15.4s, v1.s[3] \n"
"fmla v24.4s, %16.4s, v0.s[0] \n"
"fmla v25.4s, %16.4s, v0.s[1] \n"
"fmla v26.4s, %17.4s, v0.s[1] \n"
"fmla v27.4s, %17.4s, v0.s[2] \n"
"fmla v28.4s, %18.4s, v0.s[2] \n"
"fmla v29.4s, %18.4s, v0.s[3] \n"
"add %2, %2, #4 \n"
"fadd v24.4s, v24.4s, v26.4s \n"
"fadd v25.4s, v25.4s, v27.4s \n"
"add %3, %3, #4 \n"
"fadd v28.4s, v28.4s, v24.4s \n"
"fadd v29.4s, v29.4s, v25.4s \n"
"add %4, %4, #4 \n"
"shrn v28.4h, v28.4s, #16 \n"
"shrn v29.4h, v29.4s, #16 \n"
"st1 {v28.4h, v29.4h}, [%0], #16 \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k00), // %10
"w"(_k01), // %11
"w"(_k02), // %12
"w"(_k10), // %13
"w"(_k11), // %14
"w"(_k12), // %15
"w"(_k20), // %16
"w"(_k21), // %17
"w"(_k22) // %18
: "memory", "v0", "v1", "v24", "v25", "v26", "v27", "v28", "v29");
#else // __aarch64__
asm volatile(
"pld [%2, #64] \n"
"vld1.u16 {d1}, [%2] \n"
"pld [%1, #256] \n"
"vld1.f32 {d24-d27}, [%1 :128]! \n"
"vshll.u16 q0, d1, #16 \n"
"vmul.f32 q14, %q10, d0[0] \n"
"vmul.f32 q15, %q10, d0[1] \n"
"vmla.f32 q12, %q11, d0[1] \n"
"vmla.f32 q13, %q11, d1[0] \n"
"pld [%3, #64] \n"
"vld1.u16 {d3}, [%3] \n"
"vmla.f32 q14, %q12, d1[0] \n"
"vmla.f32 q15, %q12, d1[1] \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q12, %q13, d2[0] \n"
"vmla.f32 q13, %q13, d2[1] \n"
"vmla.f32 q14, %q14, d2[1] \n"
"vmla.f32 q15, %q14, d3[0] \n"
"pld [%4, #64] \n"
"vld1.u16 {d1}, [%4] \n"
"vmla.f32 q12, %q15, d3[0] \n"
"vmla.f32 q13, %q15, d3[1] \n"
"vshll.u16 q0, d1, #16 \n"
"vmla.f32 q14, %q16, d0[0] \n"
"vmla.f32 q15, %q16, d0[1] \n"
"vmla.f32 q12, %q17, d0[1] \n"
"vmla.f32 q13, %q17, d1[0] \n"
"add %2, %2, #4 \n"
"vmla.f32 q14, %q18, d1[0] \n"
"vmla.f32 q15, %q18, d1[1] \n"
"add %3, %3, #4 \n"
"vadd.f32 q12, q12, q14 \n"
"vadd.f32 q13, q13, q15 \n"
"add %4, %4, #4 \n"
"vshrn.s32 d24, q12, #16 \n"
"vshrn.s32 d25, q13, #16 \n"
"vst1.f32 {d24-d25}, [%0 :64]! \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k00), // %10
"w"(_k01), // %11
"w"(_k02), // %12
"w"(_k10), // %13
"w"(_k11), // %14
"w"(_k12), // %15
"w"(_k20), // %16
"w"(_k21), // %17
"w"(_k22) // %18
: "memory", "q0", "q1", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; j < outw; j++)
{
float32x4_t _sum0 = vld1q_f32(outptr0);
float32x4_t _r0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r0), 16));
float32x4_t _r1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r1), 16));
float32x4_t _r2 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r2), 16));
#if __aarch64__
_sum0 = vfmaq_laneq_f32(_sum0, _k00, _r0, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k01, _r0, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k02, _r0, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _k10, _r1, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k11, _r1, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k12, _r1, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _k20, _r2, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k21, _r2, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k22, _r2, 2);
#else
_sum0 = vmlaq_lane_f32(_sum0, _k00, vget_low_f32(_r0), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k01, vget_low_f32(_r0), 1);
_sum0 = vmlaq_lane_f32(_sum0, _k02, vget_high_f32(_r0), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k10, vget_low_f32(_r1), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k11, vget_low_f32(_r1), 1);
_sum0 = vmlaq_lane_f32(_sum0, _k12, vget_high_f32(_r1), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k20, vget_low_f32(_r2), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k21, vget_low_f32(_r2), 1);
_sum0 = vmlaq_lane_f32(_sum0, _k22, vget_high_f32(_r2), 0);
#endif
vst1_u16(outptr0_bf16, vshrn_n_u32(vreinterpretq_u32_f32(_sum0), 16));
r0 += 1;
r1 += 1;
r2 += 1;
outptr0 += 4;
outptr0_bf16 += 4;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
k0 += 9 * 4;
}
}
}
static void conv3x3s2_pack1to4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
#if __ARM_NEON && __aarch64__
Mat top_blob_fp32(outw, outh, opt.num_threads, (size_t)4u * 4 * 2, 4 * 2, opt.workspace_allocator);
#else
Mat top_blob_fp32(outw, outh, opt.num_threads, (size_t)4u * 4, 4, opt.workspace_allocator);
#endif
const int tailstep = w - 2 * outw + w;
const float* bias = _bias;
int nn_outch = 0;
int remain_outch_start = 0;
#if __ARM_NEON && __aarch64__
nn_outch = outch >> 1;
remain_outch_start = nn_outch << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 2;
Mat out0 = top_blob_fp32.channel(get_omp_thread_num());
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f);
float32x4_t _bias1 = bias ? vld1q_f32((const float*)bias + (p + 1) * 4) : vdupq_n_f32(0.f);
{
float* ptr = (float*)out0;
for (int i = 0; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
vst1q_f32(ptr, _bias0);
vst1q_f32(ptr + 4, _bias0);
vst1q_f32(ptr + 8, _bias0);
vst1q_f32(ptr + 12, _bias0);
vst1q_f32(ptr + 16, _bias1);
vst1q_f32(ptr + 20, _bias1);
vst1q_f32(ptr + 24, _bias1);
vst1q_f32(ptr + 28, _bias1);
ptr += 32;
}
for (; j + 1 < outw; j += 2)
{
vst1q_f32(ptr, _bias0);
vst1q_f32(ptr + 4, _bias0);
vst1q_f32(ptr + 8, _bias1);
vst1q_f32(ptr + 12, _bias1);
ptr += 16;
}
for (; j < outw; j++)
{
vst1q_f32(ptr, _bias0);
vst1q_f32(ptr + 4, _bias1);
ptr += 8;
}
}
}
const unsigned short* k0 = kernel.channel(p);
const unsigned short* k1 = kernel.channel(p + 1);
int q = 0;
for (; q < inch - 1; q++)
{
float* outptr0 = out0;
const Mat img0 = bottom_blob.channel(q);
const unsigned short* r0 = img0.row<const unsigned short>(0);
const unsigned short* r1 = img0.row<const unsigned short>(1);
const unsigned short* r2 = img0.row<const unsigned short>(2);
float32x4_t _k00_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0), 16));
float32x4_t _k01_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 4), 16));
float32x4_t _k02_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 8), 16));
float32x4_t _k10_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 12), 16));
float32x4_t _k11_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 16), 16));
float32x4_t _k12_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 20), 16));
float32x4_t _k20_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 24), 16));
float32x4_t _k21_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 28), 16));
float32x4_t _k22_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 32), 16));
float32x4_t _k00_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1), 16));
float32x4_t _k01_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 4), 16));
float32x4_t _k02_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 8), 16));
float32x4_t _k10_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 12), 16));
float32x4_t _k11_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 16), 16));
float32x4_t _k12_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 20), 16));
float32x4_t _k20_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 24), 16));
float32x4_t _k21_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 28), 16));
float32x4_t _k22_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 32), 16));
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
asm volatile(
// r0
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.4h, v1.4h}, [%1], #16 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%0], #64 \n" // sum0
"shll v0.4s, v0.4h, #16 \n"
// "prfm pldl1keep, [%0, #512] \n"
"ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%0] \n" // sum1
"shll v1.4s, v1.4h, #16 \n"
"fmla v6.4s, %8.4s, v0.s[0] \n"
"fmla v7.4s, %8.4s, v0.s[2] \n"
"fmla v8.4s, %8.4s, v1.s[0] \n"
"fmla v9.4s, %8.4s, v1.s[2] \n"
"fmla v10.4s, %17.4s, v0.s[0] \n"
"fmla v11.4s, %17.4s, v0.s[2] \n"
"fmla v12.4s, %17.4s, v1.s[0] \n"
"fmla v13.4s, %17.4s, v1.s[2] \n"
"ld1 {v4.h}[0], [%1] \n"
"fmla v6.4s, %9.4s, v0.s[1] \n"
"fmla v7.4s, %9.4s, v0.s[3] \n"
"fmla v8.4s, %9.4s, v1.s[1] \n"
"fmla v9.4s, %9.4s, v1.s[3] \n"
"fmla v10.4s, %18.4s, v0.s[1] \n"
"fmla v11.4s, %18.4s, v0.s[3] \n"
"fmla v12.4s, %18.4s, v1.s[1] \n"
"fmla v13.4s, %18.4s, v1.s[3] \n"
"shll v4.4s, v4.4h, #16 \n"
// r1
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v2.4h, v3.4h}, [%2], #16 \n"
"fmla v6.4s, %10.4s, v0.s[2] \n"
"fmla v7.4s, %10.4s, v1.s[0] \n"
"fmla v8.4s, %10.4s, v1.s[2] \n"
"fmla v9.4s, %10.4s, v4.s[0] \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v10.4s, %19.4s, v0.s[2] \n"
"fmla v11.4s, %19.4s, v1.s[0] \n"
"fmla v12.4s, %19.4s, v1.s[2] \n"
"fmla v13.4s, %19.4s, v4.s[0] \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v6.4s, %11.4s, v2.s[0] \n"
"fmla v7.4s, %11.4s, v2.s[2] \n"
"fmla v8.4s, %11.4s, v3.s[0] \n"
"fmla v9.4s, %11.4s, v3.s[2] \n"
"fmla v10.4s, %20.4s, v2.s[0] \n"
"fmla v11.4s, %20.4s, v2.s[2] \n"
"fmla v12.4s, %20.4s, v3.s[0] \n"
"fmla v13.4s, %20.4s, v3.s[2] \n"
"ld1 {v5.h}[0], [%2] \n"
"fmla v6.4s, %12.4s, v2.s[1] \n"
"fmla v7.4s, %12.4s, v2.s[3] \n"
"fmla v8.4s, %12.4s, v3.s[1] \n"
"fmla v9.4s, %12.4s, v3.s[3] \n"
"shll v5.4s, v5.4h, #16 \n"
"fmla v10.4s, %21.4s, v2.s[1] \n"
"fmla v11.4s, %21.4s, v2.s[3] \n"
"fmla v12.4s, %21.4s, v3.s[1] \n"
"fmla v13.4s, %21.4s, v3.s[3] \n"
// r2
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.4h, v1.4h}, [%3], #16 \n"
"fmla v6.4s, %13.4s, v2.s[2] \n"
"fmla v7.4s, %13.4s, v3.s[0] \n"
"fmla v8.4s, %13.4s, v3.s[2] \n"
"fmla v9.4s, %13.4s, v5.s[0] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v10.4s, %22.4s, v2.s[2] \n"
"fmla v11.4s, %22.4s, v3.s[0] \n"
"fmla v12.4s, %22.4s, v3.s[2] \n"
"fmla v13.4s, %22.4s, v5.s[0] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v6.4s, %14.4s, v0.s[0] \n"
"fmla v7.4s, %14.4s, v0.s[2] \n"
"fmla v8.4s, %14.4s, v1.s[0] \n"
"fmla v9.4s, %14.4s, v1.s[2] \n"
"fmla v10.4s, %23.4s, v0.s[0] \n"
"fmla v11.4s, %23.4s, v0.s[2] \n"
"fmla v12.4s, %23.4s, v1.s[0] \n"
"fmla v13.4s, %23.4s, v1.s[2] \n"
"ld1 {v4.h}[0], [%3] \n"
"fmla v6.4s, %15.4s, v0.s[1] \n"
"fmla v7.4s, %15.4s, v0.s[3] \n"
"fmla v8.4s, %15.4s, v1.s[1] \n"
"fmla v9.4s, %15.4s, v1.s[3] \n"
"fmla v10.4s, %24.4s, v0.s[1] \n"
"fmla v11.4s, %24.4s, v0.s[3] \n"
"fmla v12.4s, %24.4s, v1.s[1] \n"
"fmla v13.4s, %24.4s, v1.s[3] \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v6.4s, %16.4s, v0.s[2] \n"
"fmla v7.4s, %16.4s, v1.s[0] \n"
"fmla v8.4s, %16.4s, v1.s[2] \n"
"fmla v9.4s, %16.4s, v4.s[0] \n"
"sub %0, %0, #64 \n"
"fmla v10.4s, %25.4s, v0.s[2] \n"
"fmla v11.4s, %25.4s, v1.s[0] \n"
"fmla v12.4s, %25.4s, v1.s[2] \n"
"fmla v13.4s, %25.4s, v4.s[0] \n"
"st1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%0], #64 \n"
"st1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00_0), // %8
"w"(_k01_0), // %9
"w"(_k02_0), // %10
"w"(_k10_0), // %11
"w"(_k11_0), // %12
"w"(_k12_0), // %13
"w"(_k20_0), // %14
"w"(_k21_0), // %15
"w"(_k22_0), // %16
"w"(_k00_1), // %17
"w"(_k01_1), // %18
"w"(_k02_1), // %19
"w"(_k10_1), // %20
"w"(_k11_1), // %21
"w"(_k12_1), // %22
"w"(_k20_1), // %23
"w"(_k21_1), // %24
"w"(_k22_1) // %25
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13");
}
for (; j + 1 < outw; j += 2)
{
asm volatile(
// r0
"prfm pldl1keep, [%1, #64] \n"
"ld1 {v0.4h}, [%1], #8 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%0] \n" // sum0 sum1
"shll v0.4s, v0.4h, #16 \n"
"fmla v10.4s, %8.4s, v0.s[0] \n"
"fmla v11.4s, %8.4s, v0.s[2] \n"
"fmla v12.4s, %17.4s, v0.s[0] \n"
"fmla v13.4s, %17.4s, v0.s[2] \n"
"ld1 {v1.h}[0], [%1] \n"
"fmla v10.4s, %9.4s, v0.s[1] \n"
"fmla v11.4s, %9.4s, v0.s[3] \n"
"fmla v12.4s, %18.4s, v0.s[1] \n"
"fmla v13.4s, %18.4s, v0.s[3] \n"
"shll v1.4s, v1.4h, #16 \n"
// r1
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v2.4h}, [%2], #8 \n"
"fmla v10.4s, %10.4s, v0.s[2] \n"
"fmla v11.4s, %10.4s, v1.s[0] \n"
"fmla v12.4s, %19.4s, v0.s[2] \n"
"fmla v13.4s, %19.4s, v1.s[0] \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v10.4s, %11.4s, v2.s[0] \n"
"fmla v11.4s, %11.4s, v2.s[2] \n"
"fmla v12.4s, %20.4s, v2.s[0] \n"
"fmla v13.4s, %20.4s, v2.s[2] \n"
"ld1 {v3.h}[0], [%2] \n"
"fmla v10.4s, %12.4s, v2.s[1] \n"
"fmla v11.4s, %12.4s, v2.s[3] \n"
"fmla v12.4s, %21.4s, v2.s[1] \n"
"fmla v13.4s, %21.4s, v2.s[3] \n"
"shll v3.4s, v3.4h, #16 \n"
// r2
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v0.4h}, [%3], #8 \n"
"fmla v10.4s, %13.4s, v2.s[2] \n"
"fmla v11.4s, %13.4s, v3.s[0] \n"
"fmla v12.4s, %22.4s, v2.s[2] \n"
"fmla v13.4s, %22.4s, v3.s[0] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v10.4s, %14.4s, v0.s[0] \n"
"fmla v11.4s, %14.4s, v0.s[2] \n"
"fmla v12.4s, %23.4s, v0.s[0] \n"
"fmla v13.4s, %23.4s, v0.s[2] \n"
"ld1 {v1.h}[0], [%3] \n"
"fmla v10.4s, %15.4s, v0.s[1] \n"
"fmla v11.4s, %15.4s, v0.s[3] \n"
"fmla v12.4s, %24.4s, v0.s[1] \n"
"fmla v13.4s, %24.4s, v0.s[3] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v10.4s, %16.4s, v0.s[2] \n"
"fmla v11.4s, %16.4s, v1.s[0] \n"
"fmla v12.4s, %25.4s, v0.s[2] \n"
"fmla v13.4s, %25.4s, v1.s[0] \n"
"st1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00_0), // %8
"w"(_k01_0), // %9
"w"(_k02_0), // %10
"w"(_k10_0), // %11
"w"(_k11_0), // %12
"w"(_k12_0), // %13
"w"(_k20_0), // %14
"w"(_k21_0), // %15
"w"(_k22_0), // %16
"w"(_k00_1), // %17
"w"(_k01_1), // %18
"w"(_k02_1), // %19
"w"(_k10_1), // %20
"w"(_k11_1), // %21
"w"(_k12_1), // %22
"w"(_k20_1), // %23
"w"(_k21_1), // %24
"w"(_k22_1) // %25
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13");
}
for (; j < outw; j++)
{
float32x4_t _sum0 = vld1q_f32(outptr0);
float32x4_t _sum1 = vld1q_f32(outptr0 + 4);
float32x4_t _r0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r0), 16));
float32x4_t _r1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r1), 16));
float32x4_t _r2 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r2), 16));
_sum0 = vfmaq_laneq_f32(_sum0, _k00_0, _r0, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k01_0, _r0, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k02_0, _r0, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _k10_0, _r1, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k11_0, _r1, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k12_0, _r1, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _k20_0, _r2, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k21_0, _r2, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k22_0, _r2, 2);
_sum1 = vfmaq_laneq_f32(_sum1, _k00_1, _r0, 0);
_sum1 = vfmaq_laneq_f32(_sum1, _k01_1, _r0, 1);
_sum1 = vfmaq_laneq_f32(_sum1, _k02_1, _r0, 2);
_sum1 = vfmaq_laneq_f32(_sum1, _k10_1, _r1, 0);
_sum1 = vfmaq_laneq_f32(_sum1, _k11_1, _r1, 1);
_sum1 = vfmaq_laneq_f32(_sum1, _k12_1, _r1, 2);
_sum1 = vfmaq_laneq_f32(_sum1, _k20_1, _r2, 0);
_sum1 = vfmaq_laneq_f32(_sum1, _k21_1, _r2, 1);
_sum1 = vfmaq_laneq_f32(_sum1, _k22_1, _r2, 2);
vst1q_f32(outptr0, _sum0);
vst1q_f32(outptr0 + 4, _sum1);
r0 += 2;
r1 += 2;
r2 += 2;
outptr0 += 8;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
k0 += 9 * 4;
k1 += 9 * 4;
}
for (; q < inch; q++)
{
unsigned short* outptr0_bf16 = top_blob.channel(p);
unsigned short* outptr1_bf16 = top_blob.channel(p + 1);
const float* outptr0 = out0;
const Mat img0 = bottom_blob.channel(q);
const unsigned short* r0 = img0.row<const unsigned short>(0);
const unsigned short* r1 = img0.row<const unsigned short>(1);
const unsigned short* r2 = img0.row<const unsigned short>(2);
float32x4_t _k00_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0), 16));
float32x4_t _k01_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 4), 16));
float32x4_t _k02_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 8), 16));
float32x4_t _k10_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 12), 16));
float32x4_t _k11_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 16), 16));
float32x4_t _k12_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 20), 16));
float32x4_t _k20_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 24), 16));
float32x4_t _k21_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 28), 16));
float32x4_t _k22_0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 32), 16));
float32x4_t _k00_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1), 16));
float32x4_t _k01_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 4), 16));
float32x4_t _k02_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 8), 16));
float32x4_t _k10_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 12), 16));
float32x4_t _k11_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 16), 16));
float32x4_t _k12_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 20), 16));
float32x4_t _k20_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 24), 16));
float32x4_t _k21_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 28), 16));
float32x4_t _k22_1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k1 + 32), 16));
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
asm volatile(
// r0
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.4h, v1.4h}, [%3], #16 \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%2], #64 \n" // sum0
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%2], #64 \n" // sum1
"fmla v6.4s, %12.4s, v0.s[0] \n"
"fmla v7.4s, %12.4s, v0.s[2] \n"
"fmla v8.4s, %12.4s, v1.s[0] \n"
"fmla v9.4s, %12.4s, v1.s[2] \n"
"fmla v10.4s, %21.4s, v0.s[0] \n"
"fmla v11.4s, %21.4s, v0.s[2] \n"
"fmla v12.4s, %21.4s, v1.s[0] \n"
"fmla v13.4s, %21.4s, v1.s[2] \n"
"ld1 {v4.h}[0], [%3] \n"
"fmla v6.4s, %13.4s, v0.s[1] \n"
"fmla v7.4s, %13.4s, v0.s[3] \n"
"fmla v8.4s, %13.4s, v1.s[1] \n"
"fmla v9.4s, %13.4s, v1.s[3] \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v10.4s, %22.4s, v0.s[1] \n"
"fmla v11.4s, %22.4s, v0.s[3] \n"
"fmla v12.4s, %22.4s, v1.s[1] \n"
"fmla v13.4s, %22.4s, v1.s[3] \n"
// r1
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v2.4h, v3.4h}, [%4], #16 \n"
"fmla v6.4s, %14.4s, v0.s[2] \n"
"fmla v7.4s, %14.4s, v1.s[0] \n"
"fmla v8.4s, %14.4s, v1.s[2] \n"
"fmla v9.4s, %14.4s, v4.s[0] \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v10.4s, %23.4s, v0.s[2] \n"
"fmla v11.4s, %23.4s, v1.s[0] \n"
"fmla v12.4s, %23.4s, v1.s[2] \n"
"fmla v13.4s, %23.4s, v4.s[0] \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v6.4s, %15.4s, v2.s[0] \n"
"fmla v7.4s, %15.4s, v2.s[2] \n"
"fmla v8.4s, %15.4s, v3.s[0] \n"
"fmla v9.4s, %15.4s, v3.s[2] \n"
"fmla v10.4s, %24.4s, v2.s[0] \n"
"fmla v11.4s, %24.4s, v2.s[2] \n"
"fmla v12.4s, %24.4s, v3.s[0] \n"
"fmla v13.4s, %24.4s, v3.s[2] \n"
"ld1 {v5.h}[0], [%4] \n"
"fmla v6.4s, %16.4s, v2.s[1] \n"
"fmla v7.4s, %16.4s, v2.s[3] \n"
"fmla v8.4s, %16.4s, v3.s[1] \n"
"fmla v9.4s, %16.4s, v3.s[3] \n"
"shll v5.4s, v5.4h, #16 \n"
"fmla v10.4s, %25.4s, v2.s[1] \n"
"fmla v11.4s, %25.4s, v2.s[3] \n"
"fmla v12.4s, %25.4s, v3.s[1] \n"
"fmla v13.4s, %25.4s, v3.s[3] \n"
// r2
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4h, v1.4h}, [%5], #16 \n"
"fmla v6.4s, %17.4s, v2.s[2] \n"
"fmla v7.4s, %17.4s, v3.s[0] \n"
"fmla v8.4s, %17.4s, v3.s[2] \n"
"fmla v9.4s, %17.4s, v5.s[0] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v10.4s, %26.4s, v2.s[2] \n"
"fmla v11.4s, %26.4s, v3.s[0] \n"
"fmla v12.4s, %26.4s, v3.s[2] \n"
"fmla v13.4s, %26.4s, v5.s[0] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v6.4s, %18.4s, v0.s[0] \n"
"fmla v7.4s, %18.4s, v0.s[2] \n"
"fmla v8.4s, %18.4s, v1.s[0] \n"
"fmla v9.4s, %18.4s, v1.s[2] \n"
"fmla v10.4s, %27.4s, v0.s[0] \n"
"fmla v11.4s, %27.4s, v0.s[2] \n"
"fmla v12.4s, %27.4s, v1.s[0] \n"
"fmla v13.4s, %27.4s, v1.s[2] \n"
"ld1 {v4.h}[0], [%5] \n"
"fmla v6.4s, %19.4s, v0.s[1] \n"
"fmla v7.4s, %19.4s, v0.s[3] \n"
"fmla v8.4s, %19.4s, v1.s[1] \n"
"fmla v9.4s, %19.4s, v1.s[3] \n"
"fmla v10.4s, %28.4s, v0.s[1] \n"
"fmla v11.4s, %28.4s, v0.s[3] \n"
"fmla v12.4s, %28.4s, v1.s[1] \n"
"fmla v13.4s, %28.4s, v1.s[3] \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v6.4s, %20.4s, v0.s[2] \n"
"fmla v7.4s, %20.4s, v1.s[0] \n"
"fmla v8.4s, %20.4s, v1.s[2] \n"
"fmla v9.4s, %20.4s, v4.s[0] \n"
"fmla v10.4s, %29.4s, v0.s[2] \n"
"fmla v11.4s, %29.4s, v1.s[0] \n"
"fmla v12.4s, %29.4s, v1.s[2] \n"
"fmla v13.4s, %29.4s, v4.s[0] \n"
"shrn v6.4h, v6.4s, #16 \n"
"shrn v7.4h, v7.4s, #16 \n"
"shrn v8.4h, v8.4s, #16 \n"
"shrn v9.4h, v9.4s, #16 \n"
"shrn v10.4h, v10.4s, #16 \n"
"shrn v11.4h, v11.4s, #16 \n"
"st1 {v6.4h, v7.4h, v8.4h, v9.4h}, [%0], #32 \n"
"shrn v12.4h, v12.4s, #16 \n"
"shrn v13.4h, v13.4s, #16 \n"
"st1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%1], #32 \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr1_bf16), // %1
"=r"(outptr0), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2) // %5
: "0"(outptr0_bf16),
"1"(outptr1_bf16),
"2"(outptr0),
"3"(r0),
"4"(r1),
"5"(r2),
"w"(_k00_0), // %12
"w"(_k01_0), // %13
"w"(_k02_0), // %14
"w"(_k10_0), // %15
"w"(_k11_0), // %16
"w"(_k12_0), // %17
"w"(_k20_0), // %18
"w"(_k21_0), // %19
"w"(_k22_0), // %20
"w"(_k00_1), // %21
"w"(_k01_1), // %22
"w"(_k02_1), // %23
"w"(_k10_1), // %24
"w"(_k11_1), // %25
"w"(_k12_1), // %26
"w"(_k20_1), // %27
"w"(_k21_1), // %28
"w"(_k22_1) // %29
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13");
}
for (; j + 1 < outw; j += 2)
{
asm volatile(
// r0
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v0.4h}, [%3], #8 \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%2], #64 \n" // sum0 sum1
"shll v0.4s, v0.4h, #16 \n"
"fmla v10.4s, %12.4s, v0.s[0] \n"
"fmla v11.4s, %12.4s, v0.s[2] \n"
"fmla v12.4s, %21.4s, v0.s[0] \n"
"fmla v13.4s, %21.4s, v0.s[2] \n"
"ld1 {v1.h}[0], [%3] \n"
"fmla v10.4s, %13.4s, v0.s[1] \n"
"fmla v11.4s, %13.4s, v0.s[3] \n"
"fmla v12.4s, %22.4s, v0.s[1] \n"
"fmla v13.4s, %22.4s, v0.s[3] \n"
"shll v1.4s, v1.4h, #16 \n"
// r1
"prfm pldl1keep, [%4, #64] \n"
"ld1 {v2.4h}, [%4], #8 \n"
"fmla v10.4s, %14.4s, v0.s[2] \n"
"fmla v11.4s, %14.4s, v1.s[0] \n"
"fmla v12.4s, %23.4s, v0.s[2] \n"
"fmla v13.4s, %23.4s, v1.s[0] \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v10.4s, %15.4s, v2.s[0] \n"
"fmla v11.4s, %15.4s, v2.s[2] \n"
"fmla v12.4s, %24.4s, v2.s[0] \n"
"fmla v13.4s, %24.4s, v2.s[2] \n"
"ld1 {v3.h}[0], [%4] \n"
"fmla v10.4s, %16.4s, v2.s[1] \n"
"fmla v11.4s, %16.4s, v2.s[3] \n"
"fmla v12.4s, %25.4s, v2.s[1] \n"
"fmla v13.4s, %25.4s, v2.s[3] \n"
"shll v3.4s, v3.4h, #16 \n"
// r2
"prfm pldl1keep, [%5, #64] \n"
"ld1 {v0.4h}, [%5], #8 \n"
"fmla v10.4s, %17.4s, v2.s[2] \n"
"fmla v11.4s, %17.4s, v3.s[0] \n"
"fmla v12.4s, %26.4s, v2.s[2] \n"
"fmla v13.4s, %26.4s, v3.s[0] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v10.4s, %18.4s, v0.s[0] \n"
"fmla v11.4s, %18.4s, v0.s[2] \n"
"fmla v12.4s, %27.4s, v0.s[0] \n"
"fmla v13.4s, %27.4s, v0.s[2] \n"
"ld1 {v1.h}[0], [%5] \n"
"fmla v10.4s, %19.4s, v0.s[1] \n"
"fmla v11.4s, %19.4s, v0.s[3] \n"
"fmla v12.4s, %28.4s, v0.s[1] \n"
"fmla v13.4s, %28.4s, v0.s[3] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v10.4s, %20.4s, v0.s[2] \n"
"fmla v11.4s, %20.4s, v1.s[0] \n"
"fmla v12.4s, %29.4s, v0.s[2] \n"
"fmla v13.4s, %29.4s, v1.s[0] \n"
"shrn v10.4h, v10.4s, #16 \n"
"shrn v11.4h, v11.4s, #16 \n"
"shrn v12.4h, v12.4s, #16 \n"
"shrn v13.4h, v13.4s, #16 \n"
"st1 {v10.4h, v11.4h}, [%0], #16 \n"
"st1 {v12.4h, v13.4h}, [%1], #16 \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr1_bf16), // %1
"=r"(outptr0), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2) // %5
: "0"(outptr0_bf16),
"1"(outptr1_bf16),
"2"(outptr0),
"3"(r0),
"4"(r1),
"5"(r2),
"w"(_k00_0), // %12
"w"(_k01_0), // %13
"w"(_k02_0), // %14
"w"(_k10_0), // %15
"w"(_k11_0), // %16
"w"(_k12_0), // %17
"w"(_k20_0), // %18
"w"(_k21_0), // %19
"w"(_k22_0), // %20
"w"(_k00_1), // %21
"w"(_k01_1), // %22
"w"(_k02_1), // %23
"w"(_k10_1), // %24
"w"(_k11_1), // %25
"w"(_k12_1), // %26
"w"(_k20_1), // %27
"w"(_k21_1), // %28
"w"(_k22_1) // %29
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13");
}
for (; j < outw; j++)
{
float32x4_t _sum0 = vld1q_f32(outptr0);
float32x4_t _sum1 = vld1q_f32(outptr0 + 4);
float32x4_t _r0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r0), 16));
float32x4_t _r1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r1), 16));
float32x4_t _r2 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r2), 16));
_sum0 = vfmaq_laneq_f32(_sum0, _k00_0, _r0, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k01_0, _r0, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k02_0, _r0, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _k10_0, _r1, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k11_0, _r1, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k12_0, _r1, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _k20_0, _r2, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k21_0, _r2, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k22_0, _r2, 2);
_sum1 = vfmaq_laneq_f32(_sum1, _k00_1, _r0, 0);
_sum1 = vfmaq_laneq_f32(_sum1, _k01_1, _r0, 1);
_sum1 = vfmaq_laneq_f32(_sum1, _k02_1, _r0, 2);
_sum1 = vfmaq_laneq_f32(_sum1, _k10_1, _r1, 0);
_sum1 = vfmaq_laneq_f32(_sum1, _k11_1, _r1, 1);
_sum1 = vfmaq_laneq_f32(_sum1, _k12_1, _r1, 2);
_sum1 = vfmaq_laneq_f32(_sum1, _k20_1, _r2, 0);
_sum1 = vfmaq_laneq_f32(_sum1, _k21_1, _r2, 1);
_sum1 = vfmaq_laneq_f32(_sum1, _k22_1, _r2, 2);
vst1_u16(outptr0_bf16, vshrn_n_u32(vreinterpretq_u32_f32(_sum0), 16));
vst1_u16(outptr1_bf16, vshrn_n_u32(vreinterpretq_u32_f32(_sum1), 16));
r0 += 2;
r1 += 2;
r2 += 2;
outptr0 += 8;
outptr0_bf16 += 4;
outptr1_bf16 += 4;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
k0 += 9 * 4;
k1 += 9 * 4;
}
}
#endif // __ARM_NEON && __aarch64__
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
Mat out0 = top_blob_fp32.channel(get_omp_thread_num());
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f);
out0.fill(_bias0);
const unsigned short* k0 = kernel.channel(p);
int q = 0;
for (; q < inch - 1; q++)
{
float* outptr0 = out0;
const Mat img0 = bottom_blob.channel(q);
const unsigned short* r0 = img0.row<const unsigned short>(0);
const unsigned short* r1 = img0.row<const unsigned short>(1);
const unsigned short* r2 = img0.row<const unsigned short>(2);
float32x4_t _k00 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0), 16));
float32x4_t _k01 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 4), 16));
float32x4_t _k02 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 8), 16));
float32x4_t _k10 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 12), 16));
float32x4_t _k11 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 16), 16));
float32x4_t _k12 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 20), 16));
float32x4_t _k20 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 24), 16));
float32x4_t _k21 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 28), 16));
float32x4_t _k22 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 32), 16));
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
#if __aarch64__
asm volatile(
// r0
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.4h, v1.4h}, [%1], #16 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%0] \n" // sum0
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v6.4s, %8.4s, v0.s[0] \n"
"fmla v7.4s, %8.4s, v0.s[2] \n"
"fmla v8.4s, %8.4s, v1.s[0] \n"
"fmla v9.4s, %8.4s, v1.s[2] \n"
"ld1 {v4.h}[0], [%1] \n"
"fmla v6.4s, %9.4s, v0.s[1] \n"
"fmla v7.4s, %9.4s, v0.s[3] \n"
"fmla v8.4s, %9.4s, v1.s[1] \n"
"fmla v9.4s, %9.4s, v1.s[3] \n"
"shll v4.4s, v4.4h, #16 \n"
// r1
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v2.4h, v3.4h}, [%2], #16 \n"
"fmla v6.4s, %10.4s, v0.s[2] \n"
"fmla v7.4s, %10.4s, v1.s[0] \n"
"fmla v8.4s, %10.4s, v1.s[2] \n"
"fmla v9.4s, %10.4s, v4.s[0] \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v6.4s, %11.4s, v2.s[0] \n"
"fmla v7.4s, %11.4s, v2.s[2] \n"
"fmla v8.4s, %11.4s, v3.s[0] \n"
"fmla v9.4s, %11.4s, v3.s[2] \n"
"ld1 {v5.h}[0], [%2] \n"
"fmla v6.4s, %12.4s, v2.s[1] \n"
"fmla v7.4s, %12.4s, v2.s[3] \n"
"fmla v8.4s, %12.4s, v3.s[1] \n"
"fmla v9.4s, %12.4s, v3.s[3] \n"
"shll v5.4s, v5.4h, #16 \n"
// r2
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.4h, v1.4h}, [%3], #16 \n"
"fmla v6.4s, %13.4s, v2.s[2] \n"
"fmla v7.4s, %13.4s, v3.s[0] \n"
"fmla v8.4s, %13.4s, v3.s[2] \n"
"fmla v9.4s, %13.4s, v5.s[0] \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v6.4s, %14.4s, v0.s[0] \n"
"fmla v7.4s, %14.4s, v0.s[2] \n"
"fmla v8.4s, %14.4s, v1.s[0] \n"
"fmla v9.4s, %14.4s, v1.s[2] \n"
"ld1 {v4.h}[0], [%3] \n"
"fmla v6.4s, %15.4s, v0.s[1] \n"
"fmla v7.4s, %15.4s, v0.s[3] \n"
"fmla v8.4s, %15.4s, v1.s[1] \n"
"fmla v9.4s, %15.4s, v1.s[3] \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v6.4s, %16.4s, v0.s[2] \n"
"fmla v7.4s, %16.4s, v1.s[0] \n"
"fmla v8.4s, %16.4s, v1.s[2] \n"
"fmla v9.4s, %16.4s, v4.s[0] \n"
"st1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22) // %16
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9");
#else // __aarch64__
asm volatile(
// r0
"pld [%1, #128] \n"
"vld1.u16 {d12-d13}, [%1]! \n"
"pld [%0, #512] \n"
"vldm %0, {d0-d7} \n" // sum0
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vld1.u16 {d12[0]}, [%1] \n"
"vmla.f32 q0, %q8, d8[0] \n"
"vmla.f32 q1, %q8, d9[0] \n"
"vmla.f32 q2, %q8, d10[0] \n"
"vmla.f32 q3, %q8, d11[0] \n"
"vmla.f32 q0, %q9, d8[1] \n"
"vmla.f32 q1, %q9, d9[1] \n"
"vshl.u32 d8, d12, #16 \n"
"vmla.f32 q2, %q9, d10[1] \n"
"vmla.f32 q3, %q9, d11[1] \n"
// r1
"pld [%2, #128] \n"
"vld1.u16 {d12-d13}, [%2]! \n"
"vmla.f32 q0, %q10, d9[0] \n"
"vmla.f32 q1, %q10, d10[0] \n"
"vmla.f32 q2, %q10, d11[0] \n"
"vmla.f32 q3, %q10, d8[0] \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vld1.u16 {d12[0]}, [%2] \n"
"vmla.f32 q0, %q11, d8[0] \n"
"vmla.f32 q1, %q11, d9[0] \n"
"vmla.f32 q2, %q11, d10[0] \n"
"vmla.f32 q3, %q11, d11[0] \n"
"vmla.f32 q0, %q12, d8[1] \n"
"vmla.f32 q1, %q12, d9[1] \n"
"vshl.u32 d8, d12, #16 \n"
"vmla.f32 q2, %q12, d10[1] \n"
"vmla.f32 q3, %q12, d11[1] \n"
// r2
"pld [%3, #128] \n"
"vld1.u16 {d12-d13}, [%3]! \n"
"vmla.f32 q0, %q13, d9[0] \n"
"vmla.f32 q1, %q13, d10[0] \n"
"vmla.f32 q2, %q13, d11[0] \n"
"vmla.f32 q3, %q13, d8[0] \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vld1.u16 {d12[0]}, [%3] \n"
"vmla.f32 q0, %q14, d8[0] \n"
"vmla.f32 q1, %q14, d9[0] \n"
"vmla.f32 q2, %q14, d10[0] \n"
"vmla.f32 q3, %q14, d11[0] \n"
"vmla.f32 q0, %q15, d8[1] \n"
"vmla.f32 q1, %q15, d9[1] \n"
"vshl.u32 d8, d12, #16 \n"
"vmla.f32 q2, %q15, d10[1] \n"
"vmla.f32 q3, %q15, d11[1] \n"
"vmla.f32 q0, %q16, d9[0] \n"
"vmla.f32 q1, %q16, d10[0] \n"
"vmla.f32 q2, %q16, d11[0] \n"
"vmla.f32 q3, %q16, d8[0] \n"
"vstm %0!, {d0-d7} \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22) // %16
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6");
#endif // __aarch64__
}
for (; j + 1 < outw; j += 2)
{
#if __aarch64__
asm volatile(
// r0
"prfm pldl1keep, [%1, #64] \n"
"ld1 {v0.4h}, [%1], #8 \n"
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v8.4s, v9.4s}, [%0] \n" // sum0
"shll v0.4s, v0.4h, #16 \n"
"fmul v6.4s, %8.4s, v0.s[0] \n"
"fmul v7.4s, %8.4s, v0.s[2] \n"
"ld1 {v1.h}[0], [%1] \n"
"fmla v8.4s, %9.4s, v0.s[1] \n"
"fmla v9.4s, %9.4s, v0.s[3] \n"
"shll v1.4s, v1.4h, #16 \n"
// r1
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v2.4h}, [%2], #8 \n"
"fmla v6.4s, %10.4s, v0.s[2] \n"
"fmla v7.4s, %10.4s, v1.s[0] \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v8.4s, %11.4s, v2.s[0] \n"
"fmla v9.4s, %11.4s, v2.s[2] \n"
"ld1 {v3.h}[0], [%2] \n"
"fmla v6.4s, %12.4s, v2.s[1] \n"
"fmla v7.4s, %12.4s, v2.s[3] \n"
"shll v3.4s, v3.4h, #16 \n"
// r2
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v0.4h}, [%3], #8 \n"
"fmla v8.4s, %13.4s, v2.s[2] \n"
"fmla v9.4s, %13.4s, v3.s[0] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v6.4s, %14.4s, v0.s[0] \n"
"fmla v7.4s, %14.4s, v0.s[2] \n"
"ld1 {v1.h}[0], [%3] \n"
"fmla v8.4s, %15.4s, v0.s[1] \n"
"fmla v9.4s, %15.4s, v0.s[3] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v6.4s, %16.4s, v0.s[2] \n"
"fmla v7.4s, %16.4s, v1.s[0] \n"
"fadd v8.4s, v8.4s, v6.4s \n"
"fadd v9.4s, v9.4s, v7.4s \n"
"st1 {v8.4s, v9.4s}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22) // %16
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9");
#else // __aarch64__
asm volatile(
// r0
"pld [%1, #64] \n"
"vld1.u16 {d9}, [%1]! \n"
"pld [%0, #256] \n"
"vld1.f32 {d4-d7}, [%0] \n" // sum0
"vshll.u16 q4, d9, #16 \n"
"vmul.f32 q0, %q8, d8[0] \n"
"vmul.f32 q1, %q8, d9[0] \n"
"vld1.u16 {d11[]}, [%1] \n"
"vmla.f32 q2, %q9, d8[1] \n"
"vmla.f32 q3, %q9, d9[1] \n"
"vshll.u16 q5, d11, #16 \n"
// r1
"pld [%2, #64] \n"
"vld1.u16 {d13}, [%2]! \n"
"vmla.f32 q0, %q10, d9[0] \n"
"vmla.f32 q1, %q10, d10[0] \n"
"vshll.u16 q6, d13, #16 \n"
"vmla.f32 q2, %q11, d12[0] \n"
"vmla.f32 q3, %q11, d13[0] \n"
"vld1.u16 {d9[]}, [%2] \n"
"vmla.f32 q0, %q12, d12[1] \n"
"vmla.f32 q1, %q12, d13[1] \n"
"vshll.u16 q4, d9, #16 \n"
// r2
"pld [%3, #64] \n"
"vld1.u16 {d11}, [%3]! \n"
"vmla.f32 q2, %q13, d13[0] \n"
"vmla.f32 q3, %q13, d8[0] \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q0, %q14, d10[0] \n"
"vmla.f32 q1, %q14, d11[0] \n"
"vld1.u16 {d13[]}, [%3] \n"
"vmla.f32 q2, %q15, d10[1] \n"
"vmla.f32 q3, %q15, d11[1] \n"
"vshll.u16 q6, d13, #16 \n"
"vmla.f32 q0, %q16, d11[0] \n"
"vmla.f32 q1, %q16, d12[0] \n"
"vadd.f32 q2, q2, q0 \n"
"vadd.f32 q3, q3, q1 \n"
"vst1.f32 {d4-d7}, [%0]! \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22) // %16
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6");
#endif // __aarch64__
}
for (; j < outw; j++)
{
float32x4_t _sum0 = vld1q_f32(outptr0);
float32x4_t _r0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r0), 16));
float32x4_t _r1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r1), 16));
float32x4_t _r2 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r2), 16));
#if __aarch64__
_sum0 = vfmaq_laneq_f32(_sum0, _k00, _r0, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k01, _r0, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k02, _r0, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _k10, _r1, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k11, _r1, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k12, _r1, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _k20, _r2, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k21, _r2, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k22, _r2, 2);
#else
_sum0 = vmlaq_lane_f32(_sum0, _k00, vget_low_f32(_r0), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k01, vget_low_f32(_r0), 1);
_sum0 = vmlaq_lane_f32(_sum0, _k02, vget_high_f32(_r0), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k10, vget_low_f32(_r1), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k11, vget_low_f32(_r1), 1);
_sum0 = vmlaq_lane_f32(_sum0, _k12, vget_high_f32(_r1), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k20, vget_low_f32(_r2), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k21, vget_low_f32(_r2), 1);
_sum0 = vmlaq_lane_f32(_sum0, _k22, vget_high_f32(_r2), 0);
#endif
vst1q_f32(outptr0, _sum0);
r0 += 2;
r1 += 2;
r2 += 2;
outptr0 += 4;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
k0 += 9 * 4;
}
for (; q < inch; q++)
{
unsigned short* outptr0_bf16 = top_blob.channel(p);
const float* outptr0 = out0;
const Mat img0 = bottom_blob.channel(q);
const unsigned short* r0 = img0.row<const unsigned short>(0);
const unsigned short* r1 = img0.row<const unsigned short>(1);
const unsigned short* r2 = img0.row<const unsigned short>(2);
float32x4_t _k00 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0), 16));
float32x4_t _k01 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 4), 16));
float32x4_t _k02 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 8), 16));
float32x4_t _k10 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 12), 16));
float32x4_t _k11 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 16), 16));
float32x4_t _k12 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 20), 16));
float32x4_t _k20 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 24), 16));
float32x4_t _k21 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 28), 16));
float32x4_t _k22 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0 + 32), 16));
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
#if __aarch64__
asm volatile(
// r0
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.4h, v1.4h}, [%2], #16 \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%1], #64 \n" // sum0
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v6.4s, %10.4s, v0.s[0] \n"
"fmla v7.4s, %10.4s, v0.s[2] \n"
"fmla v8.4s, %10.4s, v1.s[0] \n"
"fmla v9.4s, %10.4s, v1.s[2] \n"
"ld1 {v4.h}[0], [%2] \n"
"fmla v6.4s, %11.4s, v0.s[1] \n"
"fmla v7.4s, %11.4s, v0.s[3] \n"
"fmla v8.4s, %11.4s, v1.s[1] \n"
"fmla v9.4s, %11.4s, v1.s[3] \n"
"shll v4.4s, v4.4h, #16 \n"
// r1
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v2.4h, v3.4h}, [%3], #16 \n"
"fmla v6.4s, %12.4s, v0.s[2] \n"
"fmla v7.4s, %12.4s, v1.s[0] \n"
"fmla v8.4s, %12.4s, v1.s[2] \n"
"fmla v9.4s, %12.4s, v4.s[0] \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v6.4s, %13.4s, v2.s[0] \n"
"fmla v7.4s, %13.4s, v2.s[2] \n"
"fmla v8.4s, %13.4s, v3.s[0] \n"
"fmla v9.4s, %13.4s, v3.s[2] \n"
"ld1 {v5.h}[0], [%3] \n"
"fmla v6.4s, %14.4s, v2.s[1] \n"
"fmla v7.4s, %14.4s, v2.s[3] \n"
"fmla v8.4s, %14.4s, v3.s[1] \n"
"fmla v9.4s, %14.4s, v3.s[3] \n"
"shll v5.4s, v5.4h, #16 \n"
// r2
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v0.4h, v1.4h}, [%4], #16 \n"
"fmla v6.4s, %15.4s, v2.s[2] \n"
"fmla v7.4s, %15.4s, v3.s[0] \n"
"fmla v8.4s, %15.4s, v3.s[2] \n"
"fmla v9.4s, %15.4s, v5.s[0] \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v6.4s, %16.4s, v0.s[0] \n"
"fmla v7.4s, %16.4s, v0.s[2] \n"
"fmla v8.4s, %16.4s, v1.s[0] \n"
"fmla v9.4s, %16.4s, v1.s[2] \n"
"ld1 {v4.h}[0], [%4] \n"
"fmla v6.4s, %17.4s, v0.s[1] \n"
"fmla v7.4s, %17.4s, v0.s[3] \n"
"fmla v8.4s, %17.4s, v1.s[1] \n"
"fmla v9.4s, %17.4s, v1.s[3] \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v6.4s, %18.4s, v0.s[2] \n"
"fmla v7.4s, %18.4s, v1.s[0] \n"
"fmla v8.4s, %18.4s, v1.s[2] \n"
"fmla v9.4s, %18.4s, v4.s[0] \n"
"shrn v6.4h, v6.4s, #16 \n"
"shrn v7.4h, v7.4s, #16 \n"
"shrn v8.4h, v8.4s, #16 \n"
"shrn v9.4h, v9.4s, #16 \n"
"st1 {v6.4h, v7.4h, v8.4h, v9.4h}, [%0], #32 \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k00), // %10
"w"(_k01), // %11
"w"(_k02), // %12
"w"(_k10), // %13
"w"(_k11), // %14
"w"(_k12), // %15
"w"(_k20), // %16
"w"(_k21), // %17
"w"(_k22) // %18
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9");
#else // __aarch64__
asm volatile(
// r0
"pld [%2, #128] \n"
"vld1.u16 {d12-d13}, [%2]! \n"
"pld [%1, #512] \n"
"vldm %1!, {d0-d7} \n" // sum0
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vld1.u16 {d12[0]}, [%2] \n"
"vmla.f32 q0, %q10, d8[0] \n"
"vmla.f32 q1, %q10, d9[0] \n"
"vmla.f32 q2, %q10, d10[0] \n"
"vmla.f32 q3, %q10, d11[0] \n"
"vmla.f32 q0, %q11, d8[1] \n"
"vmla.f32 q1, %q11, d9[1] \n"
"vshl.u32 d8, d12, #16 \n"
"vmla.f32 q2, %q11, d10[1] \n"
"vmla.f32 q3, %q11, d11[1] \n"
// r1
"pld [%3, #128] \n"
"vld1.u16 {d12-d13}, [%3]! \n"
"vmla.f32 q0, %q12, d9[0] \n"
"vmla.f32 q1, %q12, d10[0] \n"
"vmla.f32 q2, %q12, d11[0] \n"
"vmla.f32 q3, %q12, d8[0] \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vld1.u16 {d12[0]}, [%3] \n"
"vmla.f32 q0, %q13, d8[0] \n"
"vmla.f32 q1, %q13, d9[0] \n"
"vmla.f32 q2, %q13, d10[0] \n"
"vmla.f32 q3, %q13, d11[0] \n"
"vmla.f32 q0, %q14, d8[1] \n"
"vmla.f32 q1, %q14, d9[1] \n"
"vshl.u32 d8, d12, #16 \n"
"vmla.f32 q2, %q14, d10[1] \n"
"vmla.f32 q3, %q14, d11[1] \n"
// r2
"pld [%4, #128] \n"
"vld1.u16 {d12-d13}, [%4]! \n"
"vmla.f32 q0, %q15, d9[0] \n"
"vmla.f32 q1, %q15, d10[0] \n"
"vmla.f32 q2, %q15, d11[0] \n"
"vmla.f32 q3, %q15, d8[0] \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vld1.u16 {d12[0]}, [%4] \n"
"vmla.f32 q0, %q16, d8[0] \n"
"vmla.f32 q1, %q16, d9[0] \n"
"vmla.f32 q2, %q16, d10[0] \n"
"vmla.f32 q3, %q16, d11[0] \n"
"vmla.f32 q0, %q17, d8[1] \n"
"vmla.f32 q1, %q17, d9[1] \n"
"vshl.u32 d8, d12, #16 \n"
"vmla.f32 q2, %q17, d10[1] \n"
"vmla.f32 q3, %q17, d11[1] \n"
"vmla.f32 q0, %q18, d9[0] \n"
"vmla.f32 q1, %q18, d10[0] \n"
"vmla.f32 q2, %q18, d11[0] \n"
"vmla.f32 q3, %q18, d8[0] \n"
"vshrn.u32 d0, q0, #16 \n"
"vshrn.u32 d1, q1, #16 \n"
"vshrn.u32 d2, q2, #16 \n"
"vshrn.u32 d3, q3, #16 \n"
"vst1.u16 {d0-d3}, [%0 :64]! \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k00), // %10
"w"(_k01), // %11
"w"(_k02), // %12
"w"(_k10), // %13
"w"(_k11), // %14
"w"(_k12), // %15
"w"(_k20), // %16
"w"(_k21), // %17
"w"(_k22) // %18
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6");
#endif // __aarch64__
}
for (; j + 1 < outw; j += 2)
{
#if __aarch64__
asm volatile(
// r0
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v0.4h}, [%2], #8 \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v8.4s, v9.4s}, [%1], #32 \n" // sum0
"shll v0.4s, v0.4h, #16 \n"
"fmul v6.4s, %10.4s, v0.s[0] \n"
"fmul v7.4s, %10.4s, v0.s[2] \n"
"ld1 {v1.h}[0], [%2] \n"
"fmla v8.4s, %11.4s, v0.s[1] \n"
"fmla v9.4s, %11.4s, v0.s[3] \n"
"shll v1.4s, v1.4h, #16 \n"
// r1
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v2.4h}, [%3], #8 \n"
"fmla v6.4s, %12.4s, v0.s[2] \n"
"fmla v7.4s, %12.4s, v1.s[0] \n"
"shll v2.4s, v2.4h, #16 \n"
"fmla v8.4s, %13.4s, v2.s[0] \n"
"fmla v9.4s, %13.4s, v2.s[2] \n"
"ld1 {v3.h}[0], [%3] \n"
"fmla v6.4s, %14.4s, v2.s[1] \n"
"fmla v7.4s, %14.4s, v2.s[3] \n"
"shll v3.4s, v3.4h, #16 \n"
// r2
"prfm pldl1keep, [%4, #64] \n"
"ld1 {v0.4h}, [%4], #8 \n"
"fmla v8.4s, %15.4s, v2.s[2] \n"
"fmla v9.4s, %15.4s, v3.s[0] \n"
"shll v0.4s, v0.4h, #16 \n"
"fmla v6.4s, %16.4s, v0.s[0] \n"
"fmla v7.4s, %16.4s, v0.s[2] \n"
"ld1 {v1.h}[0], [%4] \n"
"fmla v8.4s, %17.4s, v0.s[1] \n"
"fmla v9.4s, %17.4s, v0.s[3] \n"
"shll v1.4s, v1.4h, #16 \n"
"fmla v6.4s, %18.4s, v0.s[2] \n"
"fmla v7.4s, %18.4s, v1.s[0] \n"
"fadd v8.4s, v8.4s, v6.4s \n"
"fadd v9.4s, v9.4s, v7.4s \n"
"shrn v8.4h, v8.4s, #16 \n"
"shrn v9.4h, v9.4s, #16 \n"
"st1 {v8.4h, v9.4h}, [%0], #16 \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k00), // %10
"w"(_k01), // %11
"w"(_k02), // %12
"w"(_k10), // %13
"w"(_k11), // %14
"w"(_k12), // %15
"w"(_k20), // %16
"w"(_k21), // %17
"w"(_k22) // %18
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9");
#else // __aarch64__
asm volatile(
// r0
"pld [%2, #64] \n"
"vld1.u16 {d9}, [%2]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d4-d7}, [%1]! \n" // sum0
"vshll.u16 q4, d9, #16 \n"
"vmul.f32 q0, %q10, d8[0] \n"
"vmul.f32 q1, %q10, d9[0] \n"
"vld1.u16 {d11[]}, [%2] \n"
"vmla.f32 q2, %q11, d8[1] \n"
"vmla.f32 q3, %q11, d9[1] \n"
"vshll.u16 q5, d11, #16 \n"
// r1
"pld [%3, #64] \n"
"vld1.u16 {d13}, [%3]! \n"
"vmla.f32 q0, %q12, d9[0] \n"
"vmla.f32 q1, %q12, d10[0] \n"
"vshll.u16 q6, d13, #16 \n"
"vmla.f32 q2, %q13, d12[0] \n"
"vmla.f32 q3, %q13, d13[0] \n"
"vld1.u16 {d9[]}, [%3] \n"
"vmla.f32 q0, %q14, d12[1] \n"
"vmla.f32 q1, %q14, d13[1] \n"
"vshll.u16 q4, d9, #16 \n"
// r2
"pld [%4, #64] \n"
"vld1.u16 {d11}, [%4]! \n"
"vmla.f32 q2, %q15, d13[0] \n"
"vmla.f32 q3, %q15, d8[0] \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q0, %q16, d10[0] \n"
"vmla.f32 q1, %q16, d11[0] \n"
"vld1.u16 {d13[]}, [%4] \n"
"vmla.f32 q2, %q17, d10[1] \n"
"vmla.f32 q3, %q17, d11[1] \n"
"vshll.u16 q6, d13, #16 \n"
"vmla.f32 q0, %q18, d11[0] \n"
"vmla.f32 q1, %q18, d12[0] \n"
"vadd.f32 q2, q2, q0 \n"
"vadd.f32 q3, q3, q1 \n"
"vshrn.u32 d2, q2, #16 \n"
"vshrn.u32 d3, q3, #16 \n"
"vst1.u16 {d2-d3}, [%0 :64]! \n"
: "=r"(outptr0_bf16), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(outptr0_bf16),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k00), // %10
"w"(_k01), // %11
"w"(_k02), // %12
"w"(_k10), // %13
"w"(_k11), // %14
"w"(_k12), // %15
"w"(_k20), // %16
"w"(_k21), // %17
"w"(_k22) // %18
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6");
#endif // __aarch64__
}
for (; j < outw; j++)
{
float32x4_t _sum0 = vld1q_f32(outptr0);
float32x4_t _r0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r0), 16));
float32x4_t _r1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r1), 16));
float32x4_t _r2 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r2), 16));
#if __aarch64__
_sum0 = vfmaq_laneq_f32(_sum0, _k00, _r0, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k01, _r0, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k02, _r0, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _k10, _r1, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k11, _r1, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k12, _r1, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _k20, _r2, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _k21, _r2, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _k22, _r2, 2);
#else
_sum0 = vmlaq_lane_f32(_sum0, _k00, vget_low_f32(_r0), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k01, vget_low_f32(_r0), 1);
_sum0 = vmlaq_lane_f32(_sum0, _k02, vget_high_f32(_r0), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k10, vget_low_f32(_r1), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k11, vget_low_f32(_r1), 1);
_sum0 = vmlaq_lane_f32(_sum0, _k12, vget_high_f32(_r1), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k20, vget_low_f32(_r2), 0);
_sum0 = vmlaq_lane_f32(_sum0, _k21, vget_low_f32(_r2), 1);
_sum0 = vmlaq_lane_f32(_sum0, _k22, vget_high_f32(_r2), 0);
#endif
vst1_u16(outptr0_bf16, vshrn_n_u32(vreinterpretq_u32_f32(_sum0), 16));
r0 += 2;
r1 += 2;
r2 += 2;
outptr0 += 4;
outptr0_bf16 += 4;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
k0 += 9 * 4;
}
}
}
|
rmse.c | /*************************************************************************/
/** File: rmse.c **/
/** Description: calculate root mean squared error of particular **/
/** clustering. **/
/** Author: Sang-Ha Lee **/
/** University of Virginia. **/
/** **/
/** Note: euclid_dist_2() and find_nearest_point() adopted from **/
/** Minebench code. **/
/** **/
/*************************************************************************/
#include <float.h>
#include <math.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include "kmeans.h"
extern double wtime(void);
/*----< euclid_dist_2() >----------------------------------------------------*/
/* multi-dimensional spatial Euclid distance square */
__inline float euclid_dist_2(float* pt1,
float* pt2,
int numdims) {
int i;
float ans = 0.0;
for (i = 0; i < numdims; i++)
ans += (pt1[i] - pt2[i]) * (pt1[i] - pt2[i]);
return (ans);
}
/*----< find_nearest_point() >-----------------------------------------------*/
/* Return the index of the point in pts with the smallest squared Euclidean
 * distance to pt.
 *
 * FIX: `index` was previously uninitialized, so the function returned
 * garbage when npts <= 0 or when no distance was strictly below FLT_MAX.
 * It now defaults to 0.  The accumulator is also renamed min_dist — it
 * tracks the minimum, not the maximum, despite the old name. */
__inline int find_nearest_point(float* pt, /* [nfeatures] */
                                int nfeatures,
                                float** pts, /* [npts][nfeatures] */
                                int npts) {
    int index = 0; /* defined result even for degenerate input */
    int i;
    float min_dist = FLT_MAX;

    /* find the cluster center id with min distance to pt */
    for (i = 0; i < npts; i++) {
        float dist;
        dist = euclid_dist_2(pt, pts[i], nfeatures); /* no need square root */
        if (dist < min_dist) {
            min_dist = dist;
            index = i;
        }
    }
    return (index);
}
/*----< rms_err(): calculates RMSE of clustering >-------------------------------------*/
/* Root mean squared distance from each point to its nearest cluster centre.
 *   feature          [npoints][nfeatures]   input points
 *   cluster_centres  [nclusters][nfeatures] centres to measure against
 * Returns sqrt( (sum of squared nearest-centre distances) / npoints ). */
float rms_err(float** feature, /* [npoints][nfeatures] */
              int nfeatures,
              int npoints,
              float** cluster_centres, /* [nclusters][nfeatures] */
              int nclusters) {
    int i;
    float sum_euclid = 0.0; /* sum of Euclidean distance squares */
    float ret;              /* return value */

    /* calculate and sum the square of euclidean distance.
     * BUG FIX: the original pragma let every thread update the shared
     * accumulator sum_euclid without synchronization — a data race giving
     * nondeterministic results. reduction(+ : sum_euclid) gives each thread
     * a private copy and combines them safely at the end. */
#pragma omp parallel for shared(feature, cluster_centres) \
    firstprivate(npoints, nfeatures, nclusters) \
    reduction(+ : sum_euclid) schedule(static)
    for (i = 0; i < npoints; i++) {
        /* loop-local so each thread trivially has its own copy */
        int nearest_cluster_index = find_nearest_point(feature[i],
                                                       nfeatures,
                                                       cluster_centres,
                                                       nclusters);
        sum_euclid += euclid_dist_2(feature[i],
                                    cluster_centres[nearest_cluster_index],
                                    nfeatures);
    }
    /* divide by n, then take sqrt */
    ret = sqrt(sum_euclid / npoints);
    return (ret);
}
#include "ccv.h"
#include "ccv_internal.h"
#include <sys/time.h>
#ifdef HAVE_GSL
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>
#endif
#ifdef USE_OPENMP
#include <omp.h>
#endif
/* Default detection parameters: 5 scale intervals, at least 2 neighboring
 * detections to confirm a hit, accurate (non-approximate) evaluation,
 * no extra flags, and a 24x24 detection window. */
const ccv_bbf_param_t ccv_bbf_default_params = {
	.interval = 5,
	.min_neighbors = 2,
	.accurate = 1,
	.flags = 0,
	.size = {
		24,
		24,
	},
};
/* round x up to the next multiple of 4 — matches the 4-byte-aligned row
 * stride (->step) of ccv dense matrices (see the asserts against ->step below) */
#define _ccv_width_padding(x) (((x) + 3) & -4)
/* Evaluate one BBF (binary brightness feature): returns 1 iff every
 * "positive" pixel of the feature is strictly brighter than every
 * "negative" pixel, 0 otherwise.
 *   step[z] — row stride of pyramid level z
 *   u8[z]   — pixel base pointer of pyramid level z
 * Only the minimum positive value and maximum negative value matter, so the
 * loop tracks just those two and exits early the moment pmin <= nmax. */
static inline int _ccv_run_bbf_feature(ccv_bbf_feature_t* feature, int* step, unsigned char** u8)
{
#define pf_at(i) (*(u8[feature->pz[i]] + feature->px[i] + feature->py[i] * step[feature->pz[i]]))
#define nf_at(i) (*(u8[feature->nz[i]] + feature->nx[i] + feature->ny[i] * step[feature->nz[i]]))
	unsigned char pmin = pf_at(0), nmax = nf_at(0);
	/* check if every point in P > every point in N, and take a shortcut */
	if (pmin <= nmax)
		return 0;
	int i;
	for (i = 1; i < feature->size; i++)
	{
		/* slots with pz/nz < 0 are unused (they are initialized to -1
		 * in _ccv_bbf_randomize_gene) */
		if (feature->pz[i] >= 0)
		{
			int p = pf_at(i);
			if (p < pmin)
			{
				/* new minimum positive — reject as soon as it dips to nmax */
				if (p <= nmax)
					return 0;
				pmin = p;
			}
		}
		if (feature->nz[i] >= 0)
		{
			int n = nf_at(i);
			if (n > nmax)
			{
				/* new maximum negative — reject as soon as it reaches pmin */
				if (pmin <= n)
					return 0;
				nmax = n;
			}
		}
	}
#undef pf_at
#undef nf_at
	return 1;
}
/* Load one stage classifier from the text format produced by
 * _ccv_write_bbf_stage_classifier: count, threshold, then per-feature size,
 * point triples, and the two alpha weights. Floats are stored as the decimal
 * rendering of their raw 32-bit pattern, hence the float/int unions.
 * Returns 0 on success, -1 if the file cannot be opened. */
static int _ccv_read_bbf_stage_classifier(const char* file, ccv_bbf_stage_classifier_t* classifier)
{
	FILE* r = fopen(file, "r");
	if (r == 0) return -1;
	/* stat accumulates fscanf results (silences unused-result warnings);
	 * it is never checked, so a truncated file is not detected here */
	int stat = 0;
	stat |= fscanf(r, "%d", &classifier->count);
	union { float fl; int i; } fli;
	stat |= fscanf(r, "%d", &fli.i);
	classifier->threshold = fli.fl;
	classifier->feature = (ccv_bbf_feature_t*)ccmalloc(classifier->count * sizeof(ccv_bbf_feature_t));
	classifier->alpha = (float*)ccmalloc(classifier->count * 2 * sizeof(float));
	int i, j;
	for (i = 0; i < classifier->count; i++)
	{
		stat |= fscanf(r, "%d", &classifier->feature[i].size);
		for (j = 0; j < classifier->feature[i].size; j++)
		{
			stat |= fscanf(r, "%d %d %d", &classifier->feature[i].px[j], &classifier->feature[i].py[j], &classifier->feature[i].pz[j]);
			stat |= fscanf(r, "%d %d %d", &classifier->feature[i].nx[j], &classifier->feature[i].ny[j], &classifier->feature[i].nz[j]);
		}
		union { float fl; int i; } flia, flib;
		stat |= fscanf(r, "%d %d", &flia.i, &flib.i);
		classifier->alpha[i * 2] = flia.fl;
		classifier->alpha[i * 2 + 1] = flib.fl;
	}
	fclose(r);
	return 0;
}
#ifdef HAVE_GSL
/* wall-clock timestamp in microseconds; only differences are meaningful */
static unsigned int _ccv_bbf_time_measure()
{
	struct timeval now;
	gettimeofday(&now, 0);
	return (unsigned int)(now.tv_sec * 1000000 + now.tv_usec);
}
/* ascending in-place sort over a float array, generated by the project's
 * qsort template macro */
#define less_than(a, b, aux) ((a) < (b))
CCV_IMPLEMENT_QSORT(_ccv_sort_32f, float, less_than)
#undef less_than
/* Run one stage classifier over packed positive / negative sample pyramids.
 * Each posdata[i] / negdata[i] holds the three pyramid levels back to back;
 * the classifier sum for each sample is written to peval[i] / neval[i].
 * Either side may be skipped by passing (0, 0, ..., 0) for it. */
static void _ccv_bbf_eval_data(ccv_bbf_stage_classifier_t* classifier, unsigned char** posdata, int posnum, unsigned char** negdata, int negnum, ccv_size_t size, float* peval, float* neval)
{
	int steps[] = { _ccv_width_padding(size.width),
					_ccv_width_padding(size.width >> 1),
					_ccv_width_padding(size.width >> 2) };
	/* byte offsets of pyramid levels 1 and 2 inside each packed sample */
	int off1 = steps[0] * size.height;
	int off2 = off1 + steps[1] * (size.height >> 1);
	int s, f;
	for (s = 0; s < posnum; s++)
	{
		unsigned char* u8[] = { posdata[s], posdata[s] + off1, posdata[s] + off2 };
		float* alpha = classifier->alpha;
		ccv_bbf_feature_t* feature = classifier->feature;
		float sum = 0;
		/* alpha[0] is added when the feature fires 0, alpha[1] when it fires 1 */
		for (f = 0; f < classifier->count; ++f, alpha += 2, ++feature)
			sum += alpha[_ccv_run_bbf_feature(feature, steps, u8)];
		peval[s] = sum;
	}
	for (s = 0; s < negnum; s++)
	{
		unsigned char* u8[] = { negdata[s], negdata[s] + off1, negdata[s] + off2 };
		float* alpha = classifier->alpha;
		ccv_bbf_feature_t* feature = classifier->feature;
		float sum = 0;
		for (f = 0; f < classifier->count; ++f, alpha += 2, ++feature)
			sum += alpha[_ccv_run_bbf_feature(feature, steps, u8)];
		neval[s] = sum;
	}
}
/* Run the current cascade over the positive samples and keep only those that
 * pass every stage. Rejected samples are freed; survivors are compacted to
 * the front of posdata. Returns the surviving positive count. */
static int _ccv_prune_positive_data(ccv_bbf_classifier_cascade_t* cascade, unsigned char** posdata, int posnum, ccv_size_t size)
{
	float* peval = (float*)ccmalloc(posnum * sizeof(float));
	int i, j, k, rpos = posnum;
	for (i = 0; i < cascade->count; i++)
	{
		/* evaluate only the positives (negative side passed as 0s) */
		_ccv_bbf_eval_data(cascade->stage_classifier + i, posdata, rpos, 0, 0, size, peval, 0);
		k = 0;
		/* in-place compaction: keep samples at or above the stage threshold */
		for (j = 0; j < rpos; j++)
			if (peval[j] >= cascade->stage_classifier[i].threshold)
			{
				posdata[k] = posdata[j];
				++k;
			} else {
				ccfree(posdata[j]);
			}
		rpos = k;
	}
	ccfree(peval);
	return rpos;
}
/* Mine hard-negative training samples: run the current cascade over the
 * background images (flipped on later rounds for variety) and keep detected
 * windows that still pass every stage (false positives). Each kept sample is
 * stored as its three packed pyramid levels in negdata[]. Returns how many
 * negatives were collected (at most negnum). */
static int _ccv_prepare_background_data(ccv_bbf_classifier_cascade_t* cascade, char** bgfiles, int bgnum, unsigned char** negdata, int negnum)
{
	int t, i, j, k, q;
	int negperbg = negnum / bgnum + 1;
	int negtotal = 0;
	/* row strides of the three pyramid levels (4-byte padded) */
	int steps[] = { _ccv_width_padding(cascade->size.width),
					_ccv_width_padding(cascade->size.width >> 1),
					_ccv_width_padding(cascade->size.width >> 2) };
	int isizs0 = steps[0] * cascade->size.height;
	int isizs1 = steps[1] * (cascade->size.height >> 1);
	int isizs2 = steps[2] * (cascade->size.height >> 2);
	int* idcheck = (int*)ccmalloc(negnum * sizeof(int));
	gsl_rng_env_setup();
	gsl_rng* rng = gsl_rng_alloc(gsl_rng_default);
	/* NOTE(review): seeds the RNG with the heap address of idcheck —
	 * presumably intentional cheap entropy, but runs are not reproducible */
	gsl_rng_set(rng, (unsigned long int)idcheck);
	ccv_size_t imgsz = cascade->size;
	int rneg = negtotal;
	for (t = 0; negtotal < negnum; t++)
	{
		printf("preparing negative data ... 0%%");
		for (i = 0; i < bgnum; i++)
		{
			negperbg = (t < 2) ? (negnum - negtotal) / (bgnum - i) + 1 : negnum - negtotal;
			ccv_dense_matrix_t* image = 0;
			ccv_read(bgfiles[i], &image, CCV_IO_GRAY | CCV_IO_ANY_FILE);
			/* BUG FIX: the original asserted on image->type BEFORE this null
			 * check, so an unreadable file crashed on a null dereference and
			 * the "file corrupted" path was unreachable. Check first, then
			 * assert on the format. */
			if (image == 0)
			{
				printf("\n%s file corrupted\n", bgfiles[i]);
				continue;
			}
			assert((image->type & CCV_C1) && (image->type & CCV_8U));
			/* vary the data across rounds with horizontal / vertical flips */
			if (t % 2 != 0)
				ccv_flip(image, 0, 0, CCV_FLIP_X);
			if (t % 4 >= 2)
				ccv_flip(image, 0, 0, CCV_FLIP_Y);
			ccv_bbf_param_t params = { .interval = 3, .min_neighbors = 0, .accurate = 1, .flags = 0, .size = cascade->size };
			ccv_array_t* detected = ccv_bbf_detect_objects(image, &cascade, 1, params);
			memset(idcheck, 0, ccv_min(detected->rnum, negperbg) * sizeof(int));
			for (j = 0; j < ccv_min(detected->rnum, negperbg); j++)
			{
				/* draw a random detection not drawn before and fully inside
				 * the image bounds */
				int r = gsl_rng_uniform_int(rng, detected->rnum);
				int flag = 1;
				ccv_rect_t* rect = (ccv_rect_t*)ccv_array_get(detected, r);
				while (flag) {
					flag = 0;
					for (k = 0; k < j; k++)
						if (r == idcheck[k])
						{
							flag = 1;
							r = gsl_rng_uniform_int(rng, detected->rnum);
							break;
						}
					rect = (ccv_rect_t*)ccv_array_get(detected, r);
					if ((rect->x < 0) || (rect->y < 0) || (rect->width + rect->x >= image->cols) || (rect->height + rect->y >= image->rows))
					{
						flag = 1;
						r = gsl_rng_uniform_int(rng, detected->rnum);
					}
				}
				idcheck[j] = r;
				/* build the 3-level pyramid for the candidate window */
				ccv_dense_matrix_t* temp = 0;
				ccv_dense_matrix_t* imgs0 = 0;
				ccv_dense_matrix_t* imgs1 = 0;
				ccv_dense_matrix_t* imgs2 = 0;
				ccv_slice(image, (ccv_matrix_t**)&temp, 0, rect->y, rect->x, rect->height, rect->width);
				ccv_resample(temp, &imgs0, 0, imgsz.height, imgsz.width, CCV_INTER_AREA);
				assert(imgs0->step == steps[0]);
				ccv_matrix_free(temp);
				ccv_sample_down(imgs0, &imgs1, 0, 0, 0);
				assert(imgs1->step == steps[1]);
				ccv_sample_down(imgs1, &imgs2, 0, 0, 0);
				assert(imgs2->step == steps[2]);
				negdata[negtotal] = (unsigned char*)ccmalloc(isizs0 + isizs1 + isizs2);
				unsigned char* u8s0 = negdata[negtotal];
				unsigned char* u8s1 = negdata[negtotal] + isizs0;
				unsigned char* u8s2 = negdata[negtotal] + isizs0 + isizs1;
				unsigned char* u8[] = { u8s0, u8s1, u8s2 };
				memcpy(u8s0, imgs0->data.u8, imgs0->rows * imgs0->step);
				ccv_matrix_free(imgs0);
				memcpy(u8s1, imgs1->data.u8, imgs1->rows * imgs1->step);
				ccv_matrix_free(imgs1);
				memcpy(u8s2, imgs2->data.u8, imgs2->rows * imgs2->step);
				ccv_matrix_free(imgs2);
				/* keep the window only if it still passes every cascade stage,
				 * i.e. it is a genuine false positive (hard negative) */
				flag = 1;
				ccv_bbf_stage_classifier_t* classifier = cascade->stage_classifier;
				for (k = 0; k < cascade->count; ++k, ++classifier)
				{
					float sum = 0;
					float* alpha = classifier->alpha;
					ccv_bbf_feature_t* feature = classifier->feature;
					for (q = 0; q < classifier->count; ++q, alpha += 2, ++feature)
						sum += alpha[_ccv_run_bbf_feature(feature, steps, u8)];
					if (sum < classifier->threshold)
					{
						flag = 0;
						break;
					}
				}
				if (!flag)
					ccfree(negdata[negtotal]);
				else {
					++negtotal;
					if (negtotal >= negnum)
						break;
				}
			}
			ccv_array_free(detected);
			ccv_matrix_free(image);
			ccv_drain_cache();
			printf("\rpreparing negative data ... %2d%%", 100 * negtotal / negnum);
			fflush(0);
			if (negtotal >= negnum)
				break;
		}
		/* a full round over every background yielded nothing new: give up */
		if (rneg == negtotal)
			break;
		rneg = negtotal;
		printf("\nentering additional round %d\n", t + 1);
	}
	gsl_rng_free(rng);
	ccfree(idcheck);
	ccv_drain_cache();
	printf("\n");
	return negtotal;
}
/* Pack each positive training image into the 3-level pyramid layout used by
 * the feature evaluator: level 0 is the caller's size-validated image, levels
 * 1 and 2 are successive downsamples, all copied back to back into a single
 * ccmalloc'd buffer posdata[i]. The caller's posimg matrices are not freed;
 * the two temporary downsampled levels are. */
static void _ccv_prepare_positive_data(ccv_dense_matrix_t** posimg, unsigned char** posdata, ccv_size_t size, int posnum)
{
	printf("preparing positive data ... 0%%");
	int i;
	for (i = 0; i < posnum; i++)
	{
		ccv_dense_matrix_t* imgs0 = posimg[i];
		ccv_dense_matrix_t* imgs1 = 0;
		ccv_dense_matrix_t* imgs2 = 0;
		/* inputs must already be 8-bit single-channel at the training size */
		assert((imgs0->type & CCV_C1) && (imgs0->type & CCV_8U) && imgs0->rows == size.height && imgs0->cols == size.width);
		ccv_sample_down(imgs0, &imgs1, 0, 0, 0);
		ccv_sample_down(imgs1, &imgs2, 0, 0, 0);
		int isizs0 = imgs0->rows * imgs0->step;
		int isizs1 = imgs1->rows * imgs1->step;
		int isizs2 = imgs2->rows * imgs2->step;
		posdata[i] = (unsigned char*)ccmalloc(isizs0 + isizs1 + isizs2);
		memcpy(posdata[i], imgs0->data.u8, isizs0);
		memcpy(posdata[i] + isizs0, imgs1->data.u8, isizs1);
		memcpy(posdata[i] + isizs0 + isizs1, imgs2->data.u8, isizs2);
		printf("\rpreparing positive data ... %2d%%", 100 * (i + 1) / posnum);
		fflush(0);
		ccv_matrix_free(imgs1);
		ccv_matrix_free(imgs2);
	}
	ccv_drain_cache();
	printf("\n");
}
/* One candidate feature ("gene") in the genetic / convex feature search. */
typedef struct {
	double fitness; /* selection score: (1 - error) decayed by age, boosted by point count */
	int pk, nk; /* number of active positive / negative points in feature */
	int age; /* rounds survived since last mutation (reduces fitness) */
	double error; /* weighted classification error from _ccv_bbf_error_rate */
	ccv_bbf_feature_t feature;
} ccv_bbf_gene_t;
/* fitness = accuracy (1 - error), discounted exponentially by age and
 * boosted by roughly 1.5% per sample point in the feature (pk + nk) */
static inline void _ccv_bbf_genetic_fitness(ccv_bbf_gene_t* gene)
{
	double accuracy = 1 - gene->error;
	double age_decay = exp(-0.01 * gene->age);
	double size_bonus = exp((gene->pk + gene->nk) * log(1.015));
	gene->fitness = accuracy * age_decay * size_bonus;
}
/* returns 1 if (x, y, z) already appears among the gene's positive or
 * negative sample points, 0 otherwise — used to avoid duplicate points */
static inline int _ccv_bbf_exist_gene_feature(ccv_bbf_gene_t* gene, int x, int y, int z)
{
	int k;
	for (k = 0; k < gene->pk; k++)
		if (gene->feature.pz[k] == z && gene->feature.px[k] == x && gene->feature.py[k] == y)
			return 1;
	for (k = 0; k < gene->nk; k++)
		if (gene->feature.nz[k] == z && gene->feature.nx[k] == x && gene->feature.ny[k] == y)
			return 1;
	return 0;
}
/* Initialize a gene with random point counts and random, duplicate-free
 * sample points drawn across the three pyramid levels.
 * NOTE: the exact sequence of gsl_rng calls is part of the behavior (it
 * determines the population), so the structure must not be reordered. */
static inline void _ccv_bbf_randomize_gene(gsl_rng* rng, ccv_bbf_gene_t* gene, int* rows, int* cols)
{
	int i;
	do {
		gene->pk = gsl_rng_uniform_int(rng, CCV_BBF_POINT_MAX - 1) + 1;
		gene->nk = gsl_rng_uniform_int(rng, CCV_BBF_POINT_MAX - 1) + 1;
	} while (gene->pk + gene->nk < CCV_BBF_POINT_MIN); /* a hard restriction of at least 3 points have to be examed */
	gene->feature.size = ccv_max(gene->pk, gene->nk);
	gene->age = 0;
	/* -1 marks an unused point slot */
	for (i = 0; i < CCV_BBF_POINT_MAX; i++)
	{
		gene->feature.pz[i] = -1;
		gene->feature.nz[i] = -1;
	}
	int x, y, z;
	/* draw each positive point: random level z, then coordinates within
	 * that level, rejecting duplicates */
	for (i = 0; i < gene->pk; i++)
	{
		do {
			z = gsl_rng_uniform_int(rng, 3);
			x = gsl_rng_uniform_int(rng, cols[z]);
			y = gsl_rng_uniform_int(rng, rows[z]);
		} while (_ccv_bbf_exist_gene_feature(gene, x, y, z));
		gene->feature.pz[i] = z;
		gene->feature.px[i] = x;
		gene->feature.py[i] = y;
	}
	/* same for each negative point */
	for (i = 0; i < gene->nk; i++)
	{
		do {
			z = gsl_rng_uniform_int(rng, 3);
			x = gsl_rng_uniform_int(rng, cols[z]);
			y = gsl_rng_uniform_int(rng, rows[z]);
		} while ( _ccv_bbf_exist_gene_feature(gene, x, y, z));
		gene->feature.nz[i] = z;
		gene->feature.nx[i] = x;
		gene->feature.ny[i] = y;
	}
}
/* Weighted classification error of a single feature over the training set:
 * adds pw[i] for every positive sample the feature rejects and nw[i] for
 * every negative sample it accepts. */
static inline double _ccv_bbf_error_rate(ccv_bbf_feature_t* feature, unsigned char** posdata, int posnum, unsigned char** negdata, int negnum, ccv_size_t size, double* pw, double* nw)
{
	int steps[] = { _ccv_width_padding(size.width),
					_ccv_width_padding(size.width >> 1),
					_ccv_width_padding(size.width >> 2) };
	/* byte offsets of pyramid levels 1 and 2 inside each packed sample */
	int off1 = steps[0] * size.height;
	int off2 = off1 + steps[1] * (size.height >> 1);
	double error = 0;
	int idx;
	for (idx = 0; idx < posnum; idx++)
	{
		unsigned char* u8[] = { posdata[idx], posdata[idx] + off1, posdata[idx] + off2 };
		if (!_ccv_run_bbf_feature(feature, steps, u8))
			error += pw[idx];
	}
	for (idx = 0; idx < negnum; idx++)
	{
		unsigned char* u8[] = { negdata[idx], negdata[idx] + off1, negdata[idx] + off2 };
		if (_ccv_run_bbf_feature(feature, steps, u8))
			error += nw[idx];
	}
	return error;
}
/* sort genes by fitness; the >= comparator inverts the order so the
 * highest-fitness genes end up first (assuming the qsort template places
 * "less" elements first) */
#define less_than(fit1, fit2, aux) ((fit1).fitness >= (fit2).fitness)
static CCV_IMPLEMENT_QSORT(_ccv_bbf_genetic_qsort, ccv_bbf_gene_t, less_than)
#undef less_than
/* Genetic search for the lowest-weighted-error BBF feature.
 * Population of pnum = ftnum * 100 genes, regenerated each round as:
 * the ftnum fittest survivors, mnum mutants of survivors, hnum hybrids of
 * survivor pairs, and rnum fresh random genes. Stops when the best error
 * has not improved for 40 consecutive rounds. Returns the best feature.
 * NOTE: the gsl_rng call sequence is part of the behavior; do not reorder. */
static ccv_bbf_feature_t _ccv_bbf_genetic_optimize(unsigned char** posdata, int posnum, unsigned char** negdata, int negnum, int ftnum, ccv_size_t size, double* pw, double* nw)
{
	ccv_bbf_feature_t best;
	/* seed (random method) */
	gsl_rng_env_setup();
	gsl_rng* rng = gsl_rng_alloc(gsl_rng_default);
	/* seed from the bit pattern of the first weights */
	union { unsigned long int li; double db; } dbli;
	dbli.db = pw[0] + nw[0];
	gsl_rng_set(rng, dbli.li);
	int i, j;
	int pnum = ftnum * 100;
	ccv_bbf_gene_t* gene = (ccv_bbf_gene_t*)ccmalloc(pnum * sizeof(ccv_bbf_gene_t));
	int rows[] = { size.height, size.height >> 1, size.height >> 2 };
	int cols[] = { size.width, size.width >> 1, size.width >> 2 };
	for (i = 0; i < pnum; i++)
		_ccv_bbf_randomize_gene(rng, &gene[i], rows, cols);
	unsigned int timer = _ccv_bbf_time_measure();
#ifdef USE_OPENMP
#pragma omp parallel for private(i) schedule(dynamic)
#endif
	for (i = 0; i < pnum; i++)
		gene[i].error = _ccv_bbf_error_rate(&gene[i].feature, posdata, posnum, negdata, negnum, size, pw, nw);
	timer = _ccv_bbf_time_measure() - timer;
	for (i = 0; i < pnum; i++)
		_ccv_bbf_genetic_fitness(&gene[i]);
	double best_err = 1;
	int rnum = ftnum * 39; /* number of randomize */
	int mnum = ftnum * 40; /* number of mutation */
	int hnum = ftnum * 20; /* number of hybrid */
	/* iteration stop crit : best no change in 40 iterations */
	int it = 0, t;
	for (t = 0 ; it < 40; ++it, ++t)
	{
		/* find the gene with the lowest error in the population */
		int min_id = 0;
		double min_err = gene[0].error;
		for (i = 1; i < pnum; i++)
			if (gene[i].error < min_err)
			{
				min_id = i;
				min_err = gene[i].error;
			}
		min_err = gene[min_id].error = _ccv_bbf_error_rate(&gene[min_id].feature, posdata, posnum, negdata, negnum, size, pw, nw);
		if (min_err < best_err)
		{
			best_err = min_err;
			memcpy(&best, &gene[min_id].feature, sizeof(best));
			printf("best bbf feature with error %f\n|-size: %d\n|-positive point: ", best_err, best.size);
			for (i = 0; i < best.size; i++)
				printf("(%d %d %d), ", best.px[i], best.py[i], best.pz[i]);
			printf("\n|-negative point: ");
			for (i = 0; i < best.size; i++)
				printf("(%d %d %d), ", best.nx[i], best.ny[i], best.nz[i]);
			printf("\n");
			/* improvement found: reset the stagnation counter */
			it = 0;
		}
		printf("minimum error achieved in round %d(%d) : %f with %d ms\n", t, it, min_err, timer / 1000);
		/* sort by fitness, fittest first; the top ftnum genes survive */
		_ccv_bbf_genetic_qsort(gene, pnum, 0);
		for (i = 0; i < ftnum; i++)
			++gene[i].age;
		/* mutants: copy a random survivor, then repeatedly apply one of the
		 * three mutations until one actually takes effect (decay == 0) */
		for (i = ftnum; i < ftnum + mnum; i++)
		{
			int parent = gsl_rng_uniform_int(rng, ftnum);
			memcpy(gene + i, gene + parent, sizeof(ccv_bbf_gene_t));
			/* three mutation strategy : 1. add, 2. remove, 3. refine */
			int pnm, pn = gsl_rng_uniform_int(rng, 2);
			/* pn selects the positive (0) or negative (1) point set */
			int* pnk[] = { &gene[i].pk, &gene[i].nk };
			int* pnx[] = { gene[i].feature.px, gene[i].feature.nx };
			int* pny[] = { gene[i].feature.py, gene[i].feature.ny };
			int* pnz[] = { gene[i].feature.pz, gene[i].feature.nz };
			int x, y, z;
			int victim, decay = 1;
			do {
				switch (gsl_rng_uniform_int(rng, 3))
				{
					case 0: /* add */
						if (gene[i].pk == CCV_BBF_POINT_MAX && gene[i].nk == CCV_BBF_POINT_MAX)
							break;
						while (*pnk[pn] + 1 > CCV_BBF_POINT_MAX)
							pn = gsl_rng_uniform_int(rng, 2);
						do {
							z = gsl_rng_uniform_int(rng, 3);
							x = gsl_rng_uniform_int(rng, cols[z]);
							y = gsl_rng_uniform_int(rng, rows[z]);
						} while (_ccv_bbf_exist_gene_feature(&gene[i], x, y, z));
						pnz[pn][*pnk[pn]] = z;
						pnx[pn][*pnk[pn]] = x;
						pny[pn][*pnk[pn]] = y;
						++(*pnk[pn]);
						gene[i].feature.size = ccv_max(gene[i].pk, gene[i].nk);
						decay = gene[i].age = 0;
						break;
					case 1: /* remove */
						if (gene[i].pk + gene[i].nk <= CCV_BBF_POINT_MIN) /* at least 3 points have to be examed */
							break;
						while (*pnk[pn] - 1 <= 0) // || *pnk[pn] + *pnk[!pn] - 1 < CCV_BBF_POINT_MIN)
							pn = gsl_rng_uniform_int(rng, 2);
						victim = gsl_rng_uniform_int(rng, *pnk[pn]);
						/* shift the remaining points down over the victim */
						for (j = victim; j < *pnk[pn] - 1; j++)
						{
							pnz[pn][j] = pnz[pn][j + 1];
							pnx[pn][j] = pnx[pn][j + 1];
							pny[pn][j] = pny[pn][j + 1];
						}
						pnz[pn][*pnk[pn] - 1] = -1;
						--(*pnk[pn]);
						gene[i].feature.size = ccv_max(gene[i].pk, gene[i].nk);
						decay = gene[i].age = 0;
						break;
					case 2: /* refine */
						pnm = gsl_rng_uniform_int(rng, *pnk[pn]);
						do {
							z = gsl_rng_uniform_int(rng, 3);
							x = gsl_rng_uniform_int(rng, cols[z]);
							y = gsl_rng_uniform_int(rng, rows[z]);
						} while (_ccv_bbf_exist_gene_feature(&gene[i], x, y, z));
						pnz[pn][pnm] = z;
						pnx[pn][pnm] = x;
						pny[pn][pnm] = y;
						decay = gene[i].age = 0;
						break;
				}
			} while (decay);
		}
		for (i = ftnum + mnum; i < ftnum + mnum + hnum; i++)
		{
			/* hybrid strategy: taking positive points from dad, negative points from mum */
			int dad, mum;
			do {
				dad = gsl_rng_uniform_int(rng, ftnum);
				mum = gsl_rng_uniform_int(rng, ftnum);
			} while (dad == mum || gene[dad].pk + gene[mum].nk < CCV_BBF_POINT_MIN); /* at least 3 points have to be examed */
			for (j = 0; j < CCV_BBF_POINT_MAX; j++)
			{
				gene[i].feature.pz[j] = -1;
				gene[i].feature.nz[j] = -1;
			}
			gene[i].pk = gene[dad].pk;
			for (j = 0; j < gene[i].pk; j++)
			{
				gene[i].feature.pz[j] = gene[dad].feature.pz[j];
				gene[i].feature.px[j] = gene[dad].feature.px[j];
				gene[i].feature.py[j] = gene[dad].feature.py[j];
			}
			gene[i].nk = gene[mum].nk;
			for (j = 0; j < gene[i].nk; j++)
			{
				gene[i].feature.nz[j] = gene[mum].feature.nz[j];
				gene[i].feature.nx[j] = gene[mum].feature.nx[j];
				gene[i].feature.ny[j] = gene[mum].feature.ny[j];
			}
			gene[i].feature.size = ccv_max(gene[i].pk, gene[i].nk);
			gene[i].age = 0;
		}
		/* the rest of the population is fresh random genes */
		for (i = ftnum + mnum + hnum; i < ftnum + mnum + hnum + rnum; i++)
			_ccv_bbf_randomize_gene(rng, &gene[i], rows, cols);
		timer = _ccv_bbf_time_measure();
#ifdef USE_OPENMP
#pragma omp parallel for private(i) schedule(dynamic)
#endif
		for (i = 0; i < pnum; i++)
			gene[i].error = _ccv_bbf_error_rate(&gene[i].feature, posdata, posnum, negdata, negnum, size, pw, nw);
		timer = _ccv_bbf_time_measure() - timer;
		for (i = 0; i < pnum; i++)
			_ccv_bbf_genetic_fitness(&gene[i]);
	}
	ccfree(gene);
	gsl_rng_free(rng);
	return best;
}
/* sort genes ascending by weighted error (lowest-error genes first) */
#define less_than(fit1, fit2, aux) ((fit1).error < (fit2).error)
static CCV_IMPLEMENT_QSORT(_ccv_bbf_best_qsort, ccv_bbf_gene_t, less_than)
#undef less_than
/* Evaluate every candidate gene (in parallel when OpenMP is enabled), sort
 * ascending by error, and return the lowest-error gene that has at least
 * point_min sample points. Prints the winner and the evaluation time. */
static ccv_bbf_gene_t _ccv_bbf_best_gene(ccv_bbf_gene_t* gene, int pnum, int point_min, unsigned char** posdata, int posnum, unsigned char** negdata, int negnum, ccv_size_t size, double* pw, double* nw)
{
	int i;
	unsigned int timer = _ccv_bbf_time_measure();
#ifdef USE_OPENMP
#pragma omp parallel for private(i) schedule(dynamic)
#endif
	for (i = 0; i < pnum; i++)
		gene[i].error = _ccv_bbf_error_rate(&gene[i].feature, posdata, posnum, negdata, negnum, size, pw, nw);
	timer = _ccv_bbf_time_measure() - timer;
	_ccv_bbf_best_qsort(gene, pnum, 0);
	int min_id = 0;
	double min_err = gene[0].error;
	/* first (lowest-error) gene that satisfies the minimum point count */
	for (i = 0; i < pnum; i++)
		if (gene[i].nk + gene[i].pk >= point_min)
		{
			min_id = i;
			min_err = gene[i].error;
			break;
		}
	printf("local best bbf feature with error %f\n|-size: %d\n|-positive point: ", min_err, gene[min_id].feature.size);
	for (i = 0; i < gene[min_id].feature.size; i++)
		printf("(%d %d %d), ", gene[min_id].feature.px[i], gene[min_id].feature.py[i], gene[min_id].feature.pz[i]);
	printf("\n|-negative point: ");
	for (i = 0; i < gene[min_id].feature.size; i++)
		printf("(%d %d %d), ", gene[min_id].feature.nx[i], gene[min_id].feature.ny[i], gene[min_id].feature.nz[i]);
	printf("\nthe computation takes %d ms\n", timer / 1000);
	return gene[min_id];
}
/* Greedy "float search" optimization of one BBF feature. If best_feature is
 * 0, a 2-point feature is first bootstrapped by alternating exhaustive
 * enumeration of the negative then positive pixel; otherwise the search
 * resumes from best_feature. Each round then enumerates all single-step
 * permutations (add / remove / refine a point, and add-one-remove-one
 * swaps), picks the best via _ccv_bbf_best_gene, and stops when the error
 * no longer improves. NOTE: gene enumeration order and the gsl_rng call
 * sequence are part of the behavior; do not reorder. */
static ccv_bbf_feature_t _ccv_bbf_convex_optimize(unsigned char** posdata, int posnum, unsigned char** negdata, int negnum, ccv_bbf_feature_t* best_feature, ccv_size_t size, double* pw, double* nw)
{
	ccv_bbf_gene_t best_gene;
	/* seed (random method) */
	gsl_rng_env_setup();
	gsl_rng* rng = gsl_rng_alloc(gsl_rng_default);
	union { unsigned long int li; double db; } dbli;
	dbli.db = pw[0] + nw[0];
	gsl_rng_set(rng, dbli.li);
	int i, j, k, q, p, g, t;
	int rows[] = { size.height, size.height >> 1, size.height >> 2 };
	int cols[] = { size.width, size.width >> 1, size.width >> 2 };
	/* pnum = total candidate pixel positions over the three pyramid levels;
	 * the gene buffer is sized for the worst-case permutation count */
	int pnum = rows[0] * cols[0] + rows[1] * cols[1] + rows[2] * cols[2];
	ccv_bbf_gene_t* gene = (ccv_bbf_gene_t*)ccmalloc((pnum * (CCV_BBF_POINT_MAX * 2 + 1) * 2 + CCV_BBF_POINT_MAX * 2 + 1) * sizeof(ccv_bbf_gene_t));
	if (best_feature == 0)
	{
		/* bootstrapping the best feature, start from two pixels, one for positive, one for negative
		 * the bootstrapping process go like this: first, it will assign a random pixel as positive
		 * and enumerate every possible pixel as negative, and pick the best one. Then, enumerate every
		 * possible pixel as positive, and pick the best one, until it converges */
		memset(&best_gene, 0, sizeof(ccv_bbf_gene_t));
		for (i = 0; i < CCV_BBF_POINT_MAX; i++)
			best_gene.feature.pz[i] = best_gene.feature.nz[i] = -1;
		best_gene.pk = 1;
		best_gene.nk = 0;
		best_gene.feature.size = 1;
		/* random starting positive pixel */
		best_gene.feature.pz[0] = gsl_rng_uniform_int(rng, 3);
		best_gene.feature.px[0] = gsl_rng_uniform_int(rng, cols[best_gene.feature.pz[0]]);
		best_gene.feature.py[0] = gsl_rng_uniform_int(rng, rows[best_gene.feature.pz[0]]);
		for (t = 0; ; ++t)
		{
			g = 0;
			if (t % 2 == 0)
			{
				/* even rounds: fix the positive pixel, try every negative */
				for (i = 0; i < 3; i++)
					for (j = 0; j < cols[i]; j++)
						for (k = 0; k < rows[i]; k++)
							if (i != best_gene.feature.pz[0] || j != best_gene.feature.px[0] || k != best_gene.feature.py[0])
							{
								gene[g] = best_gene;
								gene[g].pk = gene[g].nk = 1;
								gene[g].feature.nz[0] = i;
								gene[g].feature.nx[0] = j;
								gene[g].feature.ny[0] = k;
								g++;
							}
			} else {
				/* odd rounds: fix the negative pixel, try every positive */
				for (i = 0; i < 3; i++)
					for (j = 0; j < cols[i]; j++)
						for (k = 0; k < rows[i]; k++)
							if (i != best_gene.feature.nz[0] || j != best_gene.feature.nx[0] || k != best_gene.feature.ny[0])
							{
								gene[g] = best_gene;
								gene[g].pk = gene[g].nk = 1;
								gene[g].feature.pz[0] = i;
								gene[g].feature.px[0] = j;
								gene[g].feature.py[0] = k;
								g++;
							}
			}
			printf("bootstrapping round : %d\n", t);
			ccv_bbf_gene_t local_gene = _ccv_bbf_best_gene(gene, g, 2, posdata, posnum, negdata, negnum, size, pw, nw);
			if (local_gene.error >= best_gene.error - 1e-10)
				break;
			best_gene = local_gene;
		}
	} else {
		/* resume from the given feature; recover pk / nk by finding the
		 * first unused (-1) slot on each side */
		best_gene.feature = *best_feature;
		best_gene.pk = best_gene.nk = best_gene.feature.size;
		for (i = 0; i < CCV_BBF_POINT_MAX; i++)
			if (best_feature->pz[i] == -1)
			{
				best_gene.pk = i;
				break;
			}
		for (i = 0; i < CCV_BBF_POINT_MAX; i++)
			if (best_feature->nz[i] == -1)
			{
				best_gene.nk = i;
				break;
			}
	}
	/* after bootstrapping, the float search technique will do the following permutations:
	 * a). add a new point to positive or negative
	 * b). remove a point from positive or negative
	 * c). move an existing point in positive or negative to another position
	 * the three rules applied exhaustively, no heuristic used. */
	for (t = 0; ; ++t)
	{
		g = 0;
		for (i = 0; i < 3; i++)
			for (j = 0; j < cols[i]; j++)
				for (k = 0; k < rows[i]; k++)
					if (!_ccv_bbf_exist_gene_feature(&best_gene, j, k, i))
					{
						/* add positive point */
						if (best_gene.pk < CCV_BBF_POINT_MAX - 1)
						{
							gene[g] = best_gene;
							gene[g].feature.pz[gene[g].pk] = i;
							gene[g].feature.px[gene[g].pk] = j;
							gene[g].feature.py[gene[g].pk] = k;
							gene[g].pk++;
							gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk);
							g++;
						}
						/* add negative point */
						if (best_gene.nk < CCV_BBF_POINT_MAX - 1)
						{
							gene[g] = best_gene;
							gene[g].feature.nz[gene[g].nk] = i;
							gene[g].feature.nx[gene[g].nk] = j;
							gene[g].feature.ny[gene[g].nk] = k;
							gene[g].nk++;
							gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk);
							g++;
						}
						/* refine positive point */
						for (q = 0; q < best_gene.pk; q++)
						{
							gene[g] = best_gene;
							gene[g].feature.pz[q] = i;
							gene[g].feature.px[q] = j;
							gene[g].feature.py[q] = k;
							g++;
						}
						/* add positive point, remove negative point */
						if (best_gene.pk < CCV_BBF_POINT_MAX - 1 && best_gene.nk > 1)
						{
							for (q = 0; q < best_gene.nk; q++)
							{
								gene[g] = best_gene;
								gene[g].feature.pz[gene[g].pk] = i;
								gene[g].feature.px[gene[g].pk] = j;
								gene[g].feature.py[gene[g].pk] = k;
								gene[g].pk++;
								/* shift negatives down over the removed slot q */
								for (p = q; p < best_gene.nk - 1; p++)
								{
									gene[g].feature.nz[p] = gene[g].feature.nz[p + 1];
									gene[g].feature.nx[p] = gene[g].feature.nx[p + 1];
									gene[g].feature.ny[p] = gene[g].feature.ny[p + 1];
								}
								gene[g].feature.nz[gene[g].nk - 1] = -1;
								gene[g].nk--;
								gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk);
								g++;
							}
						}
						/* refine negative point */
						for (q = 0; q < best_gene.nk; q++)
						{
							gene[g] = best_gene;
							gene[g].feature.nz[q] = i;
							gene[g].feature.nx[q] = j;
							gene[g].feature.ny[q] = k;
							g++;
						}
						/* add negative point, remove positive point */
						if (best_gene.pk > 1 && best_gene.nk < CCV_BBF_POINT_MAX - 1)
						{
							for (q = 0; q < best_gene.pk; q++)
							{
								gene[g] = best_gene;
								gene[g].feature.nz[gene[g].nk] = i;
								gene[g].feature.nx[gene[g].nk] = j;
								gene[g].feature.ny[gene[g].nk] = k;
								gene[g].nk++;
								/* shift positives down over the removed slot q */
								for (p = q; p < best_gene.pk - 1; p++)
								{
									gene[g].feature.pz[p] = gene[g].feature.pz[p + 1];
									gene[g].feature.px[p] = gene[g].feature.px[p + 1];
									gene[g].feature.py[p] = gene[g].feature.py[p + 1];
								}
								gene[g].feature.pz[gene[g].pk - 1] = -1;
								gene[g].pk--;
								gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk);
								g++;
							}
						}
					}
		/* pure removals: drop each positive point in turn */
		if (best_gene.pk > 1)
			for (q = 0; q < best_gene.pk; q++)
			{
				gene[g] = best_gene;
				for (i = q; i < best_gene.pk - 1; i++)
				{
					gene[g].feature.pz[i] = gene[g].feature.pz[i + 1];
					gene[g].feature.px[i] = gene[g].feature.px[i + 1];
					gene[g].feature.py[i] = gene[g].feature.py[i + 1];
				}
				gene[g].feature.pz[gene[g].pk - 1] = -1;
				gene[g].pk--;
				gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk);
				g++;
			}
		/* pure removals: drop each negative point in turn */
		if (best_gene.nk > 1)
			for (q = 0; q < best_gene.nk; q++)
			{
				gene[g] = best_gene;
				for (i = q; i < best_gene.nk - 1; i++)
				{
					gene[g].feature.nz[i] = gene[g].feature.nz[i + 1];
					gene[g].feature.nx[i] = gene[g].feature.nx[i + 1];
					gene[g].feature.ny[i] = gene[g].feature.ny[i + 1];
				}
				gene[g].feature.nz[gene[g].nk - 1] = -1;
				gene[g].nk--;
				gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk);
				g++;
			}
		/* include the unmodified current best as a candidate */
		gene[g] = best_gene;
		g++;
		printf("float search round : %d\n", t);
		ccv_bbf_gene_t local_gene = _ccv_bbf_best_gene(gene, g, CCV_BBF_POINT_MIN, posdata, posnum, negdata, negnum, size, pw, nw);
		if (local_gene.error >= best_gene.error - 1e-10)
			break;
		best_gene = local_gene;
	}
	ccfree(gene);
	gsl_rng_free(rng);
	return best_gene.feature;
}
/* Serialize one stage classifier in the text format consumed by
 * _ccv_read_bbf_stage_classifier; floats are written as the decimal
 * rendering of their raw 32-bit pattern via a float/int union.
 * Returns 0 on success, -1 if the file cannot be opened. */
static int _ccv_write_bbf_stage_classifier(const char* file, ccv_bbf_stage_classifier_t* classifier)
{
	FILE* w = fopen(file, "wb");
	if (w == 0) return -1;
	fprintf(w, "%d\n", classifier->count);
	union { float fl; int i; } fli;
	fli.fl = classifier->threshold;
	fprintf(w, "%d\n", fli.i);
	int k, p;
	for (k = 0; k < classifier->count; k++)
	{
		ccv_bbf_feature_t* feature = classifier->feature + k;
		fprintf(w, "%d\n", feature->size);
		for (p = 0; p < feature->size; p++)
		{
			fprintf(w, "%d %d %d\n", feature->px[p], feature->py[p], feature->pz[p]);
			fprintf(w, "%d %d %d\n", feature->nx[p], feature->ny[p], feature->nz[p]);
		}
		union { float fl; int i; } flia, flib;
		flia.fl = classifier->alpha[k * 2];
		flib.fl = classifier->alpha[k * 2 + 1];
		fprintf(w, "%d %d\n", flia.i, flib.i);
	}
	fclose(w);
	return 0;
}
/* Load cached negative samples written by _ccv_write_background_data:
 * a raw int count followed by that many packed 3-level pyramids, each of
 * fixed size isizs012 derived from the training window. Allocates each
 * negdata[i]; the caller owns the buffers.
 * Returns 0 on success, -1 if the file cannot be opened. */
static int _ccv_read_background_data(const char* file, unsigned char** negdata, int* negnum, ccv_size_t size)
{
	/* stat collects fread results to silence unused-result warnings;
	 * it is never checked, so truncation is not detected */
	int stat = 0;
	FILE* r = fopen(file, "rb");
	if (r == 0) return -1;
	stat |= fread(negnum, sizeof(int), 1, r);
	int i;
	int isizs012 = _ccv_width_padding(size.width) * size.height +
				   _ccv_width_padding(size.width >> 1) * (size.height >> 1) +
				   _ccv_width_padding(size.width >> 2) * (size.height >> 2);
	for (i = 0; i < *negnum; i++)
	{
		negdata[i] = (unsigned char*)ccmalloc(isizs012);
		stat |= fread(negdata[i], 1, isizs012, r);
	}
	fclose(r);
	return 0;
}
/* Cache the mined negative samples: a raw int count followed by negnum
 * packed 3-level pyramids. Must be read back by _ccv_read_background_data.
 * Returns 0 on success, -1 if the file cannot be opened.
 * BUG FIX: the file was opened with mode "w" (text) while the matching
 * reader uses "rb" and the payload is raw bytes written with fwrite; on
 * platforms with text-mode newline translation that corrupts the data.
 * Open in binary mode ("wb") to match the reader. */
static int _ccv_write_background_data(const char* file, unsigned char** negdata, int negnum, ccv_size_t size)
{
	FILE* w = fopen(file, "wb");
	if (w == 0) return -1;
	fwrite(&negnum, sizeof(int), 1, w);
	int i;
	int isizs012 = _ccv_width_padding(size.width) * size.height +
				   _ccv_width_padding(size.width >> 1) * (size.height >> 1) +
				   _ccv_width_padding(size.width >> 2) * (size.height >> 2);
	for (i = 0; i < negnum; i++)
		fwrite(negdata[i], 1, isizs012, w);
	fclose(w);
	return 0;
}
/* Reload training state written by _ccv_save_bbf_cacade_training_state:
 * the current stage index i, weak classifier count k, background-ready flag
 * bg, then every positive / negative boosting weight (each double stored as
 * its two 32-bit int halves). Returns 0 on success, -1 if the state file
 * cannot be opened (a fresh run). */
static int _ccv_resume_bbf_cascade_training_state(const char* file, int* i, int* k, int* bg, double* pw, double* nw, int posnum, int negnum)
{
	FILE* r = fopen(file, "r");
	if (r == 0) return -1;
	/* stat collects fscanf results to silence unused-result warnings */
	int stat = 0;
	stat |= fscanf(r, "%d %d %d", i, k, bg);
	union { double db; int i[2]; } dbi;
	int idx;
	for (idx = 0; idx < posnum; idx++)
	{
		stat |= fscanf(r, "%d %d", &dbi.i[0], &dbi.i[1]);
		pw[idx] = dbi.db;
	}
	for (idx = 0; idx < negnum; idx++)
	{
		stat |= fscanf(r, "%d %d", &dbi.i[0], &dbi.i[1]);
		nw[idx] = dbi.db;
	}
	fclose(r);
	return 0;
}
/* Persist boosting state so an interrupted training run can resume: stage
 * index i, weak classifier count k, the bg flag, then every positive /
 * negative sample weight serialized as the two 32-bit int halves of its
 * double bit pattern (read back by _ccv_resume_bbf_cascade_training_state).
 * (Note: "cacade" in the name is a historical typo kept for compatibility.)
 * Returns 0 on success, -1 if the file cannot be opened. */
static int _ccv_save_bbf_cacade_training_state(const char* file, int i, int k, int bg, double* pw, double* nw, int posnum, int negnum)
{
	FILE* w = fopen(file, "w");
	if (w == 0) return -1;
	fprintf(w, "%d %d %d\n", i, k, bg);
	union { double db; int i[2]; } dbi;
	int idx;
	for (idx = 0; idx < posnum; ++idx)
	{
		dbi.db = pw[idx];
		fprintf(w, "%d %d ", dbi.i[0], dbi.i[1]);
	}
	fprintf(w, "\n");
	for (idx = 0; idx < negnum; ++idx)
	{
		dbi.db = nw[idx];
		fprintf(w, "%d %d ", dbi.i[0], dbi.i[1]);
	}
	fprintf(w, "\n");
	fclose(w);
	return 0;
}
void ccv_bbf_classifier_cascade_new(ccv_dense_matrix_t** posimg, int posnum, char** bgfiles, int bgnum, int negnum, ccv_size_t size, const char* dir, ccv_bbf_new_param_t params)
{
int i, j, k;
/* allocate memory for usage */
ccv_bbf_classifier_cascade_t* cascade = (ccv_bbf_classifier_cascade_t*)ccmalloc(sizeof(ccv_bbf_classifier_cascade_t));
cascade->count = 0;
cascade->size = size;
cascade->stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(sizeof(ccv_bbf_stage_classifier_t));
unsigned char** posdata = (unsigned char**)ccmalloc(posnum * sizeof(unsigned char*));
unsigned char** negdata = (unsigned char**)ccmalloc(negnum * sizeof(unsigned char*));
double* pw = (double*)ccmalloc(posnum * sizeof(double));
double* nw = (double*)ccmalloc(negnum * sizeof(double));
float* peval = (float*)ccmalloc(posnum * sizeof(float));
float* neval = (float*)ccmalloc(negnum * sizeof(float));
double inv_balance_k = 1. / params.balance_k;
/* balance factor k, and weighted with 0.01 */
params.balance_k *= 0.01;
inv_balance_k *= 0.01;
int steps[] = { _ccv_width_padding(cascade->size.width),
_ccv_width_padding(cascade->size.width >> 1),
_ccv_width_padding(cascade->size.width >> 2) };
int isizs0 = steps[0] * cascade->size.height;
int isizs01 = isizs0 + steps[1] * (cascade->size.height >> 1);
i = 0;
k = 0;
int bg = 0;
int cacheK = 10;
/* state resume code */
char buf[1024];
sprintf(buf, "%s/stat.txt", dir);
_ccv_resume_bbf_cascade_training_state(buf, &i, &k, &bg, pw, nw, posnum, negnum);
if (i > 0)
{
cascade->count = i;
ccfree(cascade->stage_classifier);
cascade->stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(i * sizeof(ccv_bbf_stage_classifier_t));
for (j = 0; j < i; j++)
{
sprintf(buf, "%s/stage-%d.txt", dir, j);
_ccv_read_bbf_stage_classifier(buf, &cascade->stage_classifier[j]);
}
}
if (k > 0)
cacheK = k;
int rpos, rneg = 0;
if (bg)
{
sprintf(buf, "%s/negs.txt", dir);
_ccv_read_background_data(buf, negdata, &rneg, cascade->size);
}
for (; i < params.layer; i++)
{
if (!bg)
{
rneg = _ccv_prepare_background_data(cascade, bgfiles, bgnum, negdata, negnum);
/* save state of background data */
sprintf(buf, "%s/negs.txt", dir);
_ccv_write_background_data(buf, negdata, rneg, cascade->size);
bg = 1;
}
double totalw;
/* save state of cascade : level, weight etc. */
sprintf(buf, "%s/stat.txt", dir);
_ccv_save_bbf_cacade_training_state(buf, i, k, bg, pw, nw, posnum, negnum);
ccv_bbf_stage_classifier_t classifier;
if (k > 0)
{
/* resume state of classifier */
sprintf( buf, "%s/stage-%d.txt", dir, i );
_ccv_read_bbf_stage_classifier(buf, &classifier);
} else {
/* initialize classifier */
for (j = 0; j < posnum; j++)
pw[j] = params.balance_k;
for (j = 0; j < rneg; j++)
nw[j] = inv_balance_k;
classifier.count = k;
classifier.threshold = 0;
classifier.feature = (ccv_bbf_feature_t*)ccmalloc(cacheK * sizeof(ccv_bbf_feature_t));
classifier.alpha = (float*)ccmalloc(cacheK * 2 * sizeof(float));
}
_ccv_prepare_positive_data(posimg, posdata, cascade->size, posnum);
rpos = _ccv_prune_positive_data(cascade, posdata, posnum, cascade->size);
printf("%d postivie data and %d negative data in training\n", rpos, rneg);
/* reweight to 1.00 */
totalw = 0;
for (j = 0; j < rpos; j++)
totalw += pw[j];
for (j = 0; j < rneg; j++)
totalw += nw[j];
for (j = 0; j < rpos; j++)
pw[j] = pw[j] / totalw;
for (j = 0; j < rneg; j++)
nw[j] = nw[j] / totalw;
for (; ; k++)
{
/* get overall true-positive, false-positive rate and threshold */
double tp = 0, fp = 0, etp = 0, efp = 0;
_ccv_bbf_eval_data(&classifier, posdata, rpos, negdata, rneg, cascade->size, peval, neval);
_ccv_sort_32f(peval, rpos, 0);
classifier.threshold = peval[(int)((1. - params.pos_crit) * rpos)] - 1e-6;
for (j = 0; j < rpos; j++)
{
if (peval[j] >= 0)
++tp;
if (peval[j] >= classifier.threshold)
++etp;
}
tp /= rpos; etp /= rpos;
for (j = 0; j < rneg; j++)
{
if (neval[j] >= 0)
++fp;
if (neval[j] >= classifier.threshold)
++efp;
}
fp /= rneg; efp /= rneg;
printf("stage classifier real TP rate : %f, FP rate : %f\n", tp, fp);
printf("stage classifier TP rate : %f, FP rate : %f at threshold : %f\n", etp, efp, classifier.threshold);
if (k > 0)
{
/* save classifier state */
sprintf(buf, "%s/stage-%d.txt", dir, i);
_ccv_write_bbf_stage_classifier(buf, &classifier);
sprintf(buf, "%s/stat.txt", dir);
_ccv_save_bbf_cacade_training_state(buf, i, k, bg, pw, nw, posnum, negnum);
}
if (etp > params.pos_crit && efp < params.neg_crit)
break;
/* TODO: more post-process is needed in here */
/* select the best feature in current distribution through genetic algorithm optimization */
ccv_bbf_feature_t best;
if (params.optimizer == CCV_BBF_GENETIC_OPT)
{
best = _ccv_bbf_genetic_optimize(posdata, rpos, negdata, rneg, params.feature_number, cascade->size, pw, nw);
} else if (params.optimizer == CCV_BBF_FLOAT_OPT) {
best = _ccv_bbf_convex_optimize(posdata, rpos, negdata, rneg, 0, cascade->size, pw, nw);
} else {
best = _ccv_bbf_genetic_optimize(posdata, rpos, negdata, rneg, params.feature_number, cascade->size, pw, nw);
best = _ccv_bbf_convex_optimize(posdata, rpos, negdata, rneg, &best, cascade->size, pw, nw);
}
double err = _ccv_bbf_error_rate(&best, posdata, rpos, negdata, rneg, cascade->size, pw, nw);
double rw = (1 - err) / err;
totalw = 0;
/* reweight */
for (j = 0; j < rpos; j++)
{
unsigned char* u8[] = { posdata[j], posdata[j] + isizs0, posdata[j] + isizs01 };
if (!_ccv_run_bbf_feature(&best, steps, u8))
pw[j] *= rw;
pw[j] *= params.balance_k;
totalw += pw[j];
}
for (j = 0; j < rneg; j++)
{
unsigned char* u8[] = { negdata[j], negdata[j] + isizs0, negdata[j] + isizs01 };
if (_ccv_run_bbf_feature(&best, steps, u8))
nw[j] *= rw;
nw[j] *= inv_balance_k;
totalw += nw[j];
}
for (j = 0; j < rpos; j++)
pw[j] = pw[j] / totalw;
for (j = 0; j < rneg; j++)
nw[j] = nw[j] / totalw;
double c = log(rw);
printf("coefficient of feature %d: %f\n", k + 1, c);
classifier.count = k + 1;
/* resizing classifier */
if (k >= cacheK)
{
ccv_bbf_feature_t* feature = (ccv_bbf_feature_t*)ccmalloc(cacheK * 2 * sizeof(ccv_bbf_feature_t));
memcpy(feature, classifier.feature, cacheK * sizeof(ccv_bbf_feature_t));
ccfree(classifier.feature);
float* alpha = (float*)ccmalloc(cacheK * 4 * sizeof(float));
memcpy(alpha, classifier.alpha, cacheK * 2 * sizeof(float));
ccfree(classifier.alpha);
classifier.feature = feature;
classifier.alpha = alpha;
cacheK *= 2;
}
/* setup new feature */
classifier.feature[k] = best;
classifier.alpha[k * 2] = -c;
classifier.alpha[k * 2 + 1] = c;
}
cascade->count = i + 1;
ccv_bbf_stage_classifier_t* stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(cascade->count * sizeof(ccv_bbf_stage_classifier_t));
memcpy(stage_classifier, cascade->stage_classifier, i * sizeof(ccv_bbf_stage_classifier_t));
ccfree(cascade->stage_classifier);
stage_classifier[i] = classifier;
cascade->stage_classifier = stage_classifier;
k = 0;
bg = 0;
for (j = 0; j < rpos; j++)
ccfree(posdata[j]);
for (j = 0; j < rneg; j++)
ccfree(negdata[j]);
}
ccfree(neval);
ccfree(peval);
ccfree(nw);
ccfree(pw);
ccfree(negdata);
ccfree(posdata);
ccfree(cascade);
}
#else
/* GSL-less build: cascade training is unavailable, so this stub only reports
 * the missing libgsl dependency instead of building a cascade. */
void ccv_bbf_classifier_cascade_new(ccv_dense_matrix_t** posimg, int posnum, char** bgfiles, int bgnum, int negnum, ccv_size_t size, const char* dir, ccv_bbf_new_param_t params)
{
	fputs(" ccv_bbf_classifier_cascade_new requires libgsl support, please compile ccv with libgsl.\n", stderr);
}
#endif
/* Grouping predicate for ccv_array_group(): non-zero when rectangle r2 lies
 * close enough to r1 in position and size to be merged into one group,
 * regardless of class id. */
static int _ccv_is_equal(const void* _r1, const void* _r2, void* data)
{
	const ccv_comp_t* a = (const ccv_comp_t*)_r1;
	const ccv_comp_t* b = (const ccv_comp_t*)_r2;
	/* allow up to a quarter of a's width of positional slack */
	int tolerance = (int)(a->rect.width * 0.25 + 0.5);
	if (b->rect.x > a->rect.x + tolerance || b->rect.x < a->rect.x - tolerance)
		return 0;
	if (b->rect.y > a->rect.y + tolerance || b->rect.y < a->rect.y - tolerance)
		return 0;
	/* widths must agree within a factor of 1.5 in both directions */
	if (b->rect.width > (int)(a->rect.width * 1.5 + 0.5))
		return 0;
	return (int)(b->rect.width * 1.5 + 0.5) >= a->rect.width;
}
/* Same neighborhood predicate as _ccv_is_equal, but additionally requires the
 * two rectangles to carry the same class id. */
static int _ccv_is_equal_same_class(const void* _r1, const void* _r2, void* data)
{
	const ccv_comp_t* a = (const ccv_comp_t*)_r1;
	const ccv_comp_t* b = (const ccv_comp_t*)_r2;
	/* class ids must match before any geometry is considered */
	if (b->id != a->id)
		return 0;
	int tolerance = (int)(a->rect.width * 0.25 + 0.5);
	if (b->rect.x > a->rect.x + tolerance || b->rect.x < a->rect.x - tolerance)
		return 0;
	if (b->rect.y > a->rect.y + tolerance || b->rect.y < a->rect.y - tolerance)
		return 0;
	if (b->rect.width > (int)(a->rect.width * 1.5 + 0.5))
		return 0;
	return (int)(b->rect.width * 1.5 + 0.5) >= a->rect.width;
}
/* Multi-scale BBF object detector.
 * a        : input image
 * _cascade : array of `count` trained cascades, each evaluated over the pyramid
 * count    : number of cascades in _cascade
 * params   : detection parameters (interval, min_neighbors, flags, base size, accurate)
 * returns  : ccv_array_t of ccv_comp_t detections; caller owns/frees it */
ccv_array_t* ccv_bbf_detect_objects(ccv_dense_matrix_t* a, ccv_bbf_classifier_cascade_t** _cascade, int count, ccv_bbf_param_t params)
{
	/* how many times the base detection window fits the image, per axis */
	int hr = a->rows / params.size.height;
	int wr = a->cols / params.size.width;
	/* scale step between pyramid levels: 2^(1/(interval+1)) */
	double scale = pow(2., 1. / (params.interval + 1.));
	/* number of levels per octave */
	int next = params.interval + 1;
	/* number of scales until the window no longer fits */
	int scale_upto = (int)(log((double)ccv_min(hr, wr)) / log(scale));
	/* pyramid storage: 4 slots per level (base + up to 3 pixel-shifted
	 * downsamples used when params.accurate is set) */
	ccv_dense_matrix_t** pyr = (ccv_dense_matrix_t**)alloca((scale_upto + next * 2) * 4 * sizeof(ccv_dense_matrix_t*));
	memset(pyr, 0, (scale_upto + next * 2) * 4 * sizeof(ccv_dense_matrix_t*));
	/* normalize the input to the first cascade's window size when it differs
	 * from the requested base size; otherwise alias the input at slot 0 */
	if (params.size.height != _cascade[0]->size.height || params.size.width != _cascade[0]->size.width)
		ccv_resample(a, &pyr[0], 0, a->rows * _cascade[0]->size.height / params.size.height, a->cols * _cascade[0]->size.width / params.size.width, CCV_INTER_AREA);
	else
		pyr[0] = a;
	int i, j, k, t, x, y, q;
	/* first octave: direct resamples of the base level */
	for (i = 1; i < ccv_min(params.interval + 1, scale_upto + next * 2); i++)
		ccv_resample(pyr[0], &pyr[i * 4], 0, (int)(pyr[0]->rows / pow(scale, i)), (int)(pyr[0]->cols / pow(scale, i)), CCV_INTER_AREA);
	/* later octaves: 2x downsample of the level one octave up */
	for (i = next; i < scale_upto + next * 2; i++)
		ccv_sample_down(pyr[i * 4 - next * 4], &pyr[i * 4], 0, 0, 0);
	if (params.accurate)
		/* also build the (1,0), (0,1) and (1,1) pixel-shifted downsamples for
		 * finer spatial coverage in the scan below */
		for (i = next * 2; i < scale_upto + next * 2; i++)
		{
			ccv_sample_down(pyr[i * 4 - next * 4], &pyr[i * 4 + 1], 0, 1, 0);
			ccv_sample_down(pyr[i * 4 - next * 4], &pyr[i * 4 + 2], 0, 0, 1);
			ccv_sample_down(pyr[i * 4 - next * 4], &pyr[i * 4 + 3], 0, 1, 1);
		}
	ccv_array_t* idx_seq;
	ccv_array_t* seq = ccv_array_new(sizeof(ccv_comp_t), 64, 0);        /* raw hits for one cascade */
	ccv_array_t* seq2 = ccv_array_new(sizeof(ccv_comp_t), 64, 0);       /* grouped candidates */
	ccv_array_t* result_seq = ccv_array_new(sizeof(ccv_comp_t), 64, 0); /* merged results over all cascades */
	/* detect in multi scale */
	for (t = 0; t < count; t++)
	{
		ccv_bbf_classifier_cascade_t* cascade = _cascade[t];
		/* map window coordinates at the cascade's scale back to input coordinates */
		float scale_x = (float) params.size.width / (float) cascade->size.width;
		float scale_y = (float) params.size.height / (float) cascade->size.height;
		ccv_array_clear(seq);
		for (i = 0; i < scale_upto; i++)
		{
			int dx[] = {0, 1, 0, 1};
			int dy[] = {0, 0, 1, 1};
			/* the feature evaluator reads three pyramid levels at once (full,
			 * half and quarter resolution); the scan runs on the quarter grid */
			int i_rows = pyr[i * 4 + next * 8]->rows - (cascade->size.height >> 2);
			int steps[] = { pyr[i * 4]->step, pyr[i * 4 + next * 4]->step, pyr[i * 4 + next * 8]->step };
			int i_cols = pyr[i * 4 + next * 8]->cols - (cascade->size.width >> 2);
			/* per-resolution row padding remaining after i_cols columns */
			int paddings[] = { pyr[i * 4]->step * 4 - i_cols * 4,
				pyr[i * 4 + next * 4]->step * 2 - i_cols * 2,
				pyr[i * 4 + next * 8]->step - i_cols };
			for (q = 0; q < (params.accurate ? 4 : 1); q++)
			{
				/* base pointers into the three resolutions, offset by the
				 * sub-pixel shift (dx, dy) of this pass */
				unsigned char* u8[] = { pyr[i * 4]->data.u8 + dx[q] * 2 + dy[q] * pyr[i * 4]->step * 2, pyr[i * 4 + next * 4]->data.u8 + dx[q] + dy[q] * pyr[i * 4 + next * 4]->step, pyr[i * 4 + next * 8 + q]->data.u8 };
				for (y = 0; y < i_rows; y++)
				{
					for (x = 0; x < i_cols; x++)
					{
						float sum;
						int flag = 1;
						ccv_bbf_stage_classifier_t* classifier = cascade->stage_classifier;
						/* attentional cascade: reject as soon as one stage
						 * falls below its threshold */
						for (j = 0; j < cascade->count; ++j, ++classifier)
						{
							sum = 0;
							float* alpha = classifier->alpha;
							ccv_bbf_feature_t* feature = classifier->feature;
							for (k = 0; k < classifier->count; ++k, alpha += 2, ++feature)
								sum += alpha[_ccv_run_bbf_feature(feature, steps, u8)];
							if (sum < classifier->threshold)
							{
								flag = 0;
								break;
							}
						}
						if (flag)
						{
							ccv_comp_t comp;
							/* window mapped back to original-image coordinates */
							comp.rect = ccv_rect((int)((x * 4 + dx[q] * 2) * scale_x + 0.5), (int)((y * 4 + dy[q] * 2) * scale_y + 0.5), (int)(cascade->size.width * scale_x + 0.5), (int)(cascade->size.height * scale_y + 0.5));
							comp.id = t;
							comp.neighbors = 1;
							/* NOTE(review): `sum` is the last stage's score; it is
							 * uninitialized if cascade->count == 0 -- presumably a
							 * trained cascade always has at least one stage, confirm */
							comp.confidence = sum;
							ccv_array_push(seq, &comp);
						}
						/* advance one quarter-grid cell in each resolution */
						u8[0] += 4;
						u8[1] += 2;
						u8[2] += 1;
					}
					u8[0] += paddings[0];
					u8[1] += paddings[1];
					u8[2] += paddings[2];
				}
			}
			scale_x *= scale;
			scale_y *= scale;
		}
		/* the following code from OpenCV's haar feature implementation */
		if(params.min_neighbors == 0)
		{
			/* no grouping requested: forward every raw hit */
			for (i = 0; i < seq->rnum; i++)
			{
				ccv_comp_t* comp = (ccv_comp_t*)ccv_array_get(seq, i);
				ccv_array_push(result_seq, comp);
			}
		} else {
			idx_seq = 0;
			ccv_array_clear(seq2);
			// group retrieved rectangles in order to filter out noise
			int ncomp = ccv_array_group(seq, &idx_seq, _ccv_is_equal_same_class, 0);
			ccv_comp_t* comps = (ccv_comp_t*)ccmalloc((ncomp + 1) * sizeof(ccv_comp_t));
			memset(comps, 0, (ncomp + 1) * sizeof(ccv_comp_t));
			// count number of neighbors
			for(i = 0; i < seq->rnum; i++)
			{
				ccv_comp_t r1 = *(ccv_comp_t*)ccv_array_get(seq, i);
				int idx = *(int*)ccv_array_get(idx_seq, i);
				if (comps[idx].neighbors == 0)
					comps[idx].confidence = r1.confidence;
				++comps[idx].neighbors;
				/* accumulate rectangle coordinates; averaged below */
				comps[idx].rect.x += r1.rect.x;
				comps[idx].rect.y += r1.rect.y;
				comps[idx].rect.width += r1.rect.width;
				comps[idx].rect.height += r1.rect.height;
				comps[idx].id = r1.id;
				comps[idx].confidence = ccv_max(comps[idx].confidence, r1.confidence);
			}
			// calculate average bounding box
			for(i = 0; i < ncomp; i++)
			{
				int n = comps[i].neighbors;
				if(n >= params.min_neighbors)
				{
					ccv_comp_t comp;
					/* (sum * 2 + n) / (2 * n) is round-to-nearest of sum / n */
					comp.rect.x = (comps[i].rect.x * 2 + n) / (2 * n);
					comp.rect.y = (comps[i].rect.y * 2 + n) / (2 * n);
					comp.rect.width = (comps[i].rect.width * 2 + n) / (2 * n);
					comp.rect.height = (comps[i].rect.height * 2 + n) / (2 * n);
					comp.neighbors = comps[i].neighbors;
					comp.id = comps[i].id;
					comp.confidence = comps[i].confidence;
					ccv_array_push(seq2, &comp);
				}
			}
			// filter out small face rectangles inside large face rectangles
			for(i = 0; i < seq2->rnum; i++)
			{
				ccv_comp_t r1 = *(ccv_comp_t*)ccv_array_get(seq2, i);
				int flag = 1;
				for(j = 0; j < seq2->rnum; j++)
				{
					ccv_comp_t r2 = *(ccv_comp_t*)ccv_array_get(seq2, j);
					int distance = (int)(r2.rect.width * 0.25 + 0.5);
					/* drop r1 when it sits inside a stronger same-class r2 */
					if(i != j &&
					   r1.id == r2.id &&
					   r1.rect.x >= r2.rect.x - distance &&
					   r1.rect.y >= r2.rect.y - distance &&
					   r1.rect.x + r1.rect.width <= r2.rect.x + r2.rect.width + distance &&
					   r1.rect.y + r1.rect.height <= r2.rect.y + r2.rect.height + distance &&
					   (r2.neighbors > ccv_max(3, r1.neighbors) || r1.neighbors < 3))
					{
						flag = 0;
						break;
					}
				}
				if(flag)
					ccv_array_push(result_seq, &r1);
			}
			ccv_array_free(idx_seq);
			ccfree(comps);
		}
	}
	ccv_array_free(seq);
	ccv_array_free(seq2);
	ccv_array_t* result_seq2;
	/* the following code from OpenCV's haar feature implementation */
	if (params.flags & CCV_BBF_NO_NESTED)
	{
		/* merge overlapping detections across cascades, keeping only the most
		 * confident representative of each group */
		result_seq2 = ccv_array_new(sizeof(ccv_comp_t), 64, 0);
		idx_seq = 0;
		// group retrieved rectangles in order to filter out noise
		int ncomp = ccv_array_group(result_seq, &idx_seq, _ccv_is_equal, 0);
		ccv_comp_t* comps = (ccv_comp_t*)ccmalloc((ncomp + 1) * sizeof(ccv_comp_t));
		memset(comps, 0, (ncomp + 1) * sizeof(ccv_comp_t));
		// count number of neighbors
		for(i = 0; i < result_seq->rnum; i++)
		{
			ccv_comp_t r1 = *(ccv_comp_t*)ccv_array_get(result_seq, i);
			int idx = *(int*)ccv_array_get(idx_seq, i);
			if (comps[idx].neighbors == 0 || comps[idx].confidence < r1.confidence)
			{
				comps[idx].confidence = r1.confidence;
				comps[idx].neighbors = 1;
				comps[idx].rect = r1.rect;
				comps[idx].id = r1.id;
			}
		}
		// calculate average bounding box
		for(i = 0; i < ncomp; i++)
			if(comps[i].neighbors)
				ccv_array_push(result_seq2, &comps[i]);
		ccv_array_free(result_seq);
		ccfree(comps);
	} else {
		result_seq2 = result_seq;
	}
	/* release the pyramid; slot 0 is freed only when this function allocated
	 * it (i.e. the input was resampled rather than aliased) */
	for (i = 1; i < scale_upto + next * 2; i++)
		ccv_matrix_free(pyr[i * 4]);
	if (params.accurate)
		for (i = next * 2; i < scale_upto + next * 2; i++)
		{
			ccv_matrix_free(pyr[i * 4 + 1]);
			ccv_matrix_free(pyr[i * 4 + 2]);
			ccv_matrix_free(pyr[i * 4 + 3]);
		}
	if (params.size.height != _cascade[0]->size.height || params.size.width != _cascade[0]->size.width)
		ccv_matrix_free(pyr[0]);
	return result_seq2;
}
/* Load a BBF classifier cascade from `directory`.
 * Expects directory/cascade.txt containing "count width height" plus one
 * stage-<i>.txt file per stage.
 * Returns NULL when cascade.txt is missing or its header cannot be parsed;
 * when a stage file fails to load, the cascade is truncated to the stages
 * read so far (original behavior). Caller frees the returned cascade. */
ccv_bbf_classifier_cascade_t* ccv_bbf_read_classifier_cascade(const char* directory)
{
	char buf[1024];
	/* snprintf guards against overly long directory names */
	snprintf(buf, sizeof(buf), "%s/cascade.txt", directory);
	int s, i;
	FILE* r = fopen(buf, "r");
	if (r == 0)
		return 0;
	ccv_bbf_classifier_cascade_t* cascade = (ccv_bbf_classifier_cascade_t*)ccmalloc(sizeof(ccv_bbf_classifier_cascade_t));
	s = fscanf(r, "%d %d %d", &cascade->count, &cascade->size.width, &cascade->size.height);
	fclose(r);
	/* fix: the fscanf result was previously ignored, so a malformed header
	 * left count/size uninitialized and the loop below read garbage */
	if (s != 3 || cascade->count < 0)
	{
		ccfree(cascade);
		return 0;
	}
	cascade->stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(cascade->count * sizeof(ccv_bbf_stage_classifier_t));
	for (i = 0; i < cascade->count; i++)
	{
		snprintf(buf, sizeof(buf), "%s/stage-%d.txt", directory, i);
		if (_ccv_read_bbf_stage_classifier(buf, &cascade->stage_classifier[i]) < 0)
		{
			/* keep only the stages that loaded successfully */
			cascade->count = i;
			break;
		}
	}
	return cascade;
}
/* Deserialize a BBF cascade from the in-memory buffer `s`.
 * Layout (the mirror of ccv_bbf_classifier_cascade_write_binary):
 *   count, size.width, size.height,
 *   then per stage: count, threshold,
 *                   count * ccv_bbf_feature_t, 2 * count * float (alphas).
 * NOTE(review): no bounds checking -- the buffer is trusted to be complete
 * and well-formed. Caller frees the returned cascade. */
ccv_bbf_classifier_cascade_t* ccv_bbf_classifier_cascade_read_binary(char* s)
{
	int i;
	ccv_bbf_classifier_cascade_t* cascade = (ccv_bbf_classifier_cascade_t*)ccmalloc(sizeof(ccv_bbf_classifier_cascade_t));
	/* fixed-size header */
	memcpy(&cascade->count, s, sizeof(cascade->count)); s += sizeof(cascade->count);
	memcpy(&cascade->size.width, s, sizeof(cascade->size.width)); s += sizeof(cascade->size.width);
	memcpy(&cascade->size.height, s, sizeof(cascade->size.height)); s += sizeof(cascade->size.height);
	ccv_bbf_stage_classifier_t* classifier = cascade->stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(cascade->count * sizeof(ccv_bbf_stage_classifier_t));
	/* one variable-length record per stage, in order */
	for (i = 0; i < cascade->count; i++, classifier++)
	{
		memcpy(&classifier->count, s, sizeof(classifier->count)); s += sizeof(classifier->count);
		memcpy(&classifier->threshold, s, sizeof(classifier->threshold)); s += sizeof(classifier->threshold);
		classifier->feature = (ccv_bbf_feature_t*)ccmalloc(classifier->count * sizeof(ccv_bbf_feature_t));
		/* two alpha weights per feature (reject/accept) */
		classifier->alpha = (float*)ccmalloc(classifier->count * 2 * sizeof(float));
		memcpy(classifier->feature, s, classifier->count * sizeof(ccv_bbf_feature_t)); s += classifier->count * sizeof(ccv_bbf_feature_t);
		memcpy(classifier->alpha, s, classifier->count * 2 * sizeof(float)); s += classifier->count * 2 * sizeof(float);
	}
	return cascade;
}
/* Serialize `cascade` into buffer `s` of capacity `slen` bytes.
 * Always computes and returns the total number of bytes required; data is
 * written only when slen is large enough, so callers can invoke this once
 * to size the buffer and a second time to fill it.
 * The layout matches ccv_bbf_classifier_cascade_read_binary. */
int ccv_bbf_classifier_cascade_write_binary(ccv_bbf_classifier_cascade_t* cascade, char* s, int slen)
{
	int i;
	/* first pass: measure the serialized size */
	int len = sizeof(cascade->count) + sizeof(cascade->size.width) + sizeof(cascade->size.height);
	ccv_bbf_stage_classifier_t* classifier = cascade->stage_classifier;
	for (i = 0; i < cascade->count; i++, classifier++)
		len += sizeof(classifier->count) + sizeof(classifier->threshold) + classifier->count * sizeof(ccv_bbf_feature_t) + classifier->count * 2 * sizeof(float);
	if (slen >= len)
	{
		/* second pass: emit header, then each stage record in order */
		memcpy(s, &cascade->count, sizeof(cascade->count)); s += sizeof(cascade->count);
		memcpy(s, &cascade->size.width, sizeof(cascade->size.width)); s += sizeof(cascade->size.width);
		memcpy(s, &cascade->size.height, sizeof(cascade->size.height)); s += sizeof(cascade->size.height);
		classifier = cascade->stage_classifier;
		for (i = 0; i < cascade->count; i++, classifier++)
		{
			memcpy(s, &classifier->count, sizeof(classifier->count)); s += sizeof(classifier->count);
			memcpy(s, &classifier->threshold, sizeof(classifier->threshold)); s += sizeof(classifier->threshold);
			memcpy(s, classifier->feature, classifier->count * sizeof(ccv_bbf_feature_t)); s += classifier->count * sizeof(ccv_bbf_feature_t);
			memcpy(s, classifier->alpha, classifier->count * 2 * sizeof(float)); s += classifier->count * 2 * sizeof(float);
		}
	}
	return len;
}
/* Release a cascade and everything it owns: each stage's feature and alpha
 * arrays, the stage array itself, and the cascade struct. */
void ccv_bbf_classifier_cascade_free(ccv_bbf_classifier_cascade_t* cascade)
{
	int j;
	ccv_bbf_stage_classifier_t* classifier = cascade->stage_classifier;
	for (j = 0; j < cascade->count; j++, classifier++)
	{
		ccfree(classifier->feature);
		ccfree(classifier->alpha);
	}
	ccfree(cascade->stage_classifier);
	ccfree(cascade);
}
|
3d25pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute RESULT = X - Y for `struct timeval` values.
 * Y is normalized in place (seconds borrowed into/out of tv_usec) so the
 * microsecond subtraction never underflows.
 * Returns 1 when the difference is negative, 0 otherwise. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y's usec field when x has fewer microseconds. */
    if (x->tv_usec < y->tv_usec) {
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec += carry;
    }
    /* Push excess microseconds of the difference back into whole seconds. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* After normalization tv_usec is certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/* Benchmark driver for a PLUTO/CLooG-tiled 3D 25-point stencil with
 * variable axis-symmetric coefficients.
 * Usage: ./prog Nx Ny Nz Nt -- each spatial size gets an 8-cell halo added.
 * Runs the tiled kernel TESTS times and reports per-run and best times. */
int main(int argc, char *argv[])
{
    int t, i, j, k, m, test;
    int Nx, Ny, Nz, Nt;
    /* NOTE(review): Nx/Ny/Nz (and Nt) remain uninitialized when fewer than
     * four arguments are supplied -- the benchmark assumes all are passed */
    if (argc > 3) {
        /* +8 = 4-point stencil halo on each side of the domain */
        Nx = atoi(argv[1])+8;
        Ny = atoi(argv[2])+8;
        Nz = atoi(argv[3])+8;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);
    // allocate the arrays
    /* A[2][Nz][Ny][Nx]: double buffer alternating between time steps */
    double ****A = (double ****) malloc(sizeof(double***)*2);
    for(m=0; m<2;m++){
        A[m] = (double ***) malloc(sizeof(double**)*Nz);
        for(i=0; i<Nz; i++){
            A[m][i] = (double**) malloc(sizeof(double*)*Ny);
            for(j=0;j<Ny;j++){
                A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
            }
        }
    }
    /* coef[13][Nz][Ny][Nx]: one coefficient field per stencil term
     * (center + 4 radii along each of the 3 axes) */
    double ****coef = (double ****) malloc(sizeof(double***)*13);
    for(m=0; m<13;m++){
        coef[m] = (double ***) malloc(sizeof(double**)*Nz);
        for(i=0; i<Nz; i++){
            coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
            for(j=0;j<Ny;j++){
                coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
            }
        }
    }
    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    /* tile sizes used by the generated loop nest below; -1 terminates the list */
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 16;
    tile_size[1] = 16;
    tile_size[2] = 4;
    tile_size[3] = 128;
    tile_size[4] = -1;
    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff=1.e100;
    const int BASE = 1024;
    // initialize variables
    //
    /* fixed seed for reproducible inputs */
    srand(42);
    for (i = 1; i < Nz; i++) {
        for (j = 1; j < Ny; j++) {
            for (k = 1; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
            }
        }
    }
    for (m=0; m<13; m++) {
        for (i=1; i<Nz; i++) {
            for (j=1; j<Ny; j++) {
                for (k=1; k<Nx; k++) {
                    coef[m][i][j][k] = 1.0 * (rand() % BASE);
                }
            }
        }
    }
#ifdef LIKWID_PERFMON
    /* start one LIKWID measurement region per thread */
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif
    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif
    /* run the kernel TESTS times; only the best time is reported at the end */
    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
        /* Copyright (C) 1991-2014 Free Software Foundation, Inc.
        This file is part of the GNU C Library.
        The GNU C Library is free software; you can redistribute it and/or
        modify it under the terms of the GNU Lesser General Public
        License as published by the Free Software Foundation; either
        version 2.1 of the License, or (at your option) any later version.
        The GNU C Library is distributed in the hope that it will be useful,
        but WITHOUT ANY WARRANTY; without even the implied warranty of
        MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
        Lesser General Public License for more details.
        You should have received a copy of the GNU Lesser General Public
        License along with the GNU C Library; if not, see
        <http://www.gnu.org/licenses/>. */
        /* This header is separate from features.h so that the compiler can
        include it implicitly at the start of every compilation. It must
        not itself include <features.h> or any other header that includes
        <features.h> because the implicit include comes before any feature
        test macros that may be defined in a source file before it first
        explicitly includes a system header. GCC knows the name of this
        header in order to preinclude it. */
        /* glibc's intent is to support the IEC 559 math functionality, real
        and complex. If the GCC (4.9 and later) predefined macros
        specifying compiler intent are available, use them to determine
        whether the overall intent is to support these features; otherwise,
        presume an older compiler has intent to support these features and
        define these macros by default. */
        /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
        Unicode 6.0. */
        /* We do not support C11 <threads.h>. */
        int t1, t2, t3, t4, t5, t6, t7, t8;
        int lb, ub, lbp, ubp, lb2, ub2;
        register int lbv, ubv;
        /* Start of CLooG code */
        /* Auto-generated time-tiled loop nest: t1..t8 are tile/point
         * coordinates and all bounds were computed by CLooG/PLUTO.
         * Do not hand-edit -- regenerate from the source stencil instead. */
        if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
            for (t1=-1;t1<=floord(Nt-1,2);t1++) {
                lbp=max(ceild(t1,2),ceild(4*t1-Nt+2,4));
                ubp=min(floord(4*Nt+Nz-9,16),floord(8*t1+Nz+2,16));
                /* parallelize over the z-tile dimension */
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
                for (t2=lbp;t2<=ubp;t2++) {
                    for (t3=max(max(max(1,ceild(16*t2-Nz+9,4)),2*t1+1),4*t1-4*t2+2);t3<=min(min(min(floord(4*Nt+Ny-9,4),floord(8*t1+Ny+7,4)),floord(16*t2+Ny+3,4)),floord(16*t1-16*t2+Nz+Ny+5,4));t3++) {
                        for (t4=max(max(max(0,ceild(t1-15,16)),ceild(16*t2-Nz-115,128)),ceild(4*t3-Ny-115,128));t4<=min(min(min(min(floord(4*Nt+Nx-9,128),floord(8*t1+Nx+7,128)),floord(16*t2+Nx+3,128)),floord(4*t3+Nx-9,128)),floord(16*t1-16*t2+Nz+Nx+5,128));t4++) {
                            for (t5=max(max(max(max(max(0,ceild(16*t2-Nz+5,4)),ceild(4*t3-Ny+5,4)),ceild(128*t4-Nx+5,4)),2*t1),4*t1-4*t2+1);t5<=min(min(min(min(min(floord(16*t1-16*t2+Nz+10,4),Nt-1),2*t1+3),4*t2+2),t3-1),32*t4+30);t5++) {
                                for (t6=max(max(16*t2,4*t5+4),-16*t1+16*t2+8*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+8*t5),4*t5+Nz-5);t6++) {
                                    for (t7=4*t3;t7<=min(4*t3+3,4*t5+Ny-5);t7++) {
                                        lbv=max(128*t4,4*t5+4);
                                        ubv=min(128*t4+127,4*t5+Nx-5);
                                        /* innermost x loop: unit stride, vectorizable */
#pragma ivdep
#pragma vector always
                                        for (t8=lbv;t8<=ubv;t8++) {
                                            A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) 
+ (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));;
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
        /* End of CLooG code */
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        min_tdiff = min(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }
    PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
    /* stop the per-thread measurement regions opened above */
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif
    // Free allocated arrays
    for(i=0; i<Nz; i++){
        for(j=0;j<Ny;j++){
            free(A[0][i][j]);
            free(A[1][i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
    }
    free(A[0]);
    free(A[1]);
    for(m=0; m<13;m++){
        for(i=0; i<Nz; i++){
            for(j=0;j<Ny;j++){
                free(coef[m][i][j]);
            }
            free(coef[m][i]);
        }
        free(coef[m]);
    }
    return 0;
}
|
softmax_layer.c | #include "softmax_layer.h"
#include "blas.h"
#include "dark_cuda.h"
#include "utils.h"
#include "blas.h"
#include <float.h>
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#define SECRET_NUM -1234
/* Apply softmax independently over each sibling group of the hierarchy tree,
 * for every sample in the batch. Input and output are laid out as
 * batch-major blocks of `inputs` floats. */
void softmax_tree(float *input, int batch, int inputs, float temp, tree *hierarchy, float *output)
{
    int b, g;
    for (b = 0; b < batch; ++b) {
        float *in = input + b*inputs;
        float *out = output + b*inputs;
        int offset = 0;
        /* each group is softmax-normalized on its own */
        for (g = 0; g < hierarchy->groups; ++g) {
            const int group_size = hierarchy->group_size[g];
            softmax(in + offset, group_size, temp, out + offset, 1);
            offset += group_size;
        }
    }
}
/* Construct a softmax layer over `inputs` activations split into `groups`
 * independent softmax groups, allocating host (and, with GPU, device)
 * buffers for output, delta, per-element loss and scalar cost. */
softmax_layer make_softmax_layer(int batch, int inputs, int groups)
{
    assert(inputs%groups == 0);
    fprintf(stderr, "softmax %4d\n", inputs);
    softmax_layer layer = { (LAYER_TYPE)0 };
    layer.type = SOFTMAX;
    layer.batch = batch;
    layer.groups = groups;
    layer.inputs = inputs;
    layer.outputs = inputs;
    const int n = inputs * batch;
    layer.loss = (float*)xcalloc(n, sizeof(float));
    layer.output = (float*)xcalloc(n, sizeof(float));
    layer.delta = (float*)xcalloc(n, sizeof(float));
    layer.cost = (float*)xcalloc(1, sizeof(float));
    layer.forward = forward_softmax_layer;
    layer.backward = backward_softmax_layer;
#ifdef GPU
    layer.forward_gpu = forward_softmax_layer_gpu;
    layer.backward_gpu = backward_softmax_layer_gpu;
    layer.output_gpu = cuda_make_array(layer.output, n);
    layer.loss_gpu = cuda_make_array(layer.loss, n);
    layer.delta_gpu = cuda_make_array(layer.delta, n);
#endif
    return layer;
}
/* CPU forward pass: hierarchical softmax over the word tree when
 * l.softmax_tree is set, otherwise a flat softmax over each group; then an
 * optional cross-entropy loss/gradient against net.truth. */
void forward_softmax_layer(const softmax_layer l, network_state net)
{
    if (l.softmax_tree) {
        /* normalize each sibling group of the tree separately */
        int g, offset = 0;
        for (g = 0; g < l.softmax_tree->groups; ++g) {
            const int group_size = l.softmax_tree->group_size[g];
            softmax_cpu(net.input + offset, group_size, l.batch, l.inputs, 1, 0, 1, l.temperature, l.output + offset);
            offset += group_size;
        }
    } else {
        /* flat softmax over each of l.groups chunks of size inputs/groups */
        softmax_cpu(net.input, l.inputs/l.groups, l.batch, l.inputs, l.groups, l.inputs/l.groups, 1, l.temperature, l.output);
    }
    if (net.truth && !l.noloss) {
        /* cross-entropy loss and gradient; cost is the summed loss */
        softmax_x_ent_cpu(l.batch*l.inputs, l.output, net.truth, l.delta, l.loss);
        l.cost[0] = sum_array(l.loss, l.batch*l.inputs);
    }
}
/* CPU backward pass: accumulate this layer's gradient into the network
 * delta, i.e. net.delta += 1 * l.delta. */
void backward_softmax_layer(const softmax_layer l, network_state net)
{
    const int n = l.inputs*l.batch;
    axpy_cpu(n, 1, l.delta, 1, net.delta, 1);
}
#ifdef GPU
/* Copy the layer's output from device memory back into the host buffer. */
void pull_softmax_layer_output(const softmax_layer layer)
{
    const int n = layer.inputs*layer.batch;
    cuda_pull_array(layer.output_gpu, layer.output, n);
}
/* GPU forward pass. Mirrors forward_softmax_layer(): hierarchical softmax
 * over the word tree when l.softmax_tree is set, otherwise a spatial or flat
 * softmax; then the optional cross-entropy loss against net.truth. */
void forward_softmax_layer_gpu(const softmax_layer l, network_state net)
{
	if(l.softmax_tree){
		/* whole-tree softmax in a single kernel launch */
		softmax_tree_gpu(net.input, 1, l.batch, l.inputs, l.temperature, l.output_gpu, *l.softmax_tree);
		/*
		int i;
		int count = 0;
		for (i = 0; i < l.softmax_tree->groups; ++i) {
			int group_size = l.softmax_tree->group_size[i];
			softmax_gpu(net.input_gpu + count, group_size, l.batch, l.inputs, 1, 0, 1, l.temperature, l.output_gpu + count);
			count += group_size;
		}
		*/
	} else {
		if(l.spatial){
			/* per-location softmax across channels;
			 * NOTE(review): the temperature argument here is the literal 1,
			 * so l.temperature is ignored in the spatial path -- confirm intended */
			softmax_gpu_new_api(net.input, l.c, l.batch*l.c, l.inputs/l.c, l.w*l.h, 1, l.w*l.h, 1, l.output_gpu);
		}else{
			/* flat softmax over each of l.groups chunks */
			softmax_gpu_new_api(net.input, l.inputs/l.groups, l.batch, l.inputs, l.groups, l.inputs/l.groups, 1, l.temperature, l.output_gpu);
		}
	}
	if(net.truth && !l.noloss){
		/* cross-entropy loss and gradient on device */
		softmax_x_ent_gpu(l.batch*l.inputs, l.output_gpu, net.truth, l.delta_gpu, l.loss_gpu);
		if(l.softmax_tree){
			/* zero out loss/gradient at entries whose truth equals SECRET_NUM */
			mask_gpu_new_api(l.batch*l.inputs, l.delta_gpu, SECRET_NUM, net.truth, 0);
			mask_gpu_new_api(l.batch*l.inputs, l.loss_gpu, SECRET_NUM, net.truth, 0);
		}
		/* the scalar cost is reduced on the host */
		cuda_pull_array(l.loss_gpu, l.loss, l.batch*l.inputs);
		l.cost[0] = sum_array(l.loss, l.batch*l.inputs);
	}
}
/* GPU backward pass: state.delta += loss_scale * layer.delta_gpu. */
void backward_softmax_layer_gpu(const softmax_layer layer, network_state state)
{
    const int n = layer.batch*layer.inputs;
    axpy_ongpu(n, state.net.loss_scale, layer.delta_gpu, 1, state.delta, 1);
}
#endif
// -------------------------------------
// Supervised Contrastive Learning: https://arxiv.org/pdf/2004.11362.pdf
/* Build a supervised-contrastive-learning layer.
 * When yolo_layer is given the layer runs in detection mode and shares that
 * layer's labels, anchor count and embedding size; otherwise it is a plain
 * classification contrastive layer with one embedding per sample, and the
 * dense pairwise similarity tables are preallocated.
 * Interface is unchanged; the only behavioral fix is the fprintf format for
 * `step` (size_t was printed with %d, which is undefined behavior). */
contrastive_layer make_contrastive_layer(int batch, int w, int h, int c, int classes, int inputs, layer *yolo_layer)
{
    contrastive_layer l = { (LAYER_TYPE)0 };
    l.type = CONTRASTIVE;
    l.batch = batch;
    l.inputs = inputs;
    l.w = w;
    l.h = h;
    l.c = c;
    l.temperature = 1;
    l.max_boxes = 0;
    if (yolo_layer) {
        /* detection mode: embeddings come from a paired [yolo] layer */
        l.detection = 1;
        l.max_boxes = yolo_layer->max_boxes;
        l.labels = yolo_layer->labels; // track id
        l.n = yolo_layer->n; // num of embeddings per cell = num of anchors
        l.classes = yolo_layer->classes;// num of classes
        classes = l.classes;
        l.embedding_size = l.inputs / (l.n*l.h*l.w);
        l.truths = yolo_layer->truths;
        if (l.embedding_size != yolo_layer->embedding_size) {
            printf(" Error: [contrastive] embedding_size=%d isn't equal to [yolo] embedding_size=%d. They should use the same [convolutional] layer \n", l.embedding_size, yolo_layer->embedding_size);
            getchar();
            exit(0);
        }
        if (l.inputs % (l.n*l.h*l.w) != 0) {
            printf(" Warning: filters= number in the previous (embedding) layer isn't divisable by number of anchors %d \n", l.n);
            getchar();
        }
    }
    else {
        /* classification mode: one embedding (of size c) per sample */
        l.detection = 0;
        l.labels = (int*)xcalloc(l.batch, sizeof(int)); // labels
        l.n = 1; // num of embeddings per cell
        l.classes = classes; // num of classes
        l.embedding_size = l.c;
    }
    l.outputs = inputs;
    l.loss = (float*)xcalloc(1, sizeof(float));
    l.output = (float*)xcalloc(inputs * batch, sizeof(float));
    l.delta = (float*)xcalloc(inputs * batch, sizeof(float));
    l.cost = (float*)xcalloc(1, sizeof(float));
    /* total number of embedding slots across the batch */
    const size_t step = l.batch*l.n*l.h*l.w;
    l.cos_sim = NULL;
    l.exp_cos_sim = NULL;
    l.p_constrastive = NULL;
    if (!l.detection) {
        /* classification mode precomputes dense step x step similarity tables */
        l.cos_sim = (float*)xcalloc(step*step, sizeof(float));
        l.exp_cos_sim = (float*)xcalloc(step*step, sizeof(float));
        l.p_constrastive = (float*)xcalloc(step*step, sizeof(float));
    }
    //l.p_constrastive = (float*)xcalloc(step*step, sizeof(float));
    //l.contrast_p_size = (int*)xcalloc(1, sizeof(int));
    //*l.contrast_p_size = step;
    //l.contrast_p = (contrastive_params*)xcalloc(*l.contrast_p_size, sizeof(contrastive_params));
    l.forward = forward_contrastive_layer;
    l.backward = backward_contrastive_layer;
#ifdef GPU
    l.forward_gpu = forward_contrastive_layer_gpu;
    l.backward_gpu = backward_contrastive_layer_gpu;
    l.output_gpu = cuda_make_array(l.output, inputs*batch);
    l.delta_gpu = cuda_make_array(l.delta, inputs*batch);
    /* element count for cuda_make_array, hence the /4 (bytes -> floats) */
    const int max_contr_size = (l.max_boxes*l.batch)*(l.max_boxes*l.batch) * sizeof(contrastive_params)/4;
    printf(" max_contr_size = %d MB \n", max_contr_size / (1024*1024));
    l.contrast_p_gpu = (contrastive_params *)cuda_make_array(NULL, max_contr_size);
#endif
    /* fix: `step` is size_t; %4d invoked undefined behavior, use %4zu */
    fprintf(stderr, "contrastive %4d x%4d x%4d x emb_size %4d x batch: %4d classes = %4d, step = %4zu \n", w, h, l.n, l.embedding_size, batch, l.classes, step);
    if(l.detection) fprintf(stderr, "detection \n");
    return l;
}
/* Symmetrically clamp val to the range [-max_val, max_val]. */
static inline float clip_value(float val, const float max_val)
{
    if (val > max_val)
        return max_val;
    if (val < -max_val)
        return -max_val;
    return val;
}
// Forward pass of the contrastive (self-supervised) layer.
// Training-only: computes pairwise cosine similarities between embedding
// vectors, derives contrastive probabilities, writes gradients into l.delta,
// and reports a contrastive accuracy (*l.loss) and loss magnitude (*l.cost).
// Two modes: l.detection (detector embeddings, GPU-assisted P computation)
// and classifier mode (dense cos_sim / p_constrastive matrices).
void forward_contrastive_layer(contrastive_layer l, network_state state)
{
    if (!state.train) return;
    const float truth_thresh = state.net.label_smooth_eps;
    // Number of images per time step when the batch is split into l.steps.
    const int mini_batch = l.batch / l.steps;
    int b, n, w, h;
    fill_cpu(l.batch*l.inputs, 0, l.delta, 1);
    if (!l.detection) {
        // Classifier mode: pair consecutive images (b/2) as positives, or
        // original/adversarial pairs (b%2) in adversarial training.
        for (b = 0; b < l.batch; ++b) {
            if (state.net.adversarial) l.labels[b] = b % 2;
            else l.labels[b] = b / 2;
        }
        // set labels
        for (b = 0; b < l.batch; ++b) {
            for (h = 0; h < l.h; ++h) {
                for (w = 0; w < l.w; ++w)
                {
                    // find truth with max prob (only 1 label even if mosaic is used)
                    float max_truth = 0;
                    int n;
                    for (n = 0; n < l.classes; ++n) {
                        const float truth_prob = state.truth[b*l.classes + n];
                        //printf(" truth_prob = %f, ", truth_prob);
                        //if (truth_prob > max_truth)
                        if (truth_prob > truth_thresh)
                        {
                            //printf(" truth_prob = %f, max_truth = %f, n = %d; ", truth_prob, max_truth, n);
                            max_truth = truth_prob;
                            l.labels[b] = n;
                        }
                    }
                    //printf(", l.labels[b] = %d ", l.labels[b]);
                }
            }
        }
    }
    //printf("\n\n");
    // set pointers to features
    // z[i] holds a freshly extracted embedding vector for every spatial
    // cell with a valid (non-negative) label; entries stay NULL otherwise.
    float **z = (float**)xcalloc(l.batch*l.n*l.h*l.w, sizeof(float*));
    for (b = 0; b < l.batch; ++b) {
        for (n = 0; n < l.n; ++n) {
            for (h = 0; h < l.h; ++h) {
                for (w = 0; w < l.w; ++w)
                {
                    const int z_index = b*l.n*l.h*l.w + n*l.h*l.w + h*l.w + w;
                    if (l.labels[z_index] < 0) continue;
                    //const int input_index = b*l.inputs + n*l.embedding_size*l.h*l.w + h*l.w + w;
                    //float *ptr = state.input + input_index;
                    //z[z_index] = ptr;
                    z[z_index] = (float*)xcalloc(l.embedding_size, sizeof(float));
                    get_embedding(state.input, l.w, l.h, l.c, l.embedding_size, w, h, n, b, z[z_index]);
                }
            }
        }
    }
    int b2, n2, h2, w2;
    int contrast_p_index = 0;
    const size_t step = l.batch*l.n*l.h*l.w;
    size_t contrast_p_size = step;
    if (!l.detection) contrast_p_size = l.batch*l.batch;
    // Pair list; grown on demand below (NOTE(review): growth is +1 per
    // overflow, i.e. one xrealloc per extra pair — O(n^2) worst case).
    contrastive_params *contrast_p = (contrastive_params*)xcalloc(contrast_p_size, sizeof(contrastive_params));
    float *max_sim_same = (float *)xcalloc(l.batch*l.inputs, sizeof(float));
    float *max_sim_diff = (float *)xcalloc(l.batch*l.inputs, sizeof(float));
    // -10 marks "no similarity recorded yet" (valid cosine range is [-1,1]).
    fill_cpu(l.batch*l.inputs, -10, max_sim_same, 1);
    fill_cpu(l.batch*l.inputs, -10, max_sim_diff, 1);
    // precalculate cosine similiraty
    for (b = 0; b < l.batch; ++b) {
        for (n = 0; n < l.n; ++n) {
            for (h = 0; h < l.h; ++h) {
                for (w = 0; w < l.w; ++w)
                {
                    const int z_index = b*l.n*l.h*l.w + n*l.h*l.w + h*l.w + w;
                    if (l.labels[z_index] < 0) continue;
                    for (b2 = 0; b2 < l.batch; ++b2) {
                        for (n2 = 0; n2 < l.n; ++n2) {
                            for (h2 = 0; h2 < l.h; ++h2) {
                                for (w2 = 0; w2 < l.w; ++w2)
                                {
                                    const int z_index2 = b2*l.n*l.h*l.w + n2*l.h*l.w + h2*l.w + w2;
                                    if (l.labels[z_index2] < 0) continue;
                                    if (z_index == z_index2) continue;
                                    // Only compare embeddings from the same time step.
                                    const int time_step_i = b / mini_batch;
                                    const int time_step_j = b2 / mini_batch;
                                    if (time_step_i != time_step_j) continue;
                                    const size_t step = l.batch*l.n*l.h*l.w;
                                    const float sim = cosine_similarity(z[z_index], z[z_index2], l.embedding_size);
                                    const float exp_sim = expf(sim / l.temperature);
                                    if (!l.detection) {
                                        l.cos_sim[z_index*step + z_index2] = sim;
                                        l.exp_cos_sim[z_index*step + z_index2] = exp_sim;
                                    }
                                    // calc good sim
                                    if (l.labels[z_index] == l.labels[z_index2] && max_sim_same[z_index] < sim) max_sim_same[z_index] = sim;
                                    if (l.labels[z_index] != l.labels[z_index2] && max_sim_diff[z_index] < sim) max_sim_diff[z_index] = sim;
                                    //printf(" z_i = %d, z_i2 = %d, l = %d, l2 = %d, sim = %f \n", z_index, z_index2, l.labels[z_index], l.labels[z_index2], sim);
                                    contrast_p[contrast_p_index].sim = sim;
                                    contrast_p[contrast_p_index].exp_sim = exp_sim;
                                    contrast_p[contrast_p_index].i = z_index;
                                    contrast_p[contrast_p_index].j = z_index2;
                                    contrast_p[contrast_p_index].time_step_i = time_step_i;
                                    contrast_p[contrast_p_index].time_step_j = time_step_j;
                                    contrast_p_index++;
                                    //printf(" contrast_p_index = %d, contrast_p_size = %d \n", contrast_p_index, contrast_p_size);
                                    if ((contrast_p_index+1) >= contrast_p_size) {
                                        contrast_p_size = contrast_p_index + 1;
                                        //printf(" contrast_p_size = %d, z_index = %d, z_index2 = %d \n", contrast_p_size, z_index, z_index2);
                                        contrast_p = (contrastive_params*)xrealloc(contrast_p, contrast_p_size * sizeof(contrastive_params));
                                    }
                                    // Sanity check: cosine similarity must stay in [-1,1].
                                    if (sim > 1.001 || sim < -1.001) {
                                        printf(" sim = %f, ", sim); getchar();
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }
    // calc contrastive accuracy
    // An entry participated if both a same-label and a diff-label similarity
    // were recorded (they were initialized to -10, updated values are >= -1).
    int i;
    int good_sims = 0, all_sims = 0, same_sim = 0, diff_sim = 0;
    for (i = 0; i < l.batch*l.inputs; ++i) {
        if (max_sim_same[i] >= -1 && max_sim_diff[i] >= -1) {
            if (max_sim_same[i] >= -1) same_sim++;
            if (max_sim_diff[i] >= -1) diff_sim++;
            ++all_sims;
            //printf(" max_sim_diff[i] = %f, max_sim_same[i] = %f \n", max_sim_diff[i], max_sim_same[i]);
            if (max_sim_diff[i] < max_sim_same[i]) good_sims++;
        }
    }
    if (all_sims > 0) {
        *l.loss = 100 * good_sims / all_sims;
    }
    else *l.loss = -1;
    printf(" Contrast accuracy = %f %%, all = %d, good = %d, same = %d, diff = %d \n", *l.loss, all_sims, good_sims, same_sim, diff_sim);
    free(max_sim_same);
    free(max_sim_diff);
    /*
    // show near sim
    float good_contrast = 0;
    for (b = 0; b < l.batch; b += 2) {
        float same = l.cos_sim[b*l.batch + b];
        float aug = l.cos_sim[b*l.batch + b + 1];
        float diff = l.cos_sim[b*l.batch + b + 2];
        good_contrast += (aug > diff);
        //printf(" l.labels[b] = %d, l.labels[b+1] = %d, l.labels[b+2] = %d, b = %d \n", l.labels[b], l.labels[b + 1], l.labels[b + 2], b);
        //printf(" same = %f, aug = %f, diff = %f, (aug > diff) = %d \n", same, aug, diff, (aug > diff));
    }
    *l.loss = 100 * good_contrast / (l.batch / 2);
    printf(" Contrast accuracy = %f %% \n", *l.loss);
    */
    /*
    // precalculate P_contrastive
    for (b = 0; b < l.batch; ++b) {
        int b2;
        for (b2 = 0; b2 < l.batch; ++b2) {
            if (b != b2) {
                const float P = P_constrastive(b, b2, l.labels, l.batch, z, l.embedding_size, l.temperature, l.cos_sim);
                l.p_constrastive[b*l.batch + b2] = P;
                if (P > 1 || P < -1) {
                    printf(" p = %f, ", P); getchar();
                }
            }
        }
    }
    */
    const size_t contr_size = contrast_p_index;
    if (l.detection) {
#ifdef GPU
        // Detector mode on GPU: compute P for all pairs in one kernel.
        const int max_contr_size = (l.max_boxes*l.batch)*(l.max_boxes*l.batch);
        if (max_contr_size < contr_size) {
            printf(" Error: too large number of bboxes: contr_size = %d > max_contr_size = %d \n", contr_size, max_contr_size);
            exit(0);
        }
        int *labels = NULL;
        if (contr_size > 2) {
            // sizeof/4 converts byte count to float count for the cuda copy.
            cuda_push_array((float *)l.contrast_p_gpu, (float *)contrast_p, contr_size * sizeof(contrastive_params) / 4);
            P_constrastive_f_det_gpu(labels, l.embedding_size, l.temperature, l.contrast_p_gpu, contr_size);
            cuda_pull_array((float *)l.contrast_p_gpu, (float *)contrast_p, contr_size * sizeof(contrastive_params) / 4);
        }
#else // GPU
        int k;
        //#pragma omp parallel for
        for (k = 0; k < contr_size; ++k) {
            contrast_p[k].P = P_constrastive_f_det(k, l.labels, z, l.embedding_size, l.temperature, contrast_p, contr_size);
        }
#endif // GPU
    }
    else {
        // precalculate P-contrastive
        for (b = 0; b < l.batch; ++b) {
            for (n = 0; n < l.n; ++n) {
                for (h = 0; h < l.h; ++h) {
                    for (w = 0; w < l.w; ++w)
                    {
                        const int z_index = b*l.n*l.h*l.w + n*l.h*l.w + h*l.w + w;
                        if (l.labels[z_index] < 0) continue;
                        for (b2 = 0; b2 < l.batch; ++b2) {
                            for (n2 = 0; n2 < l.n; ++n2) {
                                for (h2 = 0; h2 < l.h; ++h2) {
                                    for (w2 = 0; w2 < l.w; ++w2)
                                    {
                                        const int z_index2 = b2*l.n*l.h*l.w + n2*l.h*l.w + h2*l.w + w2;
                                        if (l.labels[z_index2] < 0) continue;
                                        if (z_index == z_index2) continue;
                                        const int time_step_i = b / mini_batch;
                                        const int time_step_j = b2 / mini_batch;
                                        if (time_step_i != time_step_j) continue;
                                        const size_t step = l.batch*l.n*l.h*l.w;
                                        float P = -10;
                                        if (l.detection) {
                                            P = P_constrastive_f(z_index, z_index2, l.labels, z, l.embedding_size, l.temperature, contrast_p, contr_size);
                                        }
                                        else {
                                            P = P_constrastive(z_index, z_index2, l.labels, step, z, l.embedding_size, l.temperature, l.cos_sim, l.exp_cos_sim);
                                            l.p_constrastive[z_index*step + z_index2] = P;
                                        }
                                        // Store P back into the matching pair entry
                                        // (linear search — NOTE(review): O(pairs) per pair).
                                        int q;
                                        for (q = 0; q < contr_size; ++q)
                                            if (contrast_p[q].i == z_index && contrast_p[q].j == z_index2) {
                                                contrast_p[q].P = P;
                                                break;
                                            }
                                        //if (q == contr_size) getchar();
                                        //if (P > 1 || P < -1) {
                                        //    printf(" p = %f, z_index = %d, z_index2 = %d ", P, z_index, z_index2); getchar();
                                        //}
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }
    // calc deltas
#pragma omp parallel for
    for (b = 0; b < l.batch; ++b) {
        for (n = 0; n < l.n; ++n) {
            for (h = 0; h < l.h; ++h) {
                for (w = 0; w < l.w; ++w)
                {
                    const int z_index = b*l.n*l.h*l.w + n*l.h*l.w + h*l.w + w;
                    const size_t step = l.batch*l.n*l.h*l.w;
                    if (l.labels[z_index] < 0) continue;
                    const int delta_index = b*l.embedding_size*l.n*l.h*l.w + n*l.embedding_size*l.h*l.w + h*l.w + w;
                    const int wh = l.w*l.h;
                    if (l.detection) {
                        // detector
                        // positive
                        grad_contrastive_loss_positive_f(z_index, l.labels, step, z, l.embedding_size, l.temperature, l.delta + delta_index, wh, contrast_p, contr_size);
                        // negative
                        grad_contrastive_loss_negative_f(z_index, l.labels, step, z, l.embedding_size, l.temperature, l.delta + delta_index, wh, contrast_p, contr_size);
                    }
                    else {
                        // classifier
                        // positive
                        grad_contrastive_loss_positive(z_index, l.labels, step, z, l.embedding_size, l.temperature, l.cos_sim, l.p_constrastive, l.delta + delta_index, wh);
                        // negative
                        grad_contrastive_loss_negative(z_index, l.labels, step, z, l.embedding_size, l.temperature, l.cos_sim, l.p_constrastive, l.delta + delta_index, wh);
                    }
                }
            }
        }
    }
    // Scale and clip the gradient, then report the squared magnitude as cost.
    scal_cpu(l.inputs * l.batch, l.cls_normalizer, l.delta, 1);
    for (i = 0; i < l.inputs * l.batch; ++i) {
        l.delta[i] = clip_value(l.delta[i], l.max_delta);
    }
    *(l.cost) = pow(mag_array(l.delta, l.inputs * l.batch), 2);
    if (state.net.adversarial) {
        printf(" adversarial contrastive loss = %f \n\n", *(l.cost));
    }
    else {
        printf(" contrastive loss = %f \n\n", *(l.cost));
    }
    // Free the per-cell embedding copies and the pair list.
    for (b = 0; b < l.batch; ++b) {
        for (n = 0; n < l.n; ++n) {
            for (h = 0; h < l.h; ++h) {
                for (w = 0; w < l.w; ++w)
                {
                    const int z_index = b*l.n*l.h*l.w + n*l.h*l.w + h*l.w + w;
                    //if (l.labels[z_index] < 0) continue;
                    if (z[z_index]) free(z[z_index]);
                }
            }
        }
    }
    free(contrast_p);
    free(z);
}
// Backward pass: add this layer's gradient (l.delta) into the incoming
// network gradient buffer (state.delta), element-wise with weight 1.
void backward_contrastive_layer(contrastive_layer l, network_state state)
{
    const int total = l.inputs * l.batch;
    axpy_cpu(total, 1, l.delta, 1, state.delta, 1);
}
#ifdef GPU
// Copy the layer's output from GPU memory to the host-side l.output buffer.
void pull_contrastive_layer_output(const contrastive_layer l)
{
    cuda_pull_array(l.output_gpu, l.output, l.inputs*l.batch);
}
// Copy the layer's gradient from host to GPU memory.
// NOTE(review): despite the "_output" name this pushes l.delta to
// l.delta_gpu, not l.output — looks intentional (the forward GPU path
// only needs the delta pushed back) but confirm against callers.
void push_contrastive_layer_output(const contrastive_layer l)
{
    cuda_push_array(l.delta_gpu, l.delta, l.inputs*l.batch);
}
// GPU forward pass: the layer output is a straight copy of its input on
// the device; the actual contrastive computation is delegated to the CPU
// implementation on pulled-down copies of the input and truth, after which
// the resulting gradient is pushed back to the device.
void forward_contrastive_layer_gpu(contrastive_layer l, network_state state)
{
    simple_copy_ongpu(l.batch*l.inputs, state.input, l.output_gpu);
    if (!state.train) return;
    // Host copy of the activations (l.inputs == l.outputs for this layer).
    float *in_cpu = (float *)xcalloc(l.batch*l.inputs, sizeof(float));
    cuda_pull_array(l.output_gpu, l.output, l.batch*l.outputs);
    memcpy(in_cpu, l.output, l.batch*l.outputs * sizeof(float));
    float *truth_cpu = 0;
    if (state.truth) {
        // Truth layout differs between detector and classifier modes.
        int num_truth = l.batch*l.classes;
        if (l.detection) num_truth = l.batch*l.truths;
        truth_cpu = (float *)xcalloc(num_truth, sizeof(float));
        cuda_pull_array(state.truth, truth_cpu, num_truth);
    }
    // Build a CPU-side state that aliases everything except input/truth.
    network_state cpu_state = state;
    cpu_state.net = state.net;
    cpu_state.index = state.index;
    cpu_state.train = state.train;
    cpu_state.truth = truth_cpu;
    cpu_state.input = in_cpu;
    forward_contrastive_layer(l, cpu_state);
    // Upload the gradient computed on the CPU.
    cuda_push_array(l.delta_gpu, l.delta, l.batch*l.outputs);
    free(in_cpu);
    if (cpu_state.truth) free(cpu_state.truth);
}
// GPU backward pass: accumulate the layer gradient into the network
// gradient on-device, scaled by the global loss scale factor.
void backward_contrastive_layer_gpu(contrastive_layer layer, network_state state)
{
    axpy_ongpu(layer.batch*layer.inputs, state.net.loss_scale, layer.delta_gpu, 1, state.delta, 1);
}
#endif |
matmul_int.c | /*
* Square matrix multiplication
* A[N][N] * B[N][N] = C[N][N]
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/timeb.h>
#include <malloc.h>
#define N 1024
//#define N 16
// read timer in second
// Wall-clock time in seconds, with millisecond resolution (via ftime).
double read_timer() {
    struct timeb now;
    ftime(&now);
    return now.time + now.millitm / 1000.0;
}
/*
 * Fill an N x N matrix with small pseudo-random integers in [0, 9].
 *
 * The previous expression `(int)rand()/(int)(RAND_MAX/10.0)` mixed a
 * float constant with integer casts and could yield the value 10 only
 * when rand() == RAND_MAX, i.e. a biased 11th bucket. Dividing by
 * (RAND_MAX/10 + 1) keeps the result uniformly in 0..9.
 */
void init(int **A) {
    int i, j;
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            A[i][j] = rand() / (RAND_MAX / 10 + 1);
        }
    }
}
/*
 * C = A * B^T, vectorized inner product.
 * The caller passes B already transposed so both operands are walked
 * row-wise (cache-friendly). The inner loop is a SIMD reduction.
 */
void matmul_simd(int **A, int **B, int **C) {
    int row, col, k;
    for (row = 0; row < N; row++) {
        for (col = 0; col < N; col++) {
            int acc = 0;
#pragma omp simd reduction(+:acc)
            for (k = 0; k < N; k++)
                acc += A[row][k] * B[col][k];
            C[row][col] = acc;
        }
    }
}
// Debug functions
// Debug helper: print the top-left 8x8 corner of a matrix, one bracketed
// row per line, followed by a blank line.
void print_matrix(int **matrix) {
    int row = 0;
    while (row < 8) {
        printf("[");
        int col = 0;
        while (col < 8) {
            printf("%d ", matrix[row][col]);
            ++col;
        }
        puts("]");
        ++row;
    }
    puts("");
}
/*
 * Scalar reference implementation of C = A * B^T (B arrives transposed,
 * same convention as matmul_simd). Row pointers are hoisted out of the
 * inner loop for clarity.
 */
void matmul_serial(int **A, int **B, int **C) {
    int row, col, k;
    for (row = 0; row < N; row++) {
        const int *a_row = A[row];
        for (col = 0; col < N; col++) {
            const int *b_row = B[col];
            int acc = 0;
            for (k = 0; k < N; k++)
                acc += a_row[k] * b_row[k];
            C[row][col] = acc;
        }
    }
}
/*
 * Compare two N x N matrices element-wise.
 * Returns the sum of absolute differences, so 0 means identical.
 *
 * Fix: the previous version summed signed differences, so opposite-sign
 * errors could cancel and report 0 for matrices that actually differ.
 */
int check(int **A, int **B){
    int difference = 0;
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            difference += abs(A[i][j] - B[i][j]);
        }
    }
    return difference;
}
// Main
/*
 * Benchmark driver: compares SIMD vs serial matrix multiplication.
 * B is transposed into BT once so both kernels compute A * B^T with
 * row-wise access. Each kernel is warmed up, then timed over num_runs.
 *
 * Fixes: all five matrices were leaked (now freed before exit), and the
 * report row labelled "matmul_omp" actually times matmul_simd (label
 * corrected).
 */
int main(int argc, char *argv[]) {
    // Allocate the five N x N matrices (row-pointer representation).
    int **A = malloc(sizeof(int*)*N);
    int **B = malloc(sizeof(int*)*N);
    int **C_simd = malloc(sizeof(int*)*N);
    int **C_serial = malloc(sizeof(int*)*N);
    int **BT = malloc(sizeof(int*)*N);
    for (int i = 0; i<N; i++) {
        A[i] = malloc(sizeof(int)*N);
        B[i] = malloc(sizeof(int)*N);
        C_simd[i] = malloc(sizeof(int)*N);
        C_serial[i] = malloc(sizeof(int)*N);
        BT[i] = malloc(sizeof(int)*N);
    }
    srand(time(NULL));
    init(A);
    init(B);
    // BT = B^T so the kernels can use cache-friendly row-wise access.
    for (int line = 0; line<N; line++) {
        for (int col = 0; col<N; col++) {
            BT[line][col] = B[col][line];
        }
    }
    int i;
    int num_runs = 20;
    // Warming up (excluded from timing).
    matmul_simd(A, BT, C_simd);
    matmul_serial(A, BT, C_serial);
    double elapsed = 0;
    double elapsed1 = read_timer();
    for (i = 0; i<num_runs; i++)
        matmul_simd(A, BT, C_simd);
    elapsed += (read_timer() - elapsed1);
    double elapsed_serial = 0;
    double elapsed_serial1 = read_timer();
    for (i = 0; i<num_runs; i++)
        matmul_serial(A, BT, C_serial);
    elapsed_serial += (read_timer() - elapsed_serial1);
    print_matrix(A);
    print_matrix(BT);
    puts("=\n");
    print_matrix(C_simd);
    puts("---------------------------------");
    print_matrix(C_serial);
    // 2*N^3 flops per multiplication, num_runs multiplications per timing.
    double gflops_omp = ((((2.0 * N) * N) * N * num_runs) / (1.0e9 * elapsed));
    double gflops_serial = ((((2.0 * N) * N) * N * num_runs) / (1.0e9 * elapsed_serial));
    printf("======================================================================================================\n");
    printf("\tMatrix Multiplication: A[N][N] * B[N][N] = C[N][N], N=%d\n", N);
    printf("------------------------------------------------------------------------------------------------------\n");
    printf("Performance:\t\tRuntime (s)\t GFLOPS\n");
    printf("------------------------------------------------------------------------------------------------------\n");
    printf("matmul_simd:\t\t%4f\t%4f\n", elapsed/num_runs, gflops_omp);
    printf("matmul_serial:\t\t%4f\t%4f\n", elapsed_serial/num_runs, gflops_serial);
    printf("Correctness check: %d\n", check(C_simd, C_serial));
    // Release everything (was leaked before).
    for (int i = 0; i < N; i++) {
        free(A[i]);
        free(B[i]);
        free(C_simd[i]);
        free(C_serial[i]);
        free(BT[i]);
    }
    free(A);
    free(B);
    free(C_simd);
    free(C_serial);
    free(BT);
    return 0;
}
|
1.race5.c | // RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s
#include <omp.h>
#define N 20
int main() {
  int A[N][N];
  // Intentional loop-carried dependence: iteration (i,j) reads A[i-1][j-1],
  // which another thread may be writing. Under `parallel for` this is a
  // data race the verifier (LLOV) is expected to report — do NOT "fix" it;
  // the CHECK line below depends on it.
#pragma omp parallel for schedule(guided)
  for (int i = 1; i < N; i++)
    for (int j = 1; j < N; j++)
      A[i][j] = A[i - 1][j - 1];
}
// CHECK: Data Race detected
// END
|
atomic_read_codegen.c | // RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -target-cpu core2 -fopenmp -x c -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c -triple x86_64-apple-darwin10 -target-cpu core2 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c -triple x86_64-apple-darwin10 -target-cpu core2 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -target-cpu core2 -fopenmp-simd -x c -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -x c -triple x86_64-apple-darwin10 -target-cpu core2 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c -triple x86_64-apple-darwin10 -target-cpu core2 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
// expected-no-diagnostics
// REQUIRES: x86-registered-target
#ifndef HEADER
#define HEADER
// Global pairs (value `*v`, atomic source `*x`) covering every scalar,
// complex, vector, and bit-field shape exercised by the atomic-read
// codegen checks in main() below. Do not rename: the CHECK patterns
// match IR generated for these exact symbols.
_Bool bv, bx;
char cv, cx;
unsigned char ucv, ucx;
short sv, sx;
unsigned short usv, usx;
int iv, ix;
unsigned int uiv, uix;
long lv, lx;
unsigned long ulv, ulx;
long long llv, llx;
unsigned long long ullv, ullx;
float fv, fx;
double dv, dx;
long double ldv, ldx;
_Complex int civ, cix;
_Complex float cfv, cfx;
_Complex double cdv, cdx;
typedef int int4 __attribute__((__vector_size__(16)));
int4 int4x;
// Bit-field layouts (plain and packed) chosen to force byte-, word- and
// multi-byte-granularity atomic loads plus shift/mask extraction.
struct BitFields {
  int : 32;
  int a : 31;
} bfx;
struct BitFields_packed {
  int : 32;
  int a : 31;
} __attribute__ ((__packed__)) bfx_packed;
struct BitFields2 {
  int : 31;
  int a : 1;
} bfx2;
struct BitFields2_packed {
  int : 31;
  int a : 1;
} __attribute__ ((__packed__)) bfx2_packed;
struct BitFields3 {
  int : 11;
  int a : 14;
} bfx3;
struct BitFields3_packed {
  int : 11;
  int a : 14;
} __attribute__ ((__packed__)) bfx3_packed;
struct BitFields4 {
  short : 16;
  int a: 1;
  long b : 7;
} bfx4;
struct BitFields4_packed {
  short : 16;
  int a: 1;
  long b : 7;
} __attribute__ ((__packed__)) bfx4_packed;
typedef float float2 __attribute__((ext_vector_type(2)));
float2 float2x;
// Register "0" is currently an invalid register for global register variables.
// Use "esp" instead of "0".
// register int rix __asm__("0");
register int rix __asm__("esp");
// Exercises `#pragma omp atomic read` for every type/bit-field combination
// declared above. Each statement is preceded by FileCheck directives that
// pin the expected IR (atomic load width, bitcasts, __atomic_load calls,
// and __kmpc_flush for seq_cst). The code itself must stay byte-identical
// to keep the CHECK patterns valid.
int main() {
// CHECK: load atomic i8, i8*
// CHECK: store i8
#pragma omp atomic read
  bv = bx;
// CHECK: load atomic i8, i8*
// CHECK: store i8
#pragma omp atomic read
  cv = cx;
// CHECK: load atomic i8, i8*
// CHECK: store i8
#pragma omp atomic read
  ucv = ucx;
// CHECK: load atomic i16, i16*
// CHECK: store i16
#pragma omp atomic read
  sv = sx;
// CHECK: load atomic i16, i16*
// CHECK: store i16
#pragma omp atomic read
  usv = usx;
// CHECK: load atomic i32, i32*
// CHECK: store i32
#pragma omp atomic read
  iv = ix;
// CHECK: load atomic i32, i32*
// CHECK: store i32
#pragma omp atomic read
  uiv = uix;
// CHECK: load atomic i64, i64*
// CHECK: store i64
#pragma omp atomic read
  lv = lx;
// CHECK: load atomic i64, i64*
// CHECK: store i64
#pragma omp atomic read
  ulv = ulx;
// CHECK: load atomic i64, i64*
// CHECK: store i64
#pragma omp atomic read
  llv = llx;
// CHECK: load atomic i64, i64*
// CHECK: store i64
#pragma omp atomic read
  ullv = ullx;
// CHECK: load atomic i32, i32* bitcast (float*
// CHECK: bitcast i32 {{.*}} to float
// CHECK: store float
#pragma omp atomic read
  fv = fx;
// CHECK: load atomic i64, i64* bitcast (double*
// CHECK: bitcast i64 {{.*}} to double
// CHECK: store double
#pragma omp atomic read
  dv = dx;
// CHECK: [[LD:%.+]] = load atomic i128, i128* bitcast (x86_fp80*
// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[LDTEMP:%.*]] to i128*
// CHECK: store i128 [[LD]], i128* [[BITCAST]]
// CHECK: [[LD:%.+]] = load x86_fp80, x86_fp80* [[LDTEMP]]
// CHECK: store x86_fp80 [[LD]]
#pragma omp atomic read
  ldv = ldx;
// CHECK: call{{.*}} void @__atomic_load(i64 8,
// CHECK: store i32
// CHECK: store i32
#pragma omp atomic read
  civ = cix;
// CHECK: call{{.*}} void @__atomic_load(i64 8,
// CHECK: store float
// CHECK: store float
#pragma omp atomic read
  cfv = cfx;
// CHECK: call{{.*}} void @__atomic_load(i64 16,
// CHECK: call{{.*}} @__kmpc_flush(
// CHECK: store double
// CHECK: store double
#pragma omp atomic seq_cst read
  cdv = cdx;
// CHECK: load atomic i64, i64*
// CHECK: store i8
#pragma omp atomic read
  bv = ulx;
// CHECK: load atomic i8, i8*
// CHECK: store i8
#pragma omp atomic read
  cv = bx;
// CHECK: load atomic i8, i8*
// CHECK: call{{.*}} @__kmpc_flush(
// CHECK: store i8
#pragma omp atomic read, seq_cst
  ucv = cx;
// CHECK: load atomic i64, i64*
// CHECK: store i16
#pragma omp atomic read
  sv = ulx;
// CHECK: load atomic i64, i64*
// CHECK: store i16
#pragma omp atomic read
  usv = lx;
// CHECK: load atomic i32, i32*
// CHECK: call{{.*}} @__kmpc_flush(
// CHECK: store i32
#pragma omp atomic seq_cst, read
  iv = uix;
// CHECK: load atomic i32, i32*
// CHECK: store i32
#pragma omp atomic read
  uiv = ix;
// CHECK: call{{.*}} void @__atomic_load(i64 8,
// CHECK: store i64
#pragma omp atomic read
  lv = cix;
// CHECK: load atomic i32, i32*
// CHECK: store i64
#pragma omp atomic read
  ulv = fx;
// CHECK: load atomic i64, i64*
// CHECK: store i64
#pragma omp atomic read
  llv = dx;
// CHECK: load atomic i128, i128*
// CHECK: store i64
#pragma omp atomic read
  ullv = ldx;
// CHECK: call{{.*}} void @__atomic_load(i64 8,
// CHECK: store float
#pragma omp atomic read
  fv = cix;
// CHECK: load atomic i16, i16*
// CHECK: store double
#pragma omp atomic read
  dv = sx;
// CHECK: load atomic i8, i8*
// CHECK: store x86_fp80
#pragma omp atomic read
  ldv = bx;
// CHECK: load atomic i8, i8*
// CHECK: store i32
// CHECK: store i32
#pragma omp atomic read
  civ = bx;
// CHECK: load atomic i16, i16*
// CHECK: store float
// CHECK: store float
#pragma omp atomic read
  cfv = usx;
// CHECK: load atomic i64, i64*
// CHECK: store double
// CHECK: store double
#pragma omp atomic read
  cdv = llx;
// CHECK: [[I128VAL:%.+]] = load atomic i128, i128* bitcast (<4 x i32>* @{{.+}} to i128*) monotonic
// CHECK: [[I128PTR:%.+]] = bitcast <4 x i32>* [[LDTEMP:%.+]] to i128*
// CHECK: store i128 [[I128VAL]], i128* [[I128PTR]]
// CHECK: [[LD:%.+]] = load <4 x i32>, <4 x i32>* [[LDTEMP]]
// CHECK: extractelement <4 x i32> [[LD]]
// CHECK: store i8
#pragma omp atomic read
  bv = int4x[0];
// CHECK: [[LD:%.+]] = load atomic i32, i32* bitcast (i8* getelementptr (i8, i8* bitcast (%{{.+}}* @{{.+}} to i8*), i64 4) to i32*) monotonic
// CHECK: store i32 [[LD]], i32* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i32, i32* [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i32 [[LD]], 1
// CHECK: ashr i32 [[SHL]], 1
// CHECK: store x86_fp80
#pragma omp atomic read
  ldv = bfx.a;
// CHECK: [[LDTEMP_VOID_PTR:%.+]] = bitcast i32* [[LDTEMP:%.+]] to i8*
// CHECK: call void @__atomic_load(i64 4, i8* getelementptr (i8, i8* bitcast (%struct.BitFields_packed* @bfx_packed to i8*), i64 4), i8* [[LDTEMP_VOID_PTR]], i32 0)
// CHECK: [[LD:%.+]] = load i32, i32* [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i32 [[LD]], 1
// CHECK: ashr i32 [[SHL]], 1
// CHECK: store x86_fp80
#pragma omp atomic read
  ldv = bfx_packed.a;
// CHECK: [[LD:%.+]] = load atomic i32, i32* getelementptr inbounds (%struct.BitFields2, %struct.BitFields2* @bfx2, i32 0, i32 0) monotonic
// CHECK: store i32 [[LD]], i32* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i32, i32* [[LDTEMP]]
// CHECK: ashr i32 [[LD]], 31
// CHECK: store x86_fp80
#pragma omp atomic read
  ldv = bfx2.a;
// CHECK: [[LD:%.+]] = load atomic i8, i8* getelementptr (i8, i8* bitcast (%struct.BitFields2_packed* @bfx2_packed to i8*), i64 3) monotonic
// CHECK: store i8 [[LD]], i8* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i8, i8* [[LDTEMP]]
// CHECK: ashr i8 [[LD]], 7
// CHECK: store x86_fp80
#pragma omp atomic read
  ldv = bfx2_packed.a;
// CHECK: [[LD:%.+]] = load atomic i32, i32* getelementptr inbounds (%struct.BitFields3, %struct.BitFields3* @bfx3, i32 0, i32 0) monotonic
// CHECK: store i32 [[LD]], i32* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i32, i32* [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i32 [[LD]], 7
// CHECK: ashr i32 [[SHL]], 18
// CHECK: store x86_fp80
#pragma omp atomic read
  ldv = bfx3.a;
// CHECK: [[LDTEMP_VOID_PTR:%.+]] = bitcast i24* [[LDTEMP:%.+]] to i8*
// CHECK: call void @__atomic_load(i64 3, i8* getelementptr (i8, i8* bitcast (%struct.BitFields3_packed* @bfx3_packed to i8*), i64 1), i8* [[LDTEMP_VOID_PTR]], i32 0)
// CHECK: [[LD:%.+]] = load i24, i24* [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i24 [[LD]], 7
// CHECK: [[ASHR:%.+]] = ashr i24 [[SHL]], 10
// CHECK: sext i24 [[ASHR]] to i32
// CHECK: store x86_fp80
#pragma omp atomic read
  ldv = bfx3_packed.a;
// CHECK: [[LD:%.+]] = load atomic i64, i64* bitcast (%struct.BitFields4* @bfx4 to i64*) monotonic
// CHECK: store i64 [[LD]], i64* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i64, i64* [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i64 [[LD]], 47
// CHECK: [[ASHR:%.+]] = ashr i64 [[SHL]], 63
// CHECK: trunc i64 [[ASHR]] to i32
// CHECK: store x86_fp80
#pragma omp atomic read
  ldv = bfx4.a;
// CHECK: [[LD:%.+]] = load atomic i8, i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @bfx4_packed, i32 0, i32 0, i64 2) monotonic
// CHECK: store i8 [[LD]], i8* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i8, i8* [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i8 [[LD]], 7
// CHECK: [[ASHR:%.+]] = ashr i8 [[SHL]], 7
// CHECK: sext i8 [[ASHR]] to i32
// CHECK: store x86_fp80
#pragma omp atomic read
  ldv = bfx4_packed.a;
// CHECK: [[LD:%.+]] = load atomic i64, i64* bitcast (%struct.BitFields4* @bfx4 to i64*) monotonic
// CHECK: store i64 [[LD]], i64* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i64, i64* [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i64 [[LD]], 40
// CHECK: [[ASHR:%.+]] = ashr i64 [[SHL]], 57
// CHECK: store x86_fp80
#pragma omp atomic read
  ldv = bfx4.b;
// CHECK: [[LD:%.+]] = load atomic i8, i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @bfx4_packed, i32 0, i32 0, i64 2) monotonic
// CHECK: store i8 [[LD]], i8* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i8, i8* [[LDTEMP]]
// CHECK: [[ASHR:%.+]] = ashr i8 [[LD]], 1
// CHECK: sext i8 [[ASHR]] to i64
// CHECK: store x86_fp80
#pragma omp atomic read
  ldv = bfx4_packed.b;
// CHECK: [[LD:%.+]] = load atomic i64, i64* bitcast (<2 x float>* @{{.+}} to i64*) monotonic
// CHECK: [[BITCAST:%.+]] = bitcast <2 x float>* [[LDTEMP:%.+]] to i64*
// CHECK: store i64 [[LD]], i64* [[BITCAST]]
// CHECK: [[LD:%.+]] = load <2 x float>, <2 x float>* [[LDTEMP]]
// CHECK: extractelement <2 x float> [[LD]]
// CHECK: store i64
#pragma omp atomic read
  ulv = float2x.x;
// CHECK: call{{.*}} i{{[0-9]+}} @llvm.read_register
// CHECK: call{{.*}} @__kmpc_flush(
// CHECK: store double
#pragma omp atomic read seq_cst
  dv = rix;
  return 0;
}
#endif
|
sormlq.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zunmlq.c, normal z -> s, Fri Sep 28 17:38:04 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_unmlq
*
* Overwrites the general complex m-by-n matrix C with
*
* side = PlasmaLeft side = PlasmaRight
* trans = PlasmaNoTrans Q * C C * Q
* trans = PlasmaTrans Q^T * C C * Q^T
*
* where Q is an orthogonal (or orthogonal) matrix defined as the product of k
* elementary reflectors
*
* Q = H(1) H(2) . . . H(k)
*
* as returned by plasma_sgelqf. Q is of order m if side = PlasmaLeft
* and of order n if side = PlasmaRight.
*
*******************************************************************************
*
* @param[in] side
* Intended usage:
* - PlasmaLeft: apply Q or Q^T from the left;
* - PlasmaRight: apply Q or Q^T from the right.
*
* @param[in] trans
* Intended usage:
* - PlasmaNoTrans: apply Q;
* - PlasmaTrans: apply Q^T.
*
* @param[in] m
* The number of rows of the matrix C. m >= 0.
*
* @param[in] n
* The number of columns of the matrix C. n >= 0.
*
* @param[in] k
* The number of rows of elementary tile reflectors whose product
* defines the matrix Q.
* If side == PlasmaLeft, m >= k >= 0.
* If side == PlasmaRight, n >= k >= 0.
*
* @param[in] pA
* Details of the LQ factorization of the original matrix A as returned
* by plasma_sgelqf.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,k).
*
* @param[in] T
* Auxiliary factorization data, computed by plasma_sgelqf.
*
* @param[in,out] pC
* On entry, pointer to the m-by-n matrix C.
* On exit, C is overwritten by Q*C, Q^T*C, C*Q, or C*Q^T.
*
* @param[in] ldc
* The leading dimension of the array C. ldc >= max(1,m).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
*
*******************************************************************************
*
* @sa plasma_omp_sormlq
* @sa plasma_cunmlq
* @sa plasma_dormlq
* @sa plasma_sormlq
* @sa plasma_sgelqf
*
******************************************************************************/
int plasma_sormlq(plasma_enum_t side, plasma_enum_t trans,
                  int m, int n, int k,
                  float *pA, int lda,
                  plasma_desc_t T,
                  float *pC, int ldc)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }
    // Check input arguments.
    if ((side != PlasmaLeft) && (side != PlasmaRight)) {
        plasma_error("illegal value of side");
        return -1;
    }
    if ((trans != PlasmaTrans) && (trans != PlasmaNoTrans)) {
        plasma_error("illegal value of trans");
        return -2;
    }
    if (m < 0) {
        plasma_error("illegal value of m");
        return -3;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -4;
    }
    // Order of Q: m if applied from the left, n from the right.
    int an;
    if (side == PlasmaLeft) {
        an = m;
    }
    else {
        an = n;
    }
    if ((k < 0) || (k > an)) {
        plasma_error("illegal value of k");
        return -5;
    }
    if (lda < imax(1, k)) {
        plasma_error("illegal value of lda");
        return -7;
    }
    if (ldc < imax(1, m)) {
        plasma_error("illegal value of ldc");
        return -10;
    }
    // quick return
    if (m == 0 || n == 0 || k == 0)
        return PlasmaSuccess;
    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_gelqf(plasma, PlasmaRealFloat, m, n);
    // Set tiling parameters.
    int ib = plasma->ib;
    int nb = plasma->nb;
    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t C;
    int retval;
    retval = plasma_desc_general_create(PlasmaRealFloat, nb, nb,
                                        k, an, 0, 0, k, an, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaRealFloat, nb, nb,
                                        m, n, 0, 0, m, n, &C);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }
    // Allocate workspace.
    plasma_workspace_t work;
    size_t lwork = ib*nb;  // unmlq: work
    retval = plasma_workspace_create(&work, lwork, PlasmaRealFloat);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_workspace_create() failed");
        // Fix: the tile descriptors created above were leaked on this
        // error path; release them before returning.
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&C);
        return retval;
    }
    // Initialize sequence.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    // Initialize request.
    // NOTE(review): retval from sequence/request init is not checked here,
    // matching the other compute drivers in this library.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_sge2desc(pA, lda, A, &sequence, &request);
        plasma_omp_sge2desc(pC, ldc, C, &sequence, &request);
        // Call the tile async function.
        plasma_omp_sormlq(side, trans,
                          A, T, C, work,
                          &sequence, &request);
        // Translate back to LAPACK layout.
        plasma_omp_sdesc2ge(C, pC, ldc, &sequence, &request);
    }
    // implicit synchronization
    plasma_workspace_destroy(&work);
    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&C);
    // Return status.
    int status = sequence.status;
    return status;
}
/***************************************************************************//**
*
* @ingroup plasma_unmlq
*
* Non-blocking tile version of plasma_sormlq().
* May return before the computation is finished.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] side
* Intended usage:
* - PlasmaLeft: apply Q or Q^T from the left;
* - PlasmaRight: apply Q or Q^T from the right.
*
* @param[in] trans
* Intended usage:
* - PlasmaNoTrans: apply Q;
* - PlasmaTrans: apply Q^T.
*
* @param[in] A
* Descriptor of matrix A stored in the tile layout.
* Details of the QR factorization of the original matrix A as returned
* by plasma_sgeqrf.
*
* @param[in] T
* Descriptor of matrix T.
* Auxiliary factorization data, computed by plasma_sgeqrf.
*
* @param[in,out] C
* Descriptor of matrix C.
* On entry, the m-by-n matrix C.
* On exit, C is overwritten by Q*C, Q^T*C, C*Q, or C*Q^T.
*
* @param[in] work
* Workspace for the auxiliary arrays needed by some coreblas kernels.
* For multiplication by Q contains preallocated space for work
* arrays. Allocated by the plasma_workspace_create function.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes).
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_sormlq
* @sa plasma_omp_cunmlq
* @sa plasma_omp_dormlq
* @sa plasma_omp_sormlq
* @sa plasma_omp_sgelqf
*
******************************************************************************/
void plasma_omp_sormlq(plasma_enum_t side, plasma_enum_t trans,
                       plasma_desc_t A, plasma_desc_t T, plasma_desc_t C,
                       plasma_workspace_t work,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Validate the sequence and request handles before anything else:
    // every other error path below reports through plasma_request_fail(),
    // which stores into sequence->status and request->status, so a NULL
    // handle must never reach those calls.  (Previously these checks came
    // last, after the handles had already been passed to
    // plasma_request_fail() — including from their own NULL branches.)
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        return;
    }
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // Check input arguments.
    if ((side != PlasmaLeft) && (side != PlasmaRight)) {
        plasma_error("invalid value of side");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if ((trans != PlasmaTrans) && (trans != PlasmaNoTrans)) {
        plasma_error("invalid value of trans");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(T) != PlasmaSuccess) {
        plasma_error("invalid T");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(C) != PlasmaSuccess) {
        plasma_error("invalid C");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // Quick return: nothing to apply when either Q or C is empty.
    if (C.m == 0 || C.n == 0 || A.m == 0 || A.n == 0)
        return;
    // Call the parallel function.  The tree variant is selected when the
    // context was configured for tree-based Householder reduction.
    if (plasma->householder_mode == PlasmaTreeHouseholder) {
        plasma_psormlq_tree(side, trans,
                            A, T, C,
                            work, sequence, request);
    }
    else {
        plasma_psormlq(side, trans,
                       A, T, C,
                       work, sequence, request);
    }
}
|
luks_fmt_plug.c | /* luks.c
*
* hashkill - a hash cracking tool
* Copyright (C) 2010 Milen Rangelov <gat3way@gat3way.eu>
*
* This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_luks;
#elif FMT_REGISTERS_H
john_register_one(&fmt_luks);
#else
#if AC_BUILT
#include "autoconfig.h"
#else
#define _LARGEFILE64_SOURCE 1
#endif
#include "jumbo.h" // large file support
#include "os.h"
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "stdint.h"
#include <stdlib.h>
#include <sys/types.h>
#include <openssl/aes.h>
#include "sha.h"
#include "sha2.h"
#include <string.h>
#include "arch.h"
#include "johnswap.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "memory.h"
#include "base64.h"
#include "gladman_pwd2key.h"
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#define OMP_SCALE 1
#endif
#include "memdbg.h"
// LUKS header field lengths, in bytes.  Names mirror the LUKS1 on-disk
// header layout; presumably used to size the parsed header struct — the
// struct itself is outside this excerpt (TODO confirm against it).
#define LUKS_MAGIC_L 6
#define LUKS_CIPHERNAME_L 32
#define LUKS_CIPHERMODE_L 32
#define LUKS_HASHSPEC_L 32
#define UUID_STRING_L 40
// Digest/salt sizes and the number of key slots per LUKS header.
#define LUKS_DIGESTSIZE 20
#define LUKS_SALTSIZE 32
#define LUKS_NUMKEYS 8
// John the Ripper format-descriptor constants (consumed by struct fmt_main).
#define FORMAT_LABEL "LUKS"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "PBKDF2-SHA1 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define PLAINTEXT_LENGTH 125
#define BENCHMARK_LENGTH -1
// Stored-binary and salt geometry: the binary is the LUKS digest, and the
// salt is the whole custom_salt struct (defined later in this file).
#define BINARY_SIZE LUKS_DIGESTSIZE
#define BINARY_ALIGN 4
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN 4
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
// 32-bit byte-order conversion, prefixed "john_" to avoid clashing with the
// system htonl/ntohl.  Little-endian hosts swap the four bytes; big-endian
// hosts already hold network order, so the macros are identity.
// NOTE(review): x is evaluated multiple times in the little-endian branch —
// callers must not pass expressions with side effects.
#if ARCH_LITTLE_ENDIAN
#define john_htonl(x) ((((x)>>24) & 0xffL) | (((x)>>8) & 0xff00L) | \
 (((x)<<8) & 0xff0000L) | (((x)<<24) & 0xff000000L))
#define john_ntohl(x) ((((x)>>24) & 0xffL) | (((x)>>8) & 0xff00L) | \
 (((x)<<8) & 0xff0000L) | (((x)<<24) & 0xff000000L))
#else
#define john_htonl(x) (x)
#define john_ntohl(x) (x)
#endif
static struct fmt_tests luks_tests[] = {
#ifndef _MSC_VER
{"$luks$1$592$4c554b53babe000161657300000000000000000000000000000000000000000000000000000000006362632d65737369763a73686132353600000000000000000000000000000000736861310000000000000000000000000000000000000000000000000000000000000408000000104f386b50df3fcd9132589a934851faaff16709ff628ed0b628b0d7151b3600c0b3f95d8404a8b35fdf5dd6b6ff10f4c352fde11900010b1762663664393836622d633836352d343261622d616534662d6165313336633938383735360000000000ac71f3000430f5d9c39e349b48d7cf1771d9c152840b389a4353ff186436ec75cc397529ed40260000000800000fa00000dead0000000000000000000000000000000000000000000000000000000000000000000000000000008800000fa00000dead0000000000000000000000000000000000000000000000000000000000000000000000000000010800000fa00000dead0000000000000000000000000000000000000000000000000000000000000000000000000000018800000fa00000dead0000000000000000000000000000000000000000000000000000000000000000000000000000020800000fa00000dead0000000000000000000000000000000000000000000000000000000000000000000000000000028800000fa00000dead0000000000000000000000000000000000000000000000000000000000000000000000000000030800000fa00000dead0000000000000000000000000000000000000000000000000000000000000000000000000000038800000fa0$64000$16ZmSCXd0RPSNXdRfTOIrQHCXSDZypjONZk0Oa/f7c+MV25Uybp8nxhF1Ez3+C5H8cISxvHTq4uSMWrloHMk4i50+n8J9B5Zm1XxZ3eVm908kfxCDGOz3SRX52e/VV0YepVgzCwxEpuHQRPfL0df8j77TrMdAQGlnA7WGjpn3RLKAHVNzC/z1ISQzgdA/mHZEUUvrswqzXQ+uy6bAidLqPmHfbRzso1NFFGY+Qc/twTvWmM1yvxj6Ajl3Iko5+8TX9MwC60u+U8p8Bcg98RfNhYz4/EzJ465ZIn1dJCBGcdsn1Hhd8ibHw7iZ8E7Fob/ij4fzeh8MmpVWg2tnGIvoWCCa3HO2/96LykfPcEafQOVpClOBHHgCdi8NhhV7SgQmP08cf8LDZXIFL9c6bmDns99cWOyWByNaaPTQb/A752FAdhepwbJFK/1X7vycFs+pUY/7vmeW+uoYOxAuALHT4OeKgnzg15GvLmRKoyLueNy56i7kB1rYrIgjNfkWznOT377awIw4mGJ75NZwFAIG8mwTS4QQtcRHUFvuB8MmvihSwPcIrtV6F+TIz+8NZBaAd9m8kZf0eDwUInOdkKlZ78Fqr53o4gB9pmMD6TvOL8oLianIFK/mA6pTsstbCg36qauUYI274LtQRKyet15zTtqRl7MIW2yTxVImEZDpo2QUhYuMPgkmu1CHyMinpUahVHADvHGZaob9J2PLX2956Z9fKpZ74caeigL0mZ0nyV3x7+AlNeAI/VZ1dtObUPtheRb4/+h9D/k6dUckgnn+xMwfBg
3woO1h4YajS6zPWFCAqL+RZk0F4tTtjkD2z36jiqbvqWAkDpySdTvrLqWRGxoV6R/TNy1Mc6XHqbJhJWbNfgnpqNEVIDbsKq86LJCO3G0PrUDbTzochlho5bLcX5ZXUR5Jp4NiAvs/nmUWdyoB3MlgW11OmEqJ/OW++h9bUj5jZAGE1ITaDiJ+Q2gq/lSUeiq/KaL1cS6w7AfKjBIFEBKuzIS65832CsnxCRGiIlDE6jCdZ1G4pp45AYsJMB89hwck7koSRSP69/MpErVWD59x5CrURE433QBAwNVW9Stn1Xb0p4FFrVyJy2EP6wx4M9L8/+xDHDEoOF/mByByKAKFdaod14Kka7ftfKHQ2gh85dv06iT6JfB8u4/HR27PHfGaZh+1OyWrO2FOSjXriZHbb4IpV+gjXoGUEJ2Qqbluj32liDKt1/HvSjjiRC2/R5a2fraVrHKmF1ZcuJMHFH/bMExTQYDsDa6fVBKpqeg0m7uYOO3ioIJoJ/VvBj9ZJwFkE6SSQwos4DaY50h674wLe1Ro+5p7z3CDlhE412XYgWKucIIY9GVcECrg3ghh/gR6WQVO8E2fJN99jAfGCuD162nSiqjTP0W6ghKeQsjKmhtXqPIp21NMq/O4A6nVSkCiOsSmm63IlmR4sjowhZfCEbYIewPBE0AFDuIm7ZMgqF8eIFX/jySxPduTWnwlmQki6B/lrqW1bb8fl6lfp7Ko1ZSj2hLerZ/dJGEsYIBzvL2yj/JlXSwzEdCfMkXQ8wvAZLlrvU3AHiZSeLCk0gVQsabCnQuaQDQgx2G+TeN0CKF+KCTGlIUbTS256wilxV6Dop12l3os8Kd4hDMt3IW5spWATBKKojypIKxWXXqyfObAP7S+c2fNqX3tMO2Qz/4LpyFSHte3MrjPbBd6XlNs2cj9zdqNEpPPeN6XqsdIZVRJ/esrcJ1TUHPYyH/rKJjBPMgi4ap8WObDdfg1HmJDWpvBD+YI/WrUoPxJdYSHpmMfjVlgykISwXdUWZdXgQqoLuXKzyNE4Rt0uQYJu3nH9KGAMPTrqfqSUDdyeTcVsHfMkrGcu18tcNip1qvFmL/e3LEk7zeVGeVtXJ0dKlm26P3w2I7xBltX2t/OQ8Utztqd8hgPHZYeFFurSUT6IHcD6VX4vrTDdq1RkrS6YHhJ14QseEzXcHMYBJq6gja2T7aDKm9yEQfwL1dkGmtDqaLtDoW2i8irPMvLAM6FnY4kfZubu7+IKMpHZPIt+x6WBCAXxbXZ1QHD6EKshVNaj+BNm696Z6u1C7EI5WQ5jSSnvoShrVxAQPievqoV12LEoqT9fMdMGBJrnKkZY+gVZsq3BoJ5njzm39M1/i2A4cpJJQEGwuf4vvgAsOKf2zk2spfkfAHhbVxAyB/cdd3nMBDEds1ow67lBzAT8ae+Lxam+EzszCuahpQWPPMXSBiTteqGjvJEflmzeD10Fxa/xEhPWXWkr2UgbGeI357VoaQ8g9WUabiHQRfNpTCd5yAwcsBZBFaQKfx6nYNxsRDR/Ii50zyIDuKj64Dwdwya38Ov3KOZFZ4q3SRi55dtFHpCa4sZL375h9OVIsoYoGxl4aWM2lsMQB4jOaVy8aFj1yMy3SB0KF0Rg+nHsn4OxjdwMDSFp4+hvW0r+/OArn1hZrQfsejmC57C5aVNRyw+9NIA3PmykTc1h+LHG1dTg20DCM88I2YgONs2PVajgBdXJDftzVB7N4S8icy3VH2dYDcO/J1jd56eQ4hnKnMzGAmtt61Zinx/kY/8qv7Kha8yoKiDbbVHJHkQZoTW3gKPc6FgK/FelQ5rUlqaWBOnsT6/PpzzQtckVkbapBUQdyIff8cascNGDvCwJB/ZFe68Yp5eIeyTQRZyzKOd2i3W1R2krthpDjS36xyON0Fi1DZIcE+llT0GKQMznIBLdDbwFcqMt8OCqcn2HRqvr/kH4AjCwnx+FCHbO6XH++cfXulwAFLqgEfRoD4Q/IoLCH
PEM03ovBYQCZoWNIpGJpjhTQDp5EEI7cz6letF83rTK4HeQWizUEEoJjgC18nYK0SuX9XSo8we817I/KnPmX/DDNBzjp2W4xXQu8DtZgb/fe5r4urs4Mrx3xUWzlMZCE5NuF4toNh961PEXuVI4nb1TeXAbqmKz85ERsd+uWtncBe3MTxlcjrYGwggz7wUX81WIX2tN5tcXbYzK4W7A17p5VQNS3dDjPERMGTKl2rrP/eDw5GDPxXeSSX8oT+IT8Rr+77WnoV3hWNFzYEtDxXg31yvXyQXAPq0OJojfPgerHslIP9znP+ku/h/Xtkoi+nGno3+oeDdgplhb7dEIG00m4NK8dhQwX//fNCukJudYaux5xFX49n1teM0cGsqXdORKPLWWaxBlTiXPYno0xaeUHOifRcemjSSqB9pIfmNxSfsBkFnkHbmXPj/BD1S20Sr5+SE7hh3KixPjC7ohESHuipWBVEk7CYv8oFeV5EI1Tos5U9VQIAS3YqL8eSmXjrUMx6Vtp8zj4dY+J+RBUQ8Cz56km0c2uDxgmhzA3VVreXyD+0MUJN4ElBaRXe33wRkp0JQM2+Btdy5cBDRP7qVMpEjYndvSGZD+UBlaFTgLpSnvK79j/DOfHENTHirK+dknhhshDGpZZlONywfH55ffejiOiCEC9zzdvREKOjhrSfUSPWnGG0Rb5tuv0ldHkbb4YxO4JyFtKZJVygB8OlTY1BddEq61RRLpdoUlvsXlLeApWUNFAsuxM9md446iGNu/Rf3RoMxL0NzVmKZPo0gT+a0wW2CHEq7k/YvHMeg9gcRuvtBWSKUkqIgecaok/rlvZtOVJ1/EX0uRpxfkkTcy9I+MPLiKhZyqSsM8OwYjGHHOWSwrfu2ieCTEyVgqzebf6CFJs07k9/hn2CF8kbH4ewHzNJtvQE8To4VGmGGuf2lFuLQ4CqtWgxcLqhVZ0CW0ZzSCTVDeRXWqJSJifAdjQAcm53cX8I65dhXvNjD4zHyypgJAb7otbNyjuuzFS3TXCPOb9dd9/CAglgwiibaAxqX3C/Z5QBVo+nKm6iZeKRm7Rp5qm6I8xlYkatboUkDg84SFZJvxM0dpP3Fqv5eKkk6jRKq81hOz1VRw5htatwopRJpwI3YDcSc0JKT/PaQ7afMH9fAPKBl9+QIbdnFKIM34oxZei0WsEQdGAQeGDi5rH9fs3wl4Bb2OfUSsInrjJAy1rsxd2UUB3VfTlUZDckiGxVL0eISewKDHmQs2c+xGMbQZ1u5d/mrWbvA1fCGor6y0FWxcm6ysW5+oE0Ninkjtx3gsEBXK4bAQNlaqP+rSi4wh68VhdvLPFFOZO1u53jb4Lkc+wEN1V7EVBuNhR44TjEqQ5asXZbnWcFoJr1pIo9+9Hh0OHlRtz34zGNrtXgVDjdY9voLTdMCOvd9YaRFrOAWlmSwpMpgqEUHsg8T+9XXbWEcKVEPEvKLeCbLUSLgRF+EbpJ4ddIQxO4GDVr+FKrXKweY9A6XBhscAg6JyZ5IEnc1TxTR1Tmu/wSJnprBWcQ+KMdFEXbAFbkj2gsPhJjzJajWHWa13rKCrStu2cYwN41U8YswG4sHrRdMso63SUXX6eNIRVeaHvZyzPZX/4hzEVQ8YJzevhaIE7HfwLsVGmozNUOKdG7lBR7gKUmsO5yTfdsFmIkr4ylTEFzYQSMKhGvm9HK6o9C8ubc8/TZ1kwJzvxRoHcpFnIRq4rDDk49s4+yenQ0atmHVp+vR7vxErssKkJ6AhmJVXITy8G8tcnXvs9xDJBw6wj7ku8JYAjk/vBPNIDwhyQn5z0rbgr7MyAFFrE7LxGzCY2XpeGIubR2d0CQJN8v9LuNhPBJwUKriWfnFcWTQ82o99udMzuYUQojkdrIA+QIdiDnaJfOsySUNG5jYjGMFPoTQ8E3ROeP8Vi4v7QisMzZmXFq+8RYEENjelwSNZZvablrFSlXnjyrgeA+RaYsKOgmPEi2x0DZUXpB4GI
iWHdD/yUVYa0pvKLhNIE/ZP7Bscz6bPK6OS73/MpW8I57PRLe1n9fA1tXc+sGAxKLNzkRCt2tDKSoGFxlYh8/cTnub6jJPuf+glMGBJPRa8ttxVoRqedCymGKKevg7wEMaNOjn+Iw2k5vm2ZeL+KO3eP3iAI0V+iLLJaozS1vazuFsfiKAzgPZc5VLypSzC9rgf2qeqpJlJvmS2OF/LjXTgj7lNTU6CwcYvJ0xzjOKhEoDf/EAhBYB0OKbJk3v5Wa+Odr7wAop9rS0RCj5luxhYL5RBeP/l5qPplrN8Hq890f6deUxOSHjbl0tiTluzZE/lo0Sreb7mmEx4OO8+F+2GBSWEtdaxsmnB0e7VNQG8hD9A9Cq48QAE4qX8vno3VKo7vvtGpOAlGAz3h2CTg9pKQ1TAGmmFQaa5JXZlFKGStti9H8oXrMXbXGz/2HmU306jwIxah7vBUOvRBoWS6+pIy8UNh2LKuS7UDkEGz9gBYTvAIA2tTZU+gFZFtrVfGERP4YAyxH/UayVMiSaU9B42oEUsml88Mv5bXRVtUbpF9OEjk1PVzG6XnVqCZDlxKkUo7bkV0sV84+XnTc9HrYnJMt0m5amB7DxWVIziewfHIauDP8ggTaGHwE1FYmDYKC+PsDqQipGHABCetYe23gmVc0DLFfZ3mAD/0jYXWVE1Dchn4txB/5dbupiZot/4cyuiFMNZOLw9UMDakSnTWjcXOV05mKJoHsFEedpVIgXtP/lSJW/cSOARLoGJxB942vZNlOeXHrCviMDxbzLU/E4mp5Og1WuNSOjOkyV+KICowmNXCzeWADvRcmQ2D4ehKE/g4gtCx+bahlSRSVD7qjKXed2D2w99+FjuA1Bga0PVs+LdWJwLZTIkvKwEr0LA1Ai0xlXpDPYxr5FZVJdQXTw20PiHMs8PFbnLBlz/+Q5zkZBOGeyd/j0NhgB4LVOUOkGS033TwIoYPbRq4qSN26QhDwcKjxlh/y8WSt4T4VpEbisvXyfcQ67wzPT3hmXYRZvlMvPQO8QQ+gm8pWqQ70csND9eXWLxufrm9MZJ0Al138IaAJpe6SzIuEAwh00x2DdjvTE6bY8OHEKoiSTv1vtXkVePeqNkEMEhr9XEs+aKa+hW5j2XngTfc70GXR8KkoDP/U2Nquhc9M3MElPJFZdjCn+ubm+2Z53SU1s88xDDXlXFJFW44eyJWW4rfOph9UiXmT6XRDVRxf3YSSB4bBtKxH8m32EQHz9V4g7kR4TactMeQ4nj/ng2LCSdl+HjdlVN6t+o7ZlEBt5MwAEMADHu5Zmv8ATVkfTFt7HUlEv2rdq0468BkBUE47TRI2gJ9S0MJe64iIgw14ZHMbyJvd3t2Uzqrwli7dVGaLiOnGcRKBkqW17GWzIhIQZvr22NPRa+tniYlxJu5rKCwED4Mc0cN8mTh0+cnDyaib3fYBBm1qzB8fbV3tO7zqfFRtBnh5TgFFldL0uy6sVIsiW5s4x+6jD5/ulXPMsl7j2Og71OKv1MsH2HGZU1M+h5Amyg1EWAgvqK8v7bENT3M0Qb0v5RP/ALz+am+LsZisiS034eOyxb+8OSOuL2oSFKUDEXMMRDJxTj9NawN+QoDMkX337A/7YKtrYclzfhsngmQf65oBisGBdO3aIcCYH/I5FdZjjfrTIs1BoLz2qdN51M7qkocXvz5o8d8Q9rhKxo6hFRkW799SKyoIgLFZnjt/Z5VEW/yNItehEcrbv2MCd5Rx+GqJC2I6MAV9QYmErsOsS9tRFWxFUB6WYWaton3sx7K2n1FAqQxZXuF3kExxyFlhVnbFhPwwNV/xQ7qEGSmjY625eSByrxajVpSRhdprn0b9j6BIADzXS6qOHsttS1iVN705oAzLvZLv/b3JtMYkTCu2XBRX8qsda/4tvCK+V/WXE9ZTutujmeKui+qkOJSa6Bh2m9pmUQuR534M4+Pkh3NCISZTL1rZdv6ADu3nHWDeo9dr8Fo7HbYllyR
06D7Av4ekBfHb0APaKL3CV23Dtfc3+T7DVqte41HDbjxhGEwCW9Ak46bgtGqIkJiU5hMYRahSO2IGNsmxIwuq1xOei9f8e9Ml4ZdXPriTy+WxSva5e+BtNSv2UCrrZ507bxr+GHIyajKWnGa2So3izxzuBWcYsOKbQAKL1lhkdUwoiGgNBbrcQwkDLXd51bM2TWFSsMYBuBhLDQ/SGUPGOml8kN3D+cgKfds/yAHOTEfyIUJwHuJpSFs9u/mCHCt8mwVWAZwIViPx6yrqEgPsIjy03sZCS0IsM9HxNzZlPBRdEzzsqBavjjVVfqQ3ue9JyU31EtxfanhV3zFieey4h05cHSqvjicrSvQ2n82zW0AFgJ5UUiR1lCBtXdI5tKTFYxXh8cipP43jSXeNHShpfpx9pWS4uvyBdEKFJf+JzFI17zvlSP946SQalxccP6O+2Gw7VnKImZ+4db0ZTdwtTtv1JWgq5LcQaxtbZ8QthyYY+DlSBCjxFZ0R/2PuYGDApXvq8O4qY06+l16FX441l/VUTH/vUBYIPWsqZ27D1ReDe1ZviWRRBrrQ8LFY3zvX4Oqt6DUFpU96OvCgt4ABX1K6EjTgipb13l4mZ3tR/UAktUcCEEmpYet3gk0id8yVZMVSCghTdqSsfXcyKrRMmGiON1tTy4kb/zwbKkuY9OmD95pVFOd2FfyBYsY8Jbdb/W09YmV1cuWAWDXxRHcCw17vNmKwAQ/F0hC7FWwuuqYO1cswiuZUleXgOHexeJ/WzidCdfwCpamvfWjgSeFL2gF9XbNZICL/lBGVi7zFWVAbxGdL2+X1Xyb4xhaflFw1HPL6O1mbttBBN4qN1oNtZxjKTgfZ/XJcRM5agNzR+N74tAPW7wtyjEibF6NQGQ++1gx/hkcpmWAl0CYask4R9jRyaWdrfhSXLpA7RmQBcpz5v7K+hzlZ6StGxjUepbENVvIu836Ve6Zr+9d1zDqQeUjDfrT/4XWgd7VAu9ScZAHtMHaCeCx4L8oCw4AHzoYLK58KO4jCIL8+jnZUzLaIBO2dJ8S21FsLzd3zjgepzwzZSbOD7+bZY2KbBB7voUERg3byZPc0ZwTSak3e8Vhs/IXTqRnaOyIPvLGLbz9pomXp3+n7yO7ZQ2edhLN8dfAuwUXe8HAmMKF1EvPZ7MA8mt/PYw3eJbW8gjovVcnWm/uQGEaPBIj3b1a2A/E06GsMKu8gxoWwmF9xeZ8AJGNY2nwT+0vRo9eCaBhos/uZXAPhlRcyzPoQ0Lgj7ozf2lEDpG7tSwp46wh9XMEPW62v603MGChxPPeJk5hZqD2fmp+3voVrXKbsge8NphWKFxJy98XjCdAuGlgK6PB3P8I0/NrnQmBP9dQvEmj3tjvqf7YIbh2PYeO+VGd2Zt+Fh05RWmSIDOJ0dYyRA77/naqmEirNfs8Id8KNYkvGc37secVd6MM+IyioD2CRyyT//XgKK83OkKcPeF9ARxb7TsdNlBTG2xTqeD7LOZx3DFyM/icbKa0NSERfSut9IHeybVGqFlKJOPngcwq+ODJk1FWb/tkYJahkritxOKaA9Tv3RLLYL7zu6CYIzCFtb8xdbRfDRbCTcNFNOmgnsNaNw9EaPACzKlKdh0NiB/p7NitWnDy79UbX6aDvvU5+v7EccDxiS/QtbhLbE9Akvp33W9Nn0G/IMEWlthe7ZziF7zidLqzoxDZoFSvdaPZCMh4R0wrbRAL5MAEU1khx9NRqmNQGRI2eY6dcl9Ft134Jmg2Qvt60aZJCmU6HeeyihxPf9S3szbi4qKZHrqRYJb+aDE3+BLm4wX/u5bQg3UQ6nyN99m//CuCmtzoM+/nmNzD7aLAdWxuzxv4KnQzfwVwuvKzmWLQOr6sw7zK9FVZ6EyVa7iXsCaK1rafAWUk6JdFpxwIa0LcZvyRNwtn+VXF6jBg5/xjJddHEV6F7b+w5CgKNrW1tOi6qHf3RDvKx6iAiuEaE4toE9XN7EBSNPI9
BT2Dvi2kNIADbfoAccQZSsA9pVVpFGZFfnINV7oa8SpYGVawIjUqW6QlTF0PeaEUv+IREJjQlQyBw5+BHhCY9xOn3RzT1XaOLxpZ3wpDmvZ02YbAH/YadOUEWVNYNobnqxdJcHqLRijxl8fb2HKzreomOL/w/cHVYUWWYu6BoK9iLOHsN4hIY2/scEREKk9r4N7rKuajbxs+SSZ1mkuLArMCJnev8whLbqFCF5qTNStARATU1rI4ZBDgfeGD+YW/JcxgHWAfkmsBc3ZcLWMvKEoX0qSQhstbsK1DEDoT4K/l+44sGZdkC29mM7Yf42Y7Ncm1d0VjMOZUGgN9SlC2d/AhxtoFMFoAWEFo4c/fBaj5MuWdsVRVB55dqfH+sZXiZl0QQeUJxya1rKfArAu8sdShwV06y/Oe8Gpjd8biF2Fmg40F4PdWnxW92orlF6yBYDuRX48KawMfMv8PE4/E92T3MUhe7X4wdUI1K3lfwkEkKyD0gjudimZQlXGJE7HB1a+Ozci0A4xysUJkXnkWsRl4kWPwYPBIhGoxWD2bJcWzTOJuTdcO+4dN3S3pnLMpQMxdUnAveaBCeX0xbepgSHhPIPy9OC9KNUw0L8CeqybUTSmIQXPJ7OvMtNbxQMdfquIxieZsC1Moa1MxV08MX7pc8ybhKDp8HoZpUH0JBnaOE13uYUZMcM9MQFEeq/Hwk1E+B5ZP3gOoTbidH/3jZaZHE7ldmKXihBHltLZLgjWsLwlQym8BHOpe1qy6r+JAFKdmy97Dm2d20wL4sAl3xMvmGFtb0QTc2GVEzovzitSG/w6hubuwRkY5Nkr8qV1KNbiPWj4BX/AaG9GfygVF8Y1vXW+81uxR/dN0vca4UcP4ctUx4wYFdpqsprBICfzMOaB1nl9KGfXUL6DVgJ+RfxqKvmDumLAInq/cCUbcabMEZK+8EPBSEuiiwFG7LmBqVsm1rgBZKLch73W6jJ0raGAANx7QWV7wfmGlWsmEMzFO0OX+rJRcyNWk1qPfnL+EC6yFGM8i0n5dUioRQFAL36yxlVMklOYvycryTY+99sZaaclIazAvhoyWc/fT60reJ7NcwSEIAB7UhtR+iuKEDBQwDKneKNiuvbwAA1mtubgkLLjBLMf1Syo6VzUPMFI9CW9D9eBsXhU8WYAVMQmm668+FpQgfa5em3tzw0/WUz7nky9LrN87+LjVBQB3cAMaV+pY6Rx7iJU6h+Aq/a6eO+uhGXBkOcNcJRShKQaiMr3s/Q9kxkgNX9+sAGZP/OvoFhTwYGP9MSS7tHH6XZMk4NqYnHrNR7md0S7TC2VPyVcP8r7bukAlAKqU2iAOGvCeOVUthHK4VtI3tvLQHm7cDxEopYUAq3OBNqIejmsZ05uwpTpFisfWcRYtuSX0VTeiqlkEANL1wcq1WAryA7YzXqu3af+wOzBk2psVHkyvTkeabiHkhYRPzEGQPOzGAwQMygZKUkSQ0CoqLeO5OpBblGpHokhkuDcIeM79VcqfKoIU8fJocyDex87rSnkrUAaIO/WDI+OU9TWBQTBl4Ui3V/ypoVEYi29CdovSoWRNdS8r2dYivVkIRaQ8XvNqNxEuH4cYXzpCY+Co2MM9wHHgn2EMiIGtpjU8aIft5lVSNvg+Cdpd/hMFkfukV6EkNiuRIBB9Doku7OFVj7Trqt3IN4mAvXqRItz3SedacGPU/ihGnYVq2+26BpRnBQzF95Ld8yvIzp6+vebplITsEWywUNIzQ29BO8iuMwKd66Grd2kxeknyMCS+gEAiLxYgB6yohNS4ZbE974Ttch6dgM7huGIrijVrHYEvlTk4gd9gL4uiuStAFrgWZpxUrJoXqsLCgVVmUgGm/AsGMDOrIePoPmrKP9mI+Nsbf11VzwYDoby7doWLs80seqFT8s0efUwesN8mZ5NfHapg9cGuGCvUM2CPq5trVJsHKN8itFLoIiD8Pzwd+KTSf6ZxAx3HrPGRQQuSmfjCXF+uPx9M
j0FIq5iymOh0gYeY6iSkl1YrQNiSS6STJmdIAivJJqbPrTQOZIttS0JejbXuiC62w2t4mnts0O2SL1x1iS7JoC4DD5ThCWCYZU98abcSWjYT6C9AADDfYQIMWTtNq1NCX7CaK3aEx1IY17+wxRjbdlE4adBavxxr3tMFra95tL+CjzTlRe6vA0Cy+HfqV6+9bHZjuG8wI+A8bbuv6ShKJv5f1DhGMZbkzZCMGE9llvm1epO+vgq/XdZNepu0YxyWnTfO0FTIITIenTNo7rKh1RPEEoycJxkdEyauoXAG0pGtQt/v8dxDszke0735gEWWsA77FkDwZfsMDva2V9agVmpTE2e0czoOubBuVpkIkqPVa6Y6DjfL4GYagCXZDzTmFFi/Zbl7C8nMmtX+60nddQRJgHyU0l0wmLMJbWHYa5M1pMfwG0pvrklo3XoGFn+0T+EpcF30z2CxE85/WnJJSuPzDvToylpt4nC4RihnCwT+5meGgIBnxa4udJ0DAQXD8hPJhoX1zkVIW35KcMY8pEnfb+dGySDtRzTzsaTsnzgnSG2ScxUToSRuzQ9BfHpb/nBTGCgoJMfmjQyrq2CZOsLGXngSvLpoP3OvVMhif9TnVZyqYxCJllf3VT926HZUzpI3Oohn/kotUn+4NZoZsFwrPU5Ljijnju8GOaDGfKniub1TKPHwCHLEv9IFEsF6lAajxxJFbm2cD4yr2a1ekHAp8/QZmHxyDEc5uCF7PdKnJZYdhpGPhvQIjS5O5zVytFd7huWFE07qwq9w/56dACZa1J6UQ/86GO0d2xGkc6goaBwebCJyHiF+iiOIdBsMjmwOCycwk4T4OjGovACooc4cwzwUYPtYFJ4591JUR2ICYZRZNtbR1xl1BZHRzePrQqE6AM57l++L4mX1T/OJDGUt66na7WJWIkpOONoZMTJGfHcs2nKrYe487t49eRYqat8fDtqED6fFXiRxLZnHxlEtNTCzmiY+WP0etwOM85SiJ52lGVEaXZ5KMcrZ1PIDBaQrTNTZ7RZftz/xpTGGXp4lweaE3VGSyjyrguo3IW/QcjydBiBr52rtqN6kEc1Le04a9ZlndHNxAE8UUyeKxTT2kucKGHYo/n6BCG+A97lNAA6HQTZBQ25ncFpz34eXjHcGpMJOlM4qomBkz2ek7P9Lyuw/cTI4oO3J/Bs19TIwiDlYsYIq3N7VeCj42aOxznUNblZJVEIcMzTyfzqfYlx1ATWyP0Gu7JkEF5CrrS//1ozo0G+J0vBLuVnUwMlifz+eeK40r92DxXU7F2UuFRPKQbH0S0TkKXLt044sUbxisLPTDPy+h02pj3JTzamavsBpVXS8mo5yRPUxjAuXMoEq7UfRb4q9R33GfQP25ir3ZZwnxWMi/85/WNmNXc3shKt2HLXOafdsvSBPkrJw/J6BDV59FAZySxJOvv0Sw0PqMXAymtL97Cze7Y1u8dIyuAv2FAWrmBp87JeeAl/+2krQmONbXbKLtpi5XMNhNs64QQvlM1XNP5oY8J1YU+1NTbV8++Rfl7XCFHmpWi1Rcc9FSPIm322dAYf2SLMu1xM9T9vy2qt7zN6eUe9VEKlTGAVip6PdIKCx138BV+LEEysefD+8s7LcSC049bxGy6qxVpxKMlqlzerU7kBKxFSQB8TFwRwC8wt1tAiqHpuJFpPjVgIrFBN/Di6zuzdT3JnaSwBe/Bmb7BjBX85+bK4CE21kdGiLCOhT1d5h9QExL47TV1R3ZuwapOb84iYYCnTdD3xUuHvMZ6W9WwY3N7qFTdpQtjPQLx3gTnIhIwYJ3JUBmiw1tIpPF4gCnt+td26lCV7Ab7Kp7JxNQT+6JT/CQQoOLGS+bhuYzu03xWfAmfFAibrNZGpBXJOfgYwnQwI5oQJlYT139wkzMP82Ige0U9QALgkLHjusvYh4iAlNRUslUgKmv6Gi+Y+jWPhWH0F1S/8jxMr/J2hOigb4LaXha1V3qmgK/STNE0kZ
+6lgMIRnkumZDpRpSsFOPOCrai8ro94Z6t2Xlo0F6P9JVubNYhaPN14ZRMzj32pGaCMRTldK/NTOsKaUHfJtJ9lCrdbpEPd/Z0jZC5tblPsI8ciJA4kZffvTg4gMzb45dG2j8rb9qp8v/tnUIZPo79b6S/Bc7Po01pKgC0AzJObUhKYoxtu4PvXpyjqpOxMrM7eBgwLpOKq/qdBFFHKnYqQXPLyY8NoZiy30ctHMpS62iWHmuC+T3VGgKI6fXdGeCs0/PkAnAl+ASrm8Kcy0RUUgBtWPOuT1qB4E/bMMsDk19/3bAbFy4MucB/26jUv/tYEy09rQCKKhtLRQkjWcIAAmklOUCuor7SkyJfjgh+oUiHNzXUbSA88ZouS+TVASspb2+J2t9J5CWKYPeSbbPBr9XU8VlCTF/hqgKFppstYLPzKcvGffahA32ZLtGIS7J/WfItXkOfqltwQq4vgWB8kJ7pOyW410VLBS2EqW6Y5pVKrsABb1uSHejPJyiYKa2QPaRNoPCJbpillNsBLgzqHcHC3BzsrvDwcyUEhtx/E9PIXVnMehpAbsU/dHZvuCpHCldr7qbFwiAv9Ll1lIwyn89blYGqxTfbAHSPe8UklQjpnCQxm4EVUp983lWZLfBIadT0XkUEAWsidN7paRdS4EssHnaWIEA58+THc4z0jKP9g1cNNy9fxuOD6XCbVAgyFQljK3KaiYE8xxJInOcr31lLerVeb6dR70a3DotNuEtUo6ZzLRcNOkEOnUzWDymLRjrBb8Dqj0pYjYzWJGbpgTQH0JxeCXXhb+xzsz0/cvcZzMVYEB02uEzpB9C26hYk7dNlig+PrF+b1yR+yGZvuBzTrU41W1KahPAfNg6Yh4gjirZKZZ4B/yahzbsWNy88H2FL9U0a9oIMSFy+sSIHxP6Mod5wlrBTjodIg0rzzY762G4JyQmr4qlD/xdZB6Z8tdfkkUGQYbZDYZyT7uFuThmQKdgUe397UxLJPjZJ4+w4C7uK+LHDLD8wTeHYBre9yrdfPK0NbOh/NVZTIG0BSolpUij+3zKlUmFKOoUFenhQCJzAjxgWifL82w8z7JpG2R22OkFCaZZhQOaFLdEaxLUQNb1LvuqeH9X9T7xAgq1yPKA62skW+HQYeU0KbzoReY6m53sReRvfi+lp3p2GD+KnlNWMZMTeb1g63tzC2YkYCEHVkG6Geb/F3zz0s/GXSrv2bql72TKqvU4qduRwz9gsl1c08CQUkTXSXPXG7DBc1MxCcb81L9SOn0aLSsiQcudIuLmRQDkyIOjbN4sdEvReVztQ00jZQMr5oP3NFPbLtaYFuhb+75IM4kE0IPqshNpFbzlSN6GbDF7Hgu13jlkDFiVQus/7YRH6ZKiYzM6IUUadmQXaScAa7KpqNfip9XG16QaCN4CwAxrzPyOsibnNRDyT+hLvkfEAw6GtB8GEAqFftn9MXUfDQIfIT3rdhx0VHFoubL0DNn8EAhAW7O5FIc7ADs0LpkqgUApfVPl36U0DAseotGckdZnEqjsz/dPXcEwsF8ANz75OFFJ9AP5uCojPpLmmOajfuejwgZichQ2Q1Ggf2NZhPMSH38knQzpVzwORCXIOfrpOABf/T9RVddinqXkZn3I+00CKffbEb9qsXdV5IxT5lWOrjdBXgTWZhN0/vjZNnuj/hSDpCLrdCx4GWPlFl5LFED7CNHEgj5DlBnKOV3YEnoX4AvF80HwAsH1QR61tPjbc312wMpmES7B61om5hcH5QEmjvAaYjCUQ8FiMgfLrH52eet/gDqTNM45srYXSGgvAWdDwcRdQrsT4qaKID45e535iSJq8vWzcel2ylXuIfGSmyB6wW4LszHchuvMQfMvT9eoMFLOvU73ybEh0hX70WjFEswflHLIStL+vGJwk9q5R02HXNH73RFq9yg6k4n3YqKMSyeHXq1Wyibrlev3Ng1BRbanSdO54CAyU/Ae3aXFgnb/wNBo92TuEpmqI54
4lFThVV82j3k0+K6Wd4cxovKejPZIlcM9OsM6pjt3QJ1CSrVw8Ik90LxTw6vS+PcXfBW4H/4916fbBw+eSw1+sLVyS6Nr04w+doIoo5v+UWXwNO52Kbz7PodX0GKQZjQRokJyt4+eCVsOqDQW2bqTqG9zaVcKlszcQQ2EauHlTDNqPnunniad3AkGync62EIxWchS9jA1HKDL0yBNlGm23nOFadMDqaedw75KGjji0/xceoUoGuHDJfMfCCgA7fUVzOLXaar62miBAEsaK9qyWkqLxr/IgQufYpUOWVkL9e0ba5iDoN9l/CJmhw9JnG6ikyvInslMpJwZCLrWhzWU4kIF2GOG58cTn0pHS+g8/vsS3KSHmGe29DqszKzdRRvTTG+w+R9X7OUY9AGOUwSl5ooH8LYn1hzJLRDCHFE2PjxjohhwMzdK7mdKHYUENDBvVXPMCkph2elrw2zansUwuVP90I+W2t2AS0uyyvInaUrcPGQ1l2cQz5kc/+oWU2ah7lZqLJ8ibL66Tmjy7pYez8ReqZJDgHR7JTFFznD9AU0UT51lv1I46AhfUzXftHYeWOhYNgDlX1xMRAyCw4SP3KUDZlzADLMgwGpcPYatTaiyTdEjx7n/Xrc7I7GXKxIdARUAiShV9WOZp97rWvgOA+VJsbcUKPLE7bFtAarQ07HNyEQZZ7ulMkebiMb1SvRpuL5EMl5R9JDHn2b14oSee7/Ylsy+o4WActmB1oumc6nLrtKmRXFoeQv+VuItiv/vPeODdLC+lGSmn036XV6JGba7ShMdHqntjk9w1C+98dc38BtWUrMgaeDFj4wYWZOXxh8XPeywxJWf/xz7AjfyYnyzNOdiHk4FrKWF4jT6sP2ON5xV6QdgWPgHJGr/APSuBYRyaC20r2UcMNohYplHB1C9FKyR3n02nl7d64Fz32QA3YTKsl1b/GjMnuv2VU0h9/d07poE3gWd5QR/94vP0AZ3M2tkXyucQeOd68XQJl/2lwlxUnoXc+VbCQJutp+lbv4voyXyC/SOSEW9rQmDISqIIinYgWPZIEpfVBpE/BZ0gLMlg60j8nP+9zqg8xCkTI3ieLw7spioFHRYI8j/ixVYeXzzldPx2NrwD0KGLB9YmIZWQMsk/JJU8B3KOaqMCyIG0EpT7W9rgsvKRUryGacm7ts6Nii1qsuFNsIv3K7/VRCYCje88IcsY2YRPrVsyKbd20B5BnJsWvk7sGELZabZBxB0dW5dabYdM+RmQ7A9L/63YnGal7UC/awhCg3rvcZLgXnMXkojXIzeRgYP7VDoOBXJcOc3CJ9b4apQhUzBBzpjSrbzkLuaCsHF/mA766KgG/1juZc3becIjxAlViF+34cxBBfgP83pF3e4nSDCScoqOeKT0UsrR2RCjNGOC/5YzDmjqInbNtRmOPC9rPAHds0Srg8cak2kWBUgfI1SjvW88G6Fduy0qOXfIizvUUO9L5K99szEkYH+xxaGxvr4mxkzrXlR7WqZfnRxCbPmeGC8k9YOQOqLDM/cumaiLsNnAC26A7O4oG0R0YPlEg4vyooOiZ5kUxl/Cj3whkpIG0wgAUWlKwrmzaPCcm0JbWC1SMtO2/p5oEGo0T4y7naMe/dT0wldt/0qYUWnxFGjsnEtqXhpmb+LvuouoDPkZKt//aS6ktEcumKP7nmHM9RhktUK97a4egwISHkPLz6qgdMKscS5xtDeUgvhF5NBQ2GYhfAGAKZ4w/UUaHmTSc780XaGsPftMW+8I2/wDp9dWyKnqN73n3SZvnD/QNzj6waYBPSODVJco+wWXsLUfdttxuQjeGYJNA0LiWJx7d9CPoN+ZsMqIIV5tRFYbr/4LBoN+DWympp4IElgOu8Bds2IhUxaOh+jhfCymaLnDp4aEd5EZViJMwjnG1v2L8xwGvJykPiXy26yO7TfLk7Enbna0YwderDidA80m7oQXp3DKlt3Ph2zIYREWyBGtrRnbwujRUSYxGxHUXv
vUUFRyUJ5s/OlUa03l8arV1dtK1Hwb9SRvjya58DEMeWu0uOk59gK5CTN0BhJ9MrrVgrzSWKOWsjsppQAp6PCnOc9+kB5n5Q3i5MkxxF5Y4vZcymgLtRl6y8nGMx8GoApSrlabBclMEu4kHFO3CE4dTX0wHr+fjQhQr9kgza3RNamTtMmpYYGJsaHGvxPAAvIB91sR5KAYMv1bW9S9Fn4ZAO84e6tjxoPkRtZPijtjoKVf8FHAjV8WaAPgQU27ouYgz/1CBvHobK12ZlUA29Iz+gNq6J8QmWWkl7Iobb8YluI+WNqx8Oku+ao/ds4Q/olGCd5SSAaYBm8RH7XGMLIIFz7zSKpK8vWDSGo11MFn6Y3BBwCFG/QAZbP9bg6MawpRp+167GcsN+eNFXxOlwjlrJUrBtvW/gBvc6AgzD+GLCLk/lW47CWx+gyiHXG9JEG8rbzfIS71uouTmU6XMto95tkPa9tq+oX+wp3KfU5WW4rtE12nHtK7MiJbo73vy+0HXJ/jMh3vGeJsoaV2IL8UtNhnVaQZso2fUC20NBEh7fC9yQhEXtGrhuskVR5ZuJI2/Xl27vvDj+XlTIlsC/bNdjrE8f2Pn5OiW3mClBsiXO2yjhU6v0pdOHPPkgxHMuCdZfN+auI9ozzPE8j7R0y7G9Ej0SlU9MpEHrAmHyzV4bm90ro2XStsYNtrMa9TFisr42KB3rWVnCokbVJ2JRBdjvxIZdH0tnGZg0ZrAIKydu8e4z2Xpu/vjxumVWqQxIkqZc6kBHYk1HNci6/WZBvY+GW2a6T/X8+j/JuBbGw2AIeMD0SP4b1taYIOeudWaESgMMyKM8K3Gi8XrNYDY21di/Ayminyr1aJRwwKU0kjQJEC+NqdwdyUTK2Re+kfwvHcisVXrGghNUMVGUtFzDVyocVI2oPnzSex/nLtetkkqdZ3ccm1scBMz0sqSifoj4scUHjQgtdacRwkYBgXoWDwW/RHx+qXfGYhn4Z2SbBm7Y/rrOYVrwoEflImaaXGKLC805822pbeG4BiWCmRTPsK2Xrwsqgh7lF22Q07AQR+4eXk7Ug6+DWWjl/g+RIeMllWua3DAFeYHM42QqSjxki41zbuHMQ3WUFqPuwj+gYSyP0Kd14kpROTlm08zLHXbdSvb7/UfBZHrJ7VhrXDpA57UouM/UXdXCW8lPOTwNU2COMeqRVoB55yER5rqEuVRN/NrYYfBwgQq/n03WcZkBg9B0OZ+SSa7MAv9oQ9wr9VxlJBlG1Ku7ZQXw/Scei2yT8hSGWJCNlkMPaAnUnUVWBVG/uVlpqqN4pVNOBU1d+hZdSXXIPC/nAB9u83ZGaA/L/PVdNtZAJm+4qg9EE32tdYOU27g/6L1dVzOOiwgNoh5ose6Bch4whVRgvo+xgYjs+MK3W+5uqA6oAaL/l9wg6O7KOK03FFy46yPuWRzoNsYurGSu7i5NZHVrbtxqx0GPOrzCtUmPI5VG8AeGA9nHRpks0LSLZNZWWBzisGquldys5qXzg1DGaarUJEalxVUHjPYDNVb8GPSh3Faer1s2M5One8aSnzJnk0AUykfgMjzh8qpKwydvLZfRGseLCp2Vr9wEqdnCp9KjkPe7K/9z7+khxdlGOuShQ5lLuyIJ0SJG0aXaXVkMkW1a6VRmMeUbaot7mmXUM2vK21GfGCEF9GBarYWT7J8i8ZlyiurWvozZHekZ9DWxZijvJU/TLMNGdox5N5I0x02ZUiBMrKO76latIf62U4lDWbTu64cHFaQI9by153+CNUJWlKxhrkS7HBNtJ6Pl+ZSCikf1pGls7uqmd5mjYoSrfL7NyRhvyo7lAZ4rrlHY7YIZYKfjuoWbBydcXICZx0bhZXb8D/uP0TiJZJLTlR5GD8F0bjwS4RXnSgdWlznmUIiF72/gupSmfSz5a7xqEi2q+Ql+l0AE0w+m2ZPWTa2UD5P12sxPKPwQvMRn82P6UY2r0sc74SRytpFn9uhhzXx+1oXL
OnUxDe4BarPLAfQomOxXYtdecq3Au5+R/qleSuSvmlW9lzzg0jzb33TG4dvvDW//Ac2GMgPlHyCzjgq3LU1yCGUw7liBeKSlH3vjixZrzJgJaqQZ6IRtPEQmdj6peir/fUvZ+zMCpB8z4YbCHOa5fXmKsL2ZBhEv+Q9d/CCbG9GHJ2V8IXcmXDKwjFUlCFTxKDa1nyX1WUdJsawJHvTL5QtlfpqVYYkWHXxLDjda3nZYNV1vbFC5XV70fWGngx7Bc60xPA2cRIcV9kbB6eIsnWK7VTv+jjuVJsVnSpupKhlBd3QxWm5aJp7a+xmmCpqLXvvIZmEa143k1i+4y/Yvv9i0QJYrOLLq29sKN2wife+aBoClH0YM8fE+wwZIqnWgPYH8xjoiIilVBd6aw3rm/N6fPUX+s6/ZwSj43mtc6EyzNubGX0OSYkgugQgfHf2/SHIG9HmQRmLGGiHvUhYk78NFjN6EqFgLFSRxXiXqckCOus1oNIyMh3HEpnpPczCIlEY69fmO8VQsk7vBcS8FrBSDkuKL9jHieDiehxELsuuvByc15Our4DlAgc70gwCgQCqvimB3lXIVpIvebFKiotbTRypsCnXsdThaJ3FIUi3RpaGxT1dFSGcrJGNXuNwLUF4hKco49IFJQl3r365qxk/9Lz3sMFaUkIsMq5apHupdKQAy4Rne+WbAbQgWyq1QV4koEkhb/9YqH9/i75c/pQLUSdM+I9RIgSreRIla1aUnwfk7/rWMYVyIljf2XNo3GxYvxVneVdxfxkBRLalbUwcM6yqHnLh1/kCMOB7bt95UsfbkgKL0Sv3NMpDGLghSZrB1JNI+k/qOYwktQySU/sAivOvn7xCi0kSKRXoEj3imgTbtjhp5NzoGdpKUZfgK+zNT6gQHKvNecVEpGilp9M/RKx9QKnC47I7nubTltwLz8x+/f8JUrI3pTCvcd7h+ETznXiMtCnegtBW7F2dI62QPBKVxUr//pgg7zaKUJNYPefJ6U8TBfoTX7VHqsMRoqb8nz5IW2kuPWfRf7mJBYiiiIe29IPpZAIULU1HBGw72HYNj24H6xsq9rsvALSHbbLmcffjXSGwqy77EBCUJFzre/QAhi0w528viAMRZasp1vmp2O2VGtxSxVcWfYE2bFi+lTb3R6P3G7XSVLeRpDQ+tk7u4Fgj48fMjaqkirmWN5qMA6rVKojthNglkKTS1ltEbHWpcraZ5xDLOP26BX/JXPDiyClIEufIQq/ST6GrFb4PDRogENO3RssuNEVl/1dyb5dUcEvyT3PaB0qUe/6aPXhIWWr89EHsxZgR2B5nz6wDcckDcHo6v/jzEI66pWhAqPEYj5lvr14pWf9eah5nhOOPObGM5HZqj6AmxdS3e5MZnMC8apdgjWqQu/2MEuG1jfLtppHM9eetba/HxNj0ikGRCMQEXHqA0/l8iJMBWAd34wYB0msGsrXzdbCRD6d3JLsmLUnKIn3hMD3oj0ddRTjq89tx8huErvZYoD9Pp28mPhzPlLZkZKrPVi+F8EDDTxEaKNuvHNpHnrRQ0KSDdmeu+NuiMkiJguX5ygRIjwUuk+yLXrxq0kIDg3Tp2Ksa7wzt0Y2KDJZiqurpnsCk8GxkZbrQ6ZfyAjiFJ2oT9eT05Ku52xT7hy7mFgN+dM29bGt1xskqkt/bBSPP9VJ80kxCk6xDNDOe98QK6Q0zxQG269MEgtGBo/cCLx5iRdE6jlM1xPXj0OYvo9ZK534zlJTYiKzHVO5NKrWb7HCR9CPrOdXnCsARey0Uq72Vh1C26aqYs2iDL6SBsWTfHxmi2rZBsZFNtcbWv+yBg4ffT2YeBDNzB9dyNuGl6+Sv4e83zRudlEd8n/qZWCZPboq/6Wamx8HIHTmOSfwinu6PgeFHzmuotUSmqCW05YfwYHEhTOfhNvFxlHDvqHbXs5NmOD9CbHYtEMIRa4/rlwX/2ZHSpSuxnrAvQ+YknOizZr8i+kd5vP7EgxJ0N
kiI3tSZO+CjsVl/vAP8I5p9eJPPBGKrt4F91eujpInPHvfFn2uiTgbnI0XSY/kxdEUt9QCHT5f/2otYLmSHTZtBQ5SSZymQlqpk6uU8YMUfffSxdAzHzjofORykySnMxqn26AlS2ivLFLvThqydlrH/HcBwKWZe5ALTq0TB59r9lnwnsLT1BrzEugEhxDkDl0pl4VLBD2MVt8tch9ZccPaJ9JaqrxdswO7ORC+7fJboykaTYqVcgai6my3MizjZEK3QuJq6foGuFsPnVRduG6SHZmUBSj0YUKGCYEpvxMnuv1zTLq54ZHjCFNaNnSa5NIWxkBRNhCDYnBDPSpXrAk9fOn+xJsqlMMigbtjnzLLv8nuLFW6jmyf7FwNLK0ChN7qeehHZPuMptQiIr/yrm1NtNnSth5s5jtjDWNWXxCe18MHX1vSLV9sYWMAZ6+ghM0V5WReOauXA5//gALI/ekd9g3I8StKZFBFRPfeDmeHRDB11PJ/9R3v1/Fmpfiflc+O1CWg6AKixBQbrG5nbU6zC6BFSe7/Xop6CGCiuBXLt7c4rjnjv+SOrk7MsnnkbzrNULUE4+ZchdEvGWQvYNJclz47m6eYbFLzdP/8GnRgdQWxMCCYOFZfN49CHh1/XfVC+N+VsB62MmgYovCDzXpqlvvdb8Fc7NBDXD51NnmH2sFpLzcfTHuUnc23dXOUCb3RnNrclNv0S8OvA0FaXq+ULbFNE9tBPt+m+THl4QrAomGQwP4oyFKUpN9u0L/vdHsytTr35mX+GD9eMR8AeUXAJfQK6x9XFW3ZCXcZakr97A/k+VX30FUqFyZrlfUOi0sXJqiodeK/IIPPwhvzRwocEWdZ+1LgUbCwBoyGxftDolP6xfq8t90RAzsa0jWMZA+t9y/seVG+6q6BnCKpuYWjEylKA9eoWLnzduYmMX16qw2fMW+Cfjg4CAmcA6jkDMY79GVx9Lq/Y6y++rttsxb5/KzSXo/hjYoRa02UQjJYDPslQWy3Mui+fNEKw1gB6/pOZo3LXqzHq8fS1ywLnVZRYshNVxxVvQ2GSTcRgG+Q7znU3SRD4Pmf/ukEC4yxZncJ4I6MbFedSogDzw7I91W9bd6RH2tc8rYUXU+pMbTU8wYSPilwQGxuIsYVMWck0wCsTGSNKdBgIGz3II0Y/nulRYxbaLyc8mng8BYoY5f8O61SLay6ykmx6PVZiROYF3WPVJgpfF2BVITO6/2TVbYo72kfui4auJF0nwiS6E9DR2kUS7uMHrbxuqlEDBIgRcUN8j3Vxv6nFbYLVRlFBjqTBtwnVDf8b3GB52p1p1g882RlOygrdqxlKDMu+dJffWdhK4UQVGnUU81Culy2oXn1nKPgeuKrjXDvql+uAtd4TvUnsSyYzsBoaQ/rDeNYmsQVuEJ144PcgJIIVNIqKt3VZytiKTVZ2OrXnYk1EH+iVxV7LfJFBzib2gxr10lAVNfgdxLXrgM/G50alnHPmwz1thREua1J9NjzwtcazfqvqElN1iw126c/MB3ldOzfks6zxJhC/PVxB1XDrYLqrdfiFBoFq7sys+kPoExz2Nv2yY6BhljH2bwGL5fYdgE+MgSusAsZzGfVMDfX880Llk7fQEj0VxYOtzLBrNHKJZphH0JvTFm5PaOi1c5jMJSTngPJXpdi0Rw9/bMvvVhL5yNdNMCqGsV261oW1qRn/BvHDGGE+rxh2cIQg7+qW733VV2Goibt82t5CYJ6/+Ha9X9QD10tnKjAjxjHLrjJYxesZ+ZNBS80ePQgSnyCS3NevZDnPfuhw2EejZoh9iM9XS31VenHDypmNLyl2lt7oT6vHqVhfO1za8yHuxoFUS52JYeK7JcPaFfQrQ5V/ISzxyT7x6kUDOVP4AdJxMUJIu4eFFg3I0+iJh8wz/3RBIA+y7mQsOiLD++5OebSolSfKCxDxUYw/swofn+b7FP1twfUO1rn1czaZbwRz6u1RjhJ6aUgr5bAse5IzBapEo
kEi61R8VMf1poMocQX2reaQpZL4JPrA8iHeVBowXVJP1L9e3VEXMEgsKoJbJ+DqSQD1WApTIZKOK12F2mmgSSiE4pzPQswZJr0RqB/QnyodFLjbuOEL+dofU1BrFQofgsvo1evEqsYHpMkP76E5SNCzojRaINZG6cGcX9ceTgUY/CDu3CxefygeZ/G9rTBYIAssYOSBpFILWiJSuNqubHnfIdgRoyMBxmyTo+P9bJ6dnVuCpqK2sNk/DcWzenwudeeN2z9M0V+4t8fHk/LuDQGUWojAQuDkLE6sHAC9klT3dXwiU5SQ1mJDjyeUhriH6sfogdGN9/eejBl2nao2tF2fk8J+1E7jPFvQb3+H9B2Erj/jHiwaklM6B03yAnglZ6fCiCO2c+gG3kFZMP4bTKu7EL3vAGuQWnYDzQOPTLAjcVrkFd8F7bceaBhWp9b9KyN5fm6PqFTcJkewoz57Wi01ay9n46H76lggjvHEgoUGWntC6iiauwF6x42Tf+QPt/tcyVXv1YQjUjdK/0XqwKtLwlA5Wwqn90swX3ja9tdZD5elCKa1FoKRsD2L51iUr8cqVNfCpCuVRwTZs7CAjoFOFSn+4xaSyP2g2yzqp01kfpSz6/Karih4JUPVmEBtNIkbUbiO62YrCfFucbLudmLZsxvicjz76JnWX7BIztB+RFAAlCDAEEUvAXj4h7KRjbGCbYSXFuO1Wfhp2EPr+JRXQgWxUdz8iFLUkzZ8/xg+1ZPrAMuSIW2CSF4nzafei2paLKbW1gqCiKY0nJs5s4lbd6tuX1kniR3IM+RmsfFio2rxl7gXtcnOPIxtuVrZCAWgnzLmItwooYECy345+4Dz3ZXJarkr3R1WTgmZrAZZAiLQU/yuAahmzAtaDEckFMuPeSsM/dNGkz9p/e/9nNKyNJ6crR+RdLW8ag6UAtxWT8IDyx5IWpqSIUXIWKwqXL/82Nw2nIq1mX2OKMLSgCWCqzTliJtyj39/xYJsJ5uHD3rLHb4ICYXe38wLZPxFFjVKJDkxYyP6t6cKMRdkkHYRcExDTgRryh3kQLfcZe9zMzAuGKuZSZFP9RWdh5q/uhaOelXF98XRf3noXmuA/x9foQZPv6jTDR6iSAScntce7onzi1k+JOenErVTsjUUdTbEuCRANdes6DVcrph29S2VcAjOJ8kWlqCqQJrMibXXmFOWYp0G4xtWVMhnceJvKwUyIz9ShwDg8BQxIkojOGcs9CR+7oOkwupMp7vasxqSPPPy3W9rKYWMOtEKcy0ENJcNgXJLhfkSY1UgBQGik+aHxHVCNyBF63m6d5XAIEByHGduxjIvhd37+jFwBqoFePEpsRUWR27JRI2OYTjiBT364IOLr5h0XH9PT6lWOiCruwPumZ2/xBgAsLEUAb+FHDU6chB5JCXXMnEloxZ2hvp4hn9BXvjg976kP5CHFaCFa7JL1DgDu77uQgb26h+dQYHQa5v17/hsLy5zwexZQ7qGonj1Y4bdyAYCOxNqfB5WqnXohpFZL0vhm0ZVUZ3Fd4gm7ESaErjPX1GLulaN9+fqSK2yV5SMT54YBvA9p7UTjZXU9Q4iRCWN6hs9nwN+sQ4N0K/uq9d/ObdrcdAgI3CMrWzoHfQZ9fVxG+OhZ3vnciGR3RODdJa/9/2yrKKeapvCu3tNxC6+evXKsy/+G2Lt9ogs3Dwz0vQJo3dnF2etfNqM3le3kT2WqtSPRCY6gr+Gc/HR1kJEFgLN7aMVZXTrGRBROCfvnwu93D3ZWjJYXcb43OZpf6HZTfKpWAk3TYWmtnAH1A5nFr4f/zsmxJLF1IrqCzsoTfZDlO/konJH/WcHd76wpWyEG+yeci30pdWizuSB5OylldffdfSLjXJheo89IJGXxmbYfE45+r4Wu1bs6JGJeQMEMgQqsq1qxodz8dV+MNk/5FGIralwhiNA7geurqMGlz7v8I7iEKj7yOc5srVtSGrv4B/31gII+eKwjrv/Bxq/bIySE
tZRcOQDTNi7Cc4ljvCcW6ljK4mI6/p7LJJlilvzxIAaHR93+VfHXJOkm2uvRQR5ydgZ17EyL/HGnCsY+qSswAUcDRt5/5bBQ3AfR7TuyOvvu2UcWL+YqwwlZpoP0H03vwn/8ywGOBjEe8NrazyQ90TtJ0JoZeSOOVEf93MVDdQczcc8StxZU0Kbx2COhNVbAhrWI2szDikE2AtWgc6VLNAM/cA5x0oD3bzbX4/p2Cp5FGnu3qx635ROs8n/+KVu5dg9vXW1llRp4fc9KG37zhjOKu631NWkuOiR84IVY3WuIqAUUZJL+5bIrirOzQDHKAhKKuImq20GjOjk/WbuqTvrCB6Dku5ihDtJmkqAbK9FpP9u2Qm5kyn3zIshehJr7WgIgZo0LHWVTkrRszV6w1ixcRI1EXm/34OfKAJ6NRb3TVNA+iVIRdLLhiDXxzYvSDwwHrpG7fBvQyZw5LmTzLD0qcQOrJc2QmnEAPtpBNWCR2vwT4RHY1LBBjsdg3IjfCFjfTTd3jhvBI9NE8I4V4LpFuARdJ1cp/jp3XT5kaeAwpEFNG00uz44JioKfmG6zLcN3QfxB5V8ZNdKhoXUf1L7YDGh7haRjZCM/CGMQtCBmlpPi1dFGNItynXnHMnckTzIpGp6sOb1Ri5vs+R2GAk7jEcybAFmwsl5iDvmcj4RkNvvE6RuMN0pMMhOt799cCJFL2oM6xO1e6OqEzf+ouwQWhC5FZPuEnHdzJJOH3MbL3aM2aEjgFQfWr7jEnk+hZpjhisqTqWGh+QezjK/YgKAjgp/VrRSJd5zxhHs+3Iyj5gETQoCcEB+2jRVL38NCC7B9LOCI9LqEFnF+qqYVXonA55KlJiCVnW16EBG2hLccolccFV57I6fczqsVKYPjet3igNFutl0xgClIL7x6Z1qoRjNF3mlAxkrRv55p8SMoZuF+Psb+r/pYMGB5azPk8dLbsXRe9vvVWeNmKEbDB8OCt50c9E0gsO+tgYvtYVndXRtPgTOOFxm80u4gbZ9EGnfFRuWPQWzX1CZFi/vFc1rUL2QpDLrK+Sof9cV54WmeB2kUQnfkR34i73q+CP4ADmmvWhvU+Lhjcu8VFR2c/niPqPPMmNIOxWpd4k0BiXr0gwcSuUprUjtQeHGDCa6U4nMGcA3FMjiHph3KmXlHyuxYExeFOq8wxGoz3/QunWn+OsTbN4Q2X+5ltSwFdt/nSGS+xVBuUYcziv4ZHTNcvvY/8QaCiNugtdmEIqPg14BTxG8V3Sad0Ijc98U9QjMuMDUAtJUOr+Xd+9Wl1BrbmpW80x25iIgePU7K0H9Nj+CQmZZogpL0CzHbTx1Hesfz9KfdyBHm1vzuh0Io87Lok1+wWFhMAtOUYRehT6kck4M4zqENo0vWQZJE7Qlu5Do5OcyJjD4EBsev3RKYkdoINRGLPo5hoS16H/xwVMUk7lHnr2qya1lhX/j0GQSi3x3PH1nRzGAQGhyUTo2ogRQkVIbUBL2j4HmNEpFqbdPn7+qtKJ0yJxn99Tgkxj5F4UOiSR0GznYBhvvZwiviGOvMNm4ywFF+AlA2VMFI4GIh4TJcLZhj2H9olmhsEtP7G401HWLCPJ5r02PwD4MDRjt8biKQbbbn8y4hsjbAoPxpejZGE0T4h5oQAKCMFky/Qj6opLTfhW5IJJbCa8ur6aTUsYw/w9X0XBvtha21+7rgyH9+6ati9mv50VgPhBY/JButO2twDOzOULaTCoJ0ZoPOEAhdGtXtVeJNJiGWi8o1cV3fImcW+b1ibfFZm/cztvZ24Ewcfxar+rWeMQd+5DahoNG0wk+rq3ClZTKgBHzD5sI+qvT5yTat0LWoFrknZo4xMSOdfnbPpo62OC47VYzoYSKsf2h5jVCsV+jN2KOGvo2BZ6vgiGT6dmApNWqsQ/naqsfgoNm4/XtMeW/+YpfBzjAOa0zupPOj5d7P5x3Jno4H+B5YefTJBvZ8zgMdR9nI9kVH42auMCr71BDs
eAPlgXD3mKQ/bLAh3m6oGB542yp+y7WjwbeGZCJwntuLuxVI21o3CbbsPF3plfCYg742+jkUzrOS1xNZFBiCE7COgVHW3Y6DFPNlJufbpXt7tgEmr07EIwXj/Vq2XtkQ/vlB0JQktzYQXRt2zxIxXOWqPKLdN/TX2WXimNON9YqJ9KnbstOnZ/bWZJPaDAe4LLO90nP6yivuPLmWeiXjesuuHD58GkB02vxODVr0eTu5BNHOCUs2V72QYXdP3yfz/KsN5sW6jA52bxyikDrGRzV51aLZQ61a8AslnTynRAs+10y6do6/BFeEgEbtmKGzX5SntKGP3kerN88KFHpzyw+XB80Rua/txXyL+X1yeUqvadJ4gePROo/tiv68reGRW27MPuZFAP5RkPLJ3k661LwdaVjf05V4+EdL3tVY/jyhp32133l8o3qbQDftCfkgqaDNFQrjC8hLJ20yBClVLzMioSEJD5JTKrR6gYj4fpu+8sTpYaFRA+I4yGuzWsFlntJGmyxqsreuFTLtSjSyRkIhyYAx94IXpg2+7JbDqmy9Zfv+Q/syZJ24L6dowIgLapKTBAorw5BwCDaaBGg1dYHpZwGIkCc+C6GgtiKL+kZFwPO4ktXbLQ9Ws/qMZxltaC8b21LCOI5+A8LxE+ud+KAOMSYniGcjxzXKbQIYMmv+lW8cm8lXRlI8ZCXEOMeM8VS40yg4R7T1blVW0N3R8oVI5mainrLOeYnnkw+go8gusB1u9d2SoqK8dbMESBoFbGOT+maa4YB7YQ0QYJJMhuV6kk1OqfBmeal52VxQBkaQo2Dz9Da3geV2iV/it82BoViKqNQYdh7x07RPt1ftmksr1v9LWrZbUhg3cM/7Z/ClO2IQjvN2PYccmKJ9D3FFYQFyLsFsW5Kbae3SDGfvaIM/o+ndqXsbw98zKCm8PSzM5EWvsCfe59VopFByUqxNcjmYidarDIMRJ2DS7gJ0cXhG1lYalxWo4OMZYkW2iE+LBac7i13gwfrXGKjYHo+wsIGHGs0cJvu34u0KsjO+QzH7010wJ8XtKHrRgAWb7sWuNV08rtXdjejtSdoLXh/buqwFA1rcnHfQacnu2imkf0R9NPLgHbeoc6iScYCvLpeyuOgpfUpHYzQzf/gTtWf5pVZ4trTtKdvdHD87k8GwZ1iT+zeCcGWQNevOMzopje3kTKZyQmTAN1EvTW3L0nAzjS6Z3x8ojUsYM5dnjhltW4vy1nHw9AITCbmT7LvZZZXgQK+M6Cqj61v4Fk6G+vFh8DDlIcOkHcAs8nexI0QIh2r6J59Ok1a6h2gpgUF0hOiEREF0m/IvxntfsgVxwJcwmxQ/AMpnowlGtNPqHegs7/WTuzGoV5SmMoq94gLY5bT68CtvcidwGh0UCYrpf6AuKuvYXurjtRQEvH3pW3cGrYCG6Ci0IYT/pvQnLB17vilpQOCzvUOp0rudi+Id0u7VM0KOJsvLporNd1EolCDZ1JafFW3tjEPSTSsaWY3eCsEMfW11v6SOegUOXOgEoJw//96zAAazerYEjH0ruKPy7z3AsqbKU2Gd9El5lybZrbJIcIyjJSSHeca676yy4OYx3prCSAdwHy9VRWZgAYPvQMagIll4yYU367RGyZL/oQPNSpF5uDhEv7LNHt8t45ulQqyHc1Woyv9JEOmvXUFrNq9Yq4QOGeoZxEKvI/aKWAuEObRpoWVn+eaaGFlTU8L6e2mLQSYLSmbHcAJ/McawRtlBrLo/1GwmGLyZKSxYtAr9dDXvL/t2rbAZEsYElQjFXcxgFQ1PnXEhJPzmOSFc9bbWnJ8WqpWVxezJfzY1qzcxhI3/KvzYmwQS4FW0NOpcxDx96OtUAhjAJqEtAOLorpYdGt3iJhwvqDWhFZjSxSoVEuSal3IuVOhk0xPpdOD/EZ2yGfRvnW1d56IbhNpC9DqCN+GS1fO3lOw+nwaftpO1oz+7vtiQXoNMmdG08/N4T5PkOZJfSODG0EWH
iAHkXtp3hvKorS2j5a/0H4n6QzNZ7yVWMOQEtgUd+2Kpu45sScpjGYF8I2/HY2qmlaV7TTtdwv2+0ivSckuVrhrrnz7ywINEyLki9mfR+6D8I+4aYBqu7dQ5uBxG7FB28MZ5tGNIA2z0zDz7m1zjPfK6044Go1y1FQfwSIW4pMYdi35FjVRgsCPJkim1JzBenoMsH+4dT6aPC2Nu1EN7eHt9seI4XbzlBCl+BTT49m69/dW27LCgbBkOqQwAn9DqJCR7JUZF6r+s676tZ/hIoLs3lbRJrU7JESgq+yYy/xj2gvnDW72fjXtCRO2kdhsQhNycWpzGyk1T+tkc01WmYI8Q2HSs1C9TBRM8MNGokvg8lmPJV3ougwJenJWynSVqs2HfGwbj+QglhqQYj07u0rGazcfMQtQMSHI05Vt9opoi6SOrsh4N7XfzPMNZZi1GGovVRSel7ZzHVHcF/OIYJQZXNRj1U+gUnyGntGY3XPfoel5Drgq8GgW7NPh8KmB1zpiux1I2avi+1F36os/gQVNoeithjhFRHkPkdltrdILVXOVg3+nRwLTE6AfTti7LEWMejqGse9eG2fxRGVkDJNnwg25pqDSa48TgvfY5izLabRSQWJhAlt8eYICNUAs1CN0b6Bq3crunJsNNYvvyDyorSqvEgK0faKORhi6YlOEy/F+qCYoV+WhvskL1i4EkgeQNelDUpHY+krSc3psY/cpXMZ9fFuxpBErCoIE5en3QorBIDA02U5MAsqmMmE6CwJSz9NP4tu/YXQjDJT7gXRluer+Pd02A9P7hutLUPXSPWDb3GXDFz0i64NC3M/EvntZV059ONDh/1ksEWA8E93eYuTyChERHFdvG0QY+8jreVp0WMmzB6n0IXqaLD1y4fTuKixXKg+1nF7jZ9z4yy83JA1TnyXbpr6Wfn9aqrniSRIDU7NtHGpMVN/nJrxQHovCXI6fFuHUfzFQWyfg4kfVuOfrVnHqcdlINWPEyVrdQrH5xp2Yhzy1GJfnL1xYbmLRUhJKF57xdSIJd70HfloUbZGGzODXGXTvOsDHncN7dBUsY85jG9aFgXUXVcs+ceeOWubJmMN/ax87RE6FwUuJKasN2XPo7cNdnLqB+/IFBDXDhibfPgMDp29tuDXxPT1qLU3UhjtORowD7FlWWrP6ZVXYDWgSHLU71BPwUOflO9c4deIpL9xTXKy0+XFM6ToypzfvjOQM0DkWI+FIAB3vuFVcc/GEBKabXWzSiKQ1RkQgGo4oETpQq2I74o82ga4dW6lV0+gbMPOGProtsg/XT1HQnf7kJjKD6j/Nc2roHcSt2J7EcZHpBjYdTwuEBmQitxGiRxeLkrxV7QcEkXTRJQiLYRs1r8Jmub5JLZCKBOdM0shsXjo6bMe5ndg+pEvpZaOjVdlbUUyW6Jfcjmgvz+8b8lS9r5S/nnNyjh6DiYTVV7EiRB020X91lqKuko9LM2LVC21z4rnf1XN5eHJAJtuZM5XfgCr/OYkp4UdAsirjZmuVz5HDW48TMmA4DMBDyEMEk336mkrrTKfRmv2jo7GXm5MpkJFgsY+OOUWgsxQiJHy9s/9xrhE1Tit7yw7zA7ioWYK9rsCS/DqIlMllanUDKE5+/HpaJ1yzCg2JoCi5PIjd2abKpebSnHoWjWkBwV7taOM/Phu3/BjiY53vPOlQNsbeM6U7AlJPny5Q4RUZ2OZY7eMwG62CBLtdMU7cCbortE7XdmTVqzpMJk8keuFOidV6vSF1Qz3z/UDLRhwjLq0U6RSwcpMFHnLLekqNxDal9TKVrWVMhtoiBGGV0NCCK+OeXGIAJx1usv/QKjGJ6seTYQM+ybulphjgNz9Co8pqwcUlxvvF2iFHgXbrDmeoPZZ79+ch0KfdI3fvFer5jFe6TX98zgh2Ea7QwRpTBtqCbV7/WDp2bdaT0qK6R5qBGnWd57iQLyyVuqPkc13RsS9pqv2ZxnJXu/ODGEB7Jcbu0XdPRGJQm
I/x+wegb4MH8wQT+w1O9NUUSBHVUOM47hkKf/TQYu0Xm2X0PifIfJNsj7x51f7sYoHzYZg0bjAU92dj0fjm0zK+z/f6skhOZb9Wh1hGCmAEHrh46EWCc54881LX/zZUykiXA6ZzVl4zThXIhQlfaGjGLjsAj6laOUDv0POUyAo5lZ25OM66dHxqJy+IqvCeehUidkGmeb4NtMhjtOOcxkNBsA4kWdiDxzlSqOT/AkisBfwUC4ngTiBdNs+7dufFYfcHjdbxarp/Eh6dM+2wCrxWYa6yZjU29t/in29j7zdba1C2lUtwLr4NGm2WsJXw4UzpxE3/kplC7kL1v4tcqiH+eiWtbf9GNUlTL0hrTcoZEnkNbWv5ISBaD0NYKeobXqZPfQnm38nzgDicokYyVNHJmzcSzKhQnu57/x87p4k9YCdjJ5akg/+/I54rjwWyrlk/c59VqUV4sKokGI/e3QYzD70h9A/G6TIsBY7cQfOIGf0oVcM7Fs3aecUNTCncv5tXnJKScKrRBHJWiylvYtIM5aDQ946sYHkZI5uc6S8+9Nfdc4/tg/Q0i3A6ybSHZV4T256+J7pM//K7y5zxYSZK/6Qwhkha/QWTuwUlOLwNg++etcRZa61w6CA0pU9xQcfeS2a2tTN+g8PhfOeKVEII/AH4lG0mN2kJ9Ls5SsYox5Dkbc3IFLIRQdBq8lbby0AcU1p/bdC6Q7g9n6oHBhzGaLUIvR4yPQYOWhPUxr9zvxc+scm1zQnCxr3tNFXNJKi+6uPR0XRhEnLy1KSfcRat0clPer1gG1C8MqkAjVDM0wkKPObTs97dGQECDITZVx4da8F0+WxhoPz1r+HS1sMEECjHeaOxpVlNDr6VRkil2Hh1xS3cSV4xtFGWor9K9FtBa8H0qbuPqCZGKSmKtt1DlmUDK3Ck7E5Lh6Nu2Xt5EUaTiSfvqv+P+3AYVrncodQ7IxI4VNFyGuJ+paXKCxQtqoi+TY19fEqWkIbFK1fGVtQhtbZxGgQ6a8OHnJMJH/twefOogeHEIRTpqA0Pdn8uNfLYJzPTGUxOwdITw8DNQxdOwBIm3dNPXR89wiKAR7fCweHRmGR3NIgVTFo0onjXQIW45aATVjvEgtac5DCsGuPODzL3OKUA2VMDqvazMpK6OUq1TvOF5w9K8EiE6Uk5Akt2Fl5uACrySlLPw3x/pm/yAtX8gQ+HKkPHeUbBbhmSwpwmCM9W7JsxXrGk2nTi2CYTkAfw49vrt20JzFyMccnelNXdRFUi0Gw1Zd4fEB1Q+9D+rO7n2ZB9QXRk4OywynPrYha6TqroEKN9ZBpgFFCM7Sb68p6FWQEhsbBN0/3sVuyzYHctl4iakrciiPBABxja5/VNAV76UeG6Jo0BqhzDsx7OZY6Jb0I4NgHjZoP2cu88dhavQUcsoE2+Nm9GcdpW6FfE2b7ej/CpPuLFpprAIWAnfVTMsQtazIZkHUKDwZV1pXuSaEa+3Ytlztg1gqfQ0VZLxGLmVd33PKAHX0bDIvq820+Ty1tE0zPOwFXk86WQXBVKpHX+ICFtVfoAMJ13VbrQ2/nsE6dA/JzSIERaz/re/ytnT8mkCMK4JNC9U9e1EsHSms40bSODBoFjsiMzvPS7cnPj899CWztFcKWIXuZFGijIoTFFYXvMA9wND8Y6aidLkXE713/po8tl6XdhyJhAYfsS+qWxZNpHEZFAHGC7gLBcFoCmn1v0Y/6i0MgBIjRLaDyHE3lVavW7EMfAAqtKRS1jM6NKp2itFCSwhMrlcOUpfQAB32OpB7EK9VqxRl54PZSwVQmdmS8s2IgtgKLOQ4dAtNX13SRVBRah8bkJbYAqVDjHFAjobNmRpuL7btLbxUMrKhX/3mnbASRVQJ9j7yRKu2DH6V7ts+R6L60H+15YUeaM6vI7JQllFHcbZMPzKgpJof2ZkM01HlDfHwksEwShYpugPNuc7M/G1TbHZywE8bDx4f3j+3HFOAnivHNzy0n7a
8VVAHKM+6VXFh+ZOVxC8NPxYCWEF6QJAQJJHpFM9Ioh8ExO1P0SMccZoYwtzVPjGMK93ek3Ie6uBVFABxRld24wt/DH1Dt4z2f5ABBt2sFMbR6/XniuxxmPjPma1DJnYSW+ikkzgeuWiCgW/Om6mnkYTIAS6/Pd8iBmcLq811alGUjf1JclUwis9yHWBNErx7rs12LUXcU2Knui7tw642l7IMH64Ql96viLwttrGnLoN1TbP5HlPeI4SWNBhPO1dU0sIHSZMvDUOW3giySdSVUSW4sHm7uP5YiGh9YI44g+V7pDFJQ84grWbxhMbE5aaKN0reQfOwCF1TCcRxRj3BkzRsW+rEx6HqykScruUKX26REo/ilN5FUqeELdbS7lNZj6vcv9jq1FhEF/LPA4+A0DU2TOLZqWH/BhRiWiaSrf4RIoTKgSX46zSAQzrEurHy+HCCNyFAOxExXREfCDAwU3UwOfkD44eh0QnmIbmQ7TYF+NQ1a2ffg1QShvRlju+4/hKqpPM1DuiR40UP9168b60jaxhmcJ5A7GWtf7vDH0cHcx9EYdTUWgwSHRyZqsPt0HBzrZstGqMxExgZq+cxHUudQ2FrIuWCgEjJMlW9jUfjOkiW+zqTa6lP0zAR2i3bPJCikzU4ktAoH0Bjvlq4Ut09jxd0DDya/TTLuDByhBlYB77Glupymmsr7Tp+KlN6nf7VCi6bUMY6+fRkN+6eM2wv+rudEOkSXJzMX7g8xSwF324AUHQNZSLQVsdl8tL2sEVt+tFNouTwIJJbVDKAPjQhLeTUDXr3Fzae4ghxT45atgxXD4l3Ty2tsZN0lvFqP9NHIXQcznjGsbcWkCCgPSQBFhmYgem953kZvOUymYPWq4eCrkKhuptmUiziMplA6aSsFtLKkMbgRKXd+jVolD4cdyYPM02PTInGORBb3qowseiyDFr3AxB509udpf8Gpq+kEcztfZzbTD2yzhU/oeR1HWyIaZzpKpKfkkw7oWhkbsFoeAG0YSiDGDkD/jAhkXzjlAy8XRCjs3YgDletmDw66ELxBrGPSKUDU4aIUzdshHHFkFQBW9CXNIbejJG/42vrq/HDeQFqKaqqvnvkpTgARLhSDIQAnmbSNlnYJ/TxVp6SdsH7eNjLbm3vDJqMz++emLy5LeXE8usvYfrUc/GW0q9QKtWCF9lBhuTkZdGQMIgE1QWN6lS/0wg5+xgnlngRUQY/5to0av1cxK/HMY9FLbk2PPinNwUSb9XyR8/vMmoe2hb5JO6cxORCsSAYOuuA5R9RnzV8joH2fhNit1KFzY6t94cnUm2jlTCSdFbMmoTGj/aAqGSLS8tOH8XqFA38F1HVjdfH9claGrQjFgLtWxiDPxlMJjCbP1s4i/DbmH383aRl19i7BqSZirAcHUdhYH20nh9zXVIQDV29OMlt2fEq/OSmCBEX2ZN28zd6hdm0d926x4Zj9q29unTo2xtfCnYPFx74mCv9ze4FtMaLoJklvy1ktD0kTN8vPe9e0wRd25ckHilZtdTG2W09Jfa6VOM5hFSS/TjFvWlZRAXWgXRHSXEe9FTBn0LzPc01u2bTAQ13I3+fQShppjrTUP155WJpP4Ek8FEIgMixPlWWeSZoHQG3WCqpRRndSZ7dc1dZefIhxB3CE0SefHx3lfuXcZiEMWWxwri/Uynx7OSbcZqK6kTIOraYM+AJ8m4QYXSMBd/qcrNWuflX71H/+AIkGAY3YoMr73PJBYtBDe2471PRdAiYhV8zMza1e4X+jz+Sn027NPhRFtNy4yt5WEwUBMdPT0Gj22O8S0w/+HP6TWKTmbHUniJvfcnjrYvXCcvSIjzG5gvENx0nITYC0fkFtblaON1T0foxSIKZ1MMOTtqC8UnKgHDdU4PXsKJe2cB6hziFtrAygP3//tib9etmG3MbTzSLX28AYqS5YYnCC9uQWwJY2W8oulg3lIfWEaZLpIW/sNUkgvNpOmojBx1ufHvxwg8v+DY
w0YR5UWtx3uN97zvHe3xMN1phZsjepBpC9CvJvG+Mdio3z2Heeod/0fhlR00eMukcC9uPPuf8jy0iarnfQzzekNpi6w14UenKveqvhdVEs5JtfSQm2rVwZrS3cR4FqAdSBq/5fG4G2iZHx+V0ZePoyyhToMwrA1VXRNNSm6V1hAlvdODNRv/xaaQ06ia6uprEIfFjbnWsRPiNGS5jzx4tuJAthwUnPpvM8aWKwHuHrrJktvXQNIyDokN8tb8z9sTjh7n1R3wIIMNvrxJuqd9CtenOZU9djq3+6b31CaniF2GZt6acVzf9nDAPhqdZBzStZ4iOPy0BrR2aUQVBs6XWTjIgGiwC9qq1k7LXRBPtGSxJFXn4aNuGVb/pyO/vHL6OLvVm5I4uctcROyxRmAPlRQB3tgL93WCfP9rrqI9mxnoLIapZg7MqqR1XY9AMq6nzw0IZbvt3xIScW04aBa2bFYpzYFEIV5bWal8S1LCPY3qfs0wqAQ7pRtxOLhnPVwhA29v9ocxoqOTQZMXW2BvbLVr5mEAnak87wwxSEzzk+/ylnpP15nk9JL2lwqVIuRPTKQbiI4pRkackZdF1h4r6w9aYaOEHZtC08KWDiuRw0Tj1SO3pMNcjGkrqUA+AbEnSAmKFOSKMGXkhwj/PG1fCve5zEVEm1YXRfWzzLQjic9AAXZ+ng2bk0u+ZMfyOWyA9J+xZdA2Ci3Xdgoug2TCABf7bZcv4hr4j1gnfhMhj0u10tzLgn7gxrzjFSJJs7cWUR6npjL7hhIts/xR34VyBA8bov/nRdpRpCM/rOMPb6aBFKDVD/RG5A8E5yDL6ItYUanaXMhaQqFFNipjep1i8edaMwdOUGS//ztBp9XbagQkOxpX8fp9sk72xvJwARg5hdplbxdf6dEHORSSfDZy0RrBoqMVUj1Yn+WNQNwNCvQKA+qQXquGdXU1uTAmf0wk9We+XDqEzX3hgZ6nxVmv8yw4ur5oJZJS57soaiGSd72LQ8CfdjDDQdWeP4DlpkGdw/i9swqfEYe8S2C7JzJaP3tJmOYROdmmZYW1XjZkTSzazEn5jc9hkhi0keSjNi0B2/CfPxp6e51BmUk8oPuX78IGydqqPu9nIrbK7cSWRFZRgue8PZYs1xEucPZZvFkNRC6Ti1FAhXhwyn68AWZzQGBk14eyVjhnDyM4VRJnpieFzv2EeGRCZalkJBZqQ+yD1Zgudbw+cN44lAQL1qOcI9EWxu9diE5g1yyZZwM257r6k3gZ+Q+MIIf3Uk8+KFIQiod0VX6BYHqRQD3EMj8HZwAiRc3iH9Bxo68OyTT1uaN/C9ErChE1qp4q3EUdUp7InG21UUi6lK4K5/3azQ/KSPMDIY/cYEaIcrmhUXt+2AbryVUdhtiIpx49xajhnZPZrTV/6qNuFlHFQLNahFnZ25rH/b/a4KtYsTSZ5wHWG0V7zakrj50Fe52tOjgqgBofDqpstTQ7KKOez2NsOa7IMhPBGjTg5ylb9lVPUrE50K+G5aC6a3TAGxM+/HEHnCIAVDxeZ+Z77gwYYu9/qzNijyB0PvCv8JN0HVTJ+CXawnM7xCwHRTCQ2wJaynHUzO8EGSpp5xLFHQ4ImfO8dWAvwhyViQcobj0qrgEVvltMTcoFINe2648RdLLL7FTJebAYAFJfei6fXnPjYjoGJnuVRZBXuyy6FFps9M3cNnCMhI2y8Vcf3BHUJ8ds6IpAQc/X7C2Ib4U45fgEoFmAoSaEKa0qX7Pnc5fzelqA4fa5r67QsTqyRwiQkCPBHd5pEiZk390Eyy01+TsT2biaOIuVLj/HfCj8if87wKy+i0OlWjqOQVby217euyTLfuCa3AOKpSuJ7E5pQpJG/9ETnE9YAleLiEVIk9Rj/Fww3H4J+tKTQ39mYfXRGdPAuaBUXpjHVyjqmzfonzpoAU5UeqZRPr6sg0g4w79g26S4rN9SQB/Rl6zSdN0GXdyh9yyQDzje5vsmPC4mWms3tKuB
1e9fKUNztptKG31RwJ8LS52ESdjvoDMXRi9cgHJD3c1dSSLnpgPkUs23aYfx1DJwUIsfQW5oaX4uT9vJuujzsxNgfEw0XgQrLWj9agT84dtKbsB2fOGbaqycfhPIKeUz45MoJyZx7bjLNHAUI21jprd5qcHVyvzDFy5bS30zK6ruVEogGkDPhwuxJ2Bxe7a4QprVJTel++I/uSsGrLVD1Ka/3M6XiwjVTvzUoNKB2uoOqQl21Mhfz1SwAsnL3ZyulB0ATw9JdP2wiUPlVGpkoELr/dchbrNPYjNux9ri9FoYEBdl97V/EpS7SBFoYqqLfv1quYb2ISI89ENIQAfBNICmSDN9gxJZRGAzTKdMAFImFajE7j4l9TzSdip7PDyHoSCgJRFE2NFq7q2ujBCFCkNNSPA8y1kBw2NZyky0N9RcTrYvwq1lv67iU2Kcw7sw5E9US7fXhC4yJGyaGD4iTtbvT/s8V3evwt3T/k3aArsKHS/UDf2qCSDMD6oBRHCOb8Gkfl8J0fOxr+F8X80oz/MeHB/RX/j8jBI/67IRN3W5kgFXetdPeJ+i+aVuy/RyQ7583q+Bj8ebJbKXnrA1U5k28En7wlixZ6qoGLby2ccKvgG06ADwzMpulhm/4B8+U3FGy6zWMSdBoBcVeHKgBKyGMOmOOvkQdddD/0B1AzjYzjO8iPBk+jeBY8e9Apwks1ZdB3ONgRr2nqdJnMlWOcLs0n7I++lOBCS6Qw9XqvrWU+K7be8fc9s3oFI5THYjI7Mrkc9b4S6CM+xen39XvQwyLe1vjIyHMmX63hvSkK6+z1to8xXi7VoeyU8kTsVf0CWOb9CkgEuu5Dzt79iWz+ci6pa3rwEjbhFloH5wsQ4+EUUiF2SbSbmyaVikTY9FZwzPKvmrQoDgbudbqpDCzHQsIP9sK8/kBbeId5FKGz5FeraUyJbAdgWZ9zWi6G+UID471k6l/pBwwhcLpWtvtL8yY0HiZkLsS9rRDACJXZuvm/LWpYnRYbe/jvduvImviSjB6pOB9v2+zkRIDgd0pRVo+C9gNQ/gvtfmtWTM+FMrPC1OGMtTUS+ghh1XaOU/xJv2ry1nG+dujBPespEw7CZmlj/USgq0VxPvfBUvAWSvlA+fuuZ6scqj6pSsusaeKLwOlrxK6ah+YSsa/KmyGLNPBysSTCTlPEEkHsU5CN/tH/EBd2lKDwd2MZtHRNiceQpzCqiUS4Qt15qHhwVDeQJZGTzHZXnTvZWRwzT2bgiGQADlYRGR6c7sKWduncaPEuZbjY76wmnKfjKuHWzQjl9XkN4A6L/S/8ZjDmKgWxZ15ayV1raddaryHrxEGgP67WzSXfRO7gqlHFrZFV6Q1ITXjaTtYdmgU8MaP+VZLgfdhilB1pCFAtGBZy5VlTzF7Ne6Gx8SQd+Rf/g+hQaA04PBgdAHsaYQ++6u0CenhfaQgHHYxzGd8B9wuK5fZBrRQEAubLbnQxX6jwdJZnLbFEOoUyGwI5EbHTIr4nnLFEpYFHo5RX1CMBXRGbkeyxaf52nZrrs+nWF1otWiHf9yzEY/4d/pLlaeIEubQziIeyWZkcYzQRZvHUxhnk4rZo7/FzdDEcvORPGtgc/yZ8xQooL0iJO9IaGsYQeu/UVxWhik86+YLFZ54vIVGcoLdeZfO1pPk3MZK1B7E+KdMhQ0IHEp0+Lm4fZ4CTyv9VjUjrhDQOS4aXXHuPG51Q6wHwFYi4Muh0mvAytjFR/UfW8cFlvkspE9RZUUOa6yzx7TtIj263pAyx0M4UGea/0KxWQiUC7FNq0JCIhy/2WeB24ABSiRD0IEGTaNJltZ98fbZjf0pnKWxcJ7CBPcv57Cc+k49UVkt6n90kqoKHiRW8jkJ4qlvsesy7Gh3UMn/mjwKoKE2aHflAoAARYbcXfPwQ9JQ0NHfqqVm5xPGfrkEf2AEXDiZKea2nIRRSI09ilmymFOtFo8yYKen5C++Q2u7AjgOUdpSymO+KxsosOp
1WkdAzmvsZA7VM37K7UGQwdl0nHq4Le4ifNDo6G/KUxBYo8bTfDNFpF04/PQCOHzh1bwtkDu6lj2am1e6EP0LU238MvP4acki9RiBfA86Y2UrIfglDr4GAaCQ1frJ+2Dtf6tUX9f0m1JQ6S7OrBkyebojYGom6zcciAQc+dmSVUslHCPa/Vok3Ylz5jcwza2qD3VP0q4XoHeaD3+xQUz1X1j4ArdhOxlryRYRy9Lxd53GAbIgjRH4/GTD81to1MM7eQc86pUQA7savQLGScER8PYCKv8vQ6e/IJIS3bjH54Ge/qIMAlFMww9noMw5UDuUtR0HDKcxhIzRsv/Jz1hHzxCyW1+ACNMbefNx4+AdT7t18orklHEoDH99RYZ1WfpPhE4JRboF1dZ+TzDD08XIMt8OIg7fich81GO8nqbGvcISH6cjXhyH+ecPmnMnWrHMRwdgZDHNJyjqIyd57F274xitxREy8TmhStoyr8x3DNAEUdv8vNhm7baDvfJB9qHzC+PsFjDf1s5kKxQdLC3xLN75staSZW+LmAJpxbcJXTeVwQaz3gomdyxyoq6NQTGcjH0kWHTwtPZ4LNfTfbEp+oeGL3bnx0qwOQ+G7jvfO9WMr6ZmQ8WDzrPjKdBtWK28QqJnV7OztVLq4S7hKGYCfeUTlQ3jAbJajVgLR1INE1ugkvjGYY4aHozRiBWDSyoUlYXlpYFRuDYtRtUljkc5BK3XwXmqyH+bddN/brGQNmQbXwv42c1rYbzeiisnuo9O25Y6eORTyQ9bT8KVwE2UTdznQl3B4eD7QvyMI2lfvcuGcfkwHMEghJzqAI/oW4R2BvKHwi/+AwSsptoH9LZz6QCejPvYCYHEwfs3/qmKlqWZBS4uieEeC64oZdrMU5l74xU0zR+u/dtlzzTcT0atIuycyesGrL9X5klgrgW2JNnIoSg8hO0wNJZ/ZP2kvGOj8Ix4dH9AzL/jW68BZ+9AfFnNzsqbBtKfcoxkNGT1mvaMNUb+8gw5qGcSrEK5Oe9UUDkfZ1CaOrzuNNuLLmaVfLoQlh67P7TXPmNG4BYwzZDZMan5o4BQKjUbO5hVijRuD+SsGrAeJiOLjh9691HLNRnVgfHQ8eDGXHipmV/G/nshRYDbs32Awky9Z1CMcOFgSjSQtjRgHSoshozvgGUAzCmd9aZULSzrecibVCDQqW7fdBl+gU5GL10rw7WtzT00Fb9qky0WkZB4iUfmbCUP1b2Ru1Hpk981f5N0mNzj51lsvucTOSdYeh3jkbM91keHCFugEbXEFIZEuFPSy1fAuxVjOzcqaAACbdBAXZImCyI5pbV3mxGf5Zx+IWA+GiSkO6MO9W7bKLqhZb6icrL8t/9isRxKJm0EDM3D73Jm4CSZJqJmtJ1kDuOMiYYP/+b2mxBjK+2GzBLFJPqHIQw8B6NKMNjreaVkx0UXh740k4HNQlOXRXCSsJ+sTX+c0tZNfsw7srgPKYtVqAZwzV+sPl8csIWW0XXBhNyJJGo7YLhmNnBT3orLddnhd/pkorvQWkfXgpcNA1Bl7arHnphygHgB22KwNlUHzTdNhyRrsC2kH33R3nratYlE4NxrWqE9bSws6ASSjn1ZsftXqaCqnuoa6Uu2k+bwv4fpPDx7l7lO93DjXuMQR1ykFRBgvxNLPg+rMS8V+B2WctAjlTCr6J3vpwEBakxZ/19pakAUgkZAs9tpkKXWo1heqLNlLGWazlGxmDsFBzoDQ3fVMLLrvVfcsRW1wm8fyoUYkiq0aYVx2WUbBVfkoOE+Ib5NkI93K/7HFerOQU8v1GWmeSs3xqMRHV/5zM7PHzuxRQef3Dyuz6FGASDYoVBRLVo1UIGboOO8Tbk2i4QuKbLbLIIg8cKmucGnmgM+/9jlqptpwi4eDoEKq0FqtH0TmGovr/QLl1q0C02KaRrj5SjTPHPKU/LCN7DMki1fPAJWCUCEhpW7lWs+K6Xlxh/GxoJrxlkq/OLiKWFhRo6+ndc
s60oX10YRricYlmEUQNfXynJt7aE1+a7slW1nzUHO2wWmV4iO0cS9umbU/G24ROPF9rRjLCNVggNzL26nbDnWEqMGArYFR0Xz6IhJFjjhsRHT7mrc1BPi0qAPZbEHHEetPsl548r6OiapnkTm9rArrNZR9NgwjHbW/VEnsNhsqTAXkqv4TN6CU7f12NZJZoMuDz/zriJrnsHtMNqeRmZ+kqujbN4kivCXxa+ZE3Q4/thz0bScg4VlG2DxoTuPbuMPQC158bKpHldbvcR+PV6Acj/5CZoCdutMwE3NlYvjnDrANGgS07A+1/izceJ5+H0ujWeh/B7Z7mA2xMuzkIWSfScbJ3R3wmfS4/1iloYtGwjvPSqGvUtfycycyrpgQqE/pPYP0P8ufK5+NOGNuzkBe4ELXfBToHB9UmW67o4kKqJlL2GdYIq+TzNZfggfn7sq02xnyC0ClY9S8n+OeFGCfY/WwF0oQmwBzxmy8wHwcntqQX9G6IKOX9AwQ90jkZGAjV8fj1fp485hga5N9/aZ6YDM0awrv1g8NbDYHX3pa5j/L64jtAzEjQlzF5lZtLAp57ypqW6VP8bUyr6ljiu+aLe1Bdzpa0ygGHeR9GotoiwDB7XORFe4DiCW1OB7M18yJ2W1S9M4QLXO3AXJsHHPFIszNANMx1eflAqtFgFVXDZub1aOcSWb2g/2Bn8TD85s2vMM60jPFafg6g1emDCt5VVIZYgUcssqQWHXG08ZNzSoasTgRpGQpJYYK8ExiH8mle4pVCAvRXn1wlh9Z1y24Mz3Ou4216KRBjz+kMQg4r5KLAWMbYNvN1EdgncCfOZuo4kSoVGbs7AbdXc2hf4MsRe+jownw4kkcXzGXzQ7EvgKUJsx4Umk1noGbOQ54GCLc7LLXdc/Q/FLhzD/aluJKXamEHm45exFygihmGZdCN0yELxgza0maRExjSub56Tn0IgZL16s7FJfEAuL6I37l83OsH6I33r6exnyWdabVHKo0YWEAshVc2Iq5OMu1zSEeGxCl3MuRmKLKKWRD11mATtH5NG0Aa+ZfSAyJvFFd9gKOdBtp7YEBF4ZtuNlNRYLSJqw8YhS0X0Y/SUD5AI7hzuJNJbhzKdR8eH1PXwm+pxm+yu+1VqZJO6kMObjsDC3MPMGLQzANHhvruZPDlbNWNX7ZqnOZxGOrzH50nAQofbkOUny0+qRhTW/k/Y9FN4fmd1IoFgZ7Ul+75V9P3jlXvje0H/LjtcAw2Q3EE3Ghbkwc/TVPdNUH6i1iSee8VfmsFls4vli5aSIipcXDaCOJXU9rOX9RwK+4gIZ4Mz8YS55TggdZm+uelFytMl6ufW+6ps7kYRizqJny6YUFkNvRqN19/ZOPPGBwYt0+x/5ohNVAJ5T9QikDba23ad9nxW4qgHqAiQbJHZaU2cxaj315Ox1MBtubSS7XMQoItdHoy5LogdipXS2dQyj2myuLh/MbOGWSDvM+bwLx4AyG9oK81Tzt+U2vWJvXUkFCBiQA2EZSy3Fwu2clUgRlLTMInQL3mvF6nXm/ocKZaiCcuNyfiHsyYvB5y5narKnHj4rFtY7io69WYmrQVHQFfHHWDxmnEVPlZ0mpbnmsjZvv++vW1oHLEnlbqpsLWp51050+PTelYPGEm2mTthLYZ41MYyKOioG9SHoP09GjIcqOBdxMvorU0ORgdd5Ul2qWQi2+LmdXncfdOY3ZD2ELbDtS0dkldr/8MYbm2b2GtrgVQeBBj8kf72DzuKM7qaF0LuiE8iRVUvZJyzkinGP0lJ6VA7+tNkYArHAbHaFlFjWSu5jF2nlUc3Ghop2QoMVM/Dz7D97jtzpalRvjglEhneAW42ggHtWQf/emG/xl2wwA+mOO/uKqxra2qhtR1lkLSWXak6wukXxOqHXWjuroIQHQgZhOttS2saf3ecRVC/WQou5wfMjbns8ew5Jy4B9pNXfG9jTtxMRZd08gEV/yth6lOczVzaeF7cpE8wLFYT
Sc0wJUMMCjo863K+cAZeOZAnQJDPxwvdq1Gw/qkfKHnizy2QGnAbr+ZOts8qUNrWhOArOBrZ3P3LuNQ87O2Xs4CB6+wc9F/EEQ1DF1CTU5OXtY6tGOsnif35K2+inB9itG3YYMDa6q8tEXc0DlbPwR+qvCkvfAGs4S6rdxQnBcRE84NvXQbZq9slMR9nKCSgfdjlypMO6QV1NOsERb/zV8+cPD1XIg1jd2UaeiBB82jPRtrvFXIFEsIw/iTAxJnXkoijmdPpu5A5Bp2qYaskB0AX/PD9/lGI4izNLbxdyf9ABGtiegUDMWusd0ub6gJVAYXswnoEEUEmThm+K+6Tg54HnLenby+EjvqUdPuPaKTJ62gR9EZY4bN2A5GzVNWm+rQ7XrIkSY78GjJ2bmLCmQjZ3HageMNBi7jrJHCNp1ah+XBZtEcLngmyMT9SMwGxtvlRWthl50lI1hZ+eOOqx4xJdV1rHTIUuNN0z0INcs6Wtz85w/FxDWqHXUGuOhFnESMnTArsxknCHwh9S0ay+k04ok+wSSmnRDpJvRtfQ3ndSilmOGahje8+0KAe7EMw/ImzBJVudttcfGRc9yBLx1oA+fBvb/BWljGPkXHymVwk6HlwKGmg9hvluQxa9e8L95gM7lAEzHODaBsXUVw+gahmUTH0fWagZPHn5MoMZzAgnN3Z6pmfxtLSMOkRan53lhvF4LlTdw2bu1MuPcXJHchu4ywbGa6oOtbcpyogYTe5aYb1aF+RwAL2dlpysPrH5dNKmr1BsVYn+kZP34KHljeNNXL+AQLXftu3orwKmGoy3k7VJ17etjmO46OGT/v4bAyz1aW0pIzwTDhb55rr7EGWlW7xp73Ch/CUUrJP2o+5VwdkYEhgRlJ1FgdpJjV4vci0TnfcVrWrMIXX1uyPhXNfap1+im7va/ltHXrVSKEZau+5Xjg951NSV+xCZnMTuZ8pAKxygZspekEWhdXjppYwn3WgSb5Jdcp/z31TSWOzH1c6Zmzmlvxj6iZ3QFIdYZqrsO2nbFej/0ujbTjvQJFNWQWr8uXpY4zFjDtvBSa63B5Rh92bHSlPxlZWNj5bzXkT7u3rx2AdsKz5+yj8ovRzVnUGDlePHwzR9YnKinQodB5RZgk6DP6DFAFsSkP6RAc+fq6oJEimuZLNqfEH+W+DDICy5mD6VlCr5PAWbYMosJ/WUGXdpmvGBEGd9SzpSYmx3VJj17SCM/+7SglKGmgApfXrCOfNns+FprTkpsEsOU+/IgwGBtwglseVoYgAuHgqDI4CnilBIbqc2cRLpdLjrUG4xIGaMOfiY/q5OmTP7a1t2CGFAbhkgWz47U3raHmBgrupoVxjIV4xwz+mpXCh1GuMZWIcTB1UA++5l0Cc8Ayy1C7tKb/uPqsR0xQMMp4zdF/Tdv7+W543HverKZbpn+prBUd0MvoZ9fE1hFRNY8z7UILFk08RFCydIV4mkzADMYexvq8HsN6lz7+dKcscvVq+Jn/E2euebQZgYN63wg9LZt8qZzASehzKT69FmAjFunAZ3UDvWlNJKjaw0NjtoM0PYxGRbD/O/I+yPji4d6fl47Erw4xxNfJ+JZi//jHafZCOhvqZK52l1cfURjn9jXW7/nn1BAl2DUHrsOPHN/dMrDFIcj04wes3z1SE7bJKeVluU2Ux20KAIxUcuGflVxf+KNPs8kFYAirpfrk8wUTOwkjnErQp5dgYlE52AJhtDEZoBhSw/JIyQHtyEHWTEMEtoBGxPph56gqXC5KOpM1JmXA21IO0jQ6HxHANDhJEdgr2QxhTJazuSxR5mwMFAeDPH8+YOkFuOzKS3VDXMc0sqS3iLaCHydPJny6QL28Um0D01pcrCyNnU4clPl8QgvuYa90tp/7XdQlIaIe2zldCUkc6dA18jb3Moa213laZiTE/tru4mm1KNpF+4zb5x35MsX1rz4u7qQoIUCImRlLMv6pVo+koY6l+ta9+c6r6db/TekHOQCX3
qoFWDgh/Glqr06TnXktJ57xvkYSnm6BEFxiBjTQaKcySgyMSqiPzoL0K7sfBxoBKwr2TwgfC2KOEmI0v9Y26J/lydQwBLnk8HYrht4fnmD9bvMT8uP0IXJPUwzGNkO4ZdRLZDBtsJfzKctOv4QwUL7qrOQiSvY2DigFeOV5jnG33GZVtt4dvcOdsOBeo7EVIAUKTuqjge/6/Yti235RdHsWhddpAVP8HHgHS+s3wcGFXz2kSGcAr20gRvBGBAvodoAaBjxi1/3lBmFuaWlSnAS3Lt3QPkD5NtErfkVmUUXIqpdD83MiNO4men+m34wVrg1/4u9xP96oStgGgqJgaCI/pw6cZkCcNbJgwGd48heVLHbWreNbc3R+TWjRc1h9p0W/vTUBdeJ0jh0/hfan6PNK9fFdZQ5PyH5ACRB+A/fB9jUSxTzNff4W6iL0BarCW/iLeo/NN+2jugN7h3CVzkkpTpKGAy9OD+IZsNuWAom4ByhHgwJr/x/XU/dTSkthwJsxwDrwNuINg1e8MpvtzhCyvG6bfhUoKtpQ2pVa1zvV9gQnMBhnQwYD3xb1zJpH6xIJjyPll6CbruuwkIjwEjnYI+TA4ewt2Dk1U5dk8ATUipSkWUoZRsayVDIDKeiW0ZETocRNbyvQYAI8m8G2erkWM2tFzpWR0Aia8EAId5jDB11e+huN6JPD6Fj/JR3WBtgi5FpFY41WuzXr74tJ9g8xwrUOCiyaAGGyCXsQ4uI+dV4AziCYVbRtbg6pFgK+ku7EOtpocafQru3qiB9rWRAAH2KqNoEZEZClBuQuiV2W0b/XpzSirCO3Lo5uTm+f29nEqCZMjLe04zV1Y12N/AuzojWjR85DUTKOT31Qs5IIUmEmO/k+bkwZhFdA/y7OUx5T5yl+AwqZxosFq0Fi6PWr4u/0Df2Sth9jk5dG2loetHCO9GMd6zV751U4Hr9jFikaiF6gPzsNjZkjIxM5MmFhRzuinKOAJDbKL9Bji4gGo4hCkxjVNxDeyxRdkVg0relmUHPITkSiSRbBuQqH1EDfLHSIaCvfj2YjgX941uR809XlGknPEpo68Zm+LcimXhj18gROrNS1wfM8IbZ6bB8DzO2GYOSxiCw44SNOkQ2lGocD41zNGQs0paWmMZyK30ACYB5+iN0L/9M8c7/tIg/4e9UKBfH6LCOpUaZ8ADr3m0XwzTxw44E8Av62qZkmk7DcImlcSwQKS7Rati20dDn/1oNafvoUQOizCh369m7E2DnczE4yhhaQSkrhiKAKloRID8k/7bJ/WGO47YXVZgFue1jbeztwcdUi4OT/5RkHcSXl9mDWzJcatPzgLfk4uxXd0eBKGVRC3dlXIiEYTrvtBWmN5dVM9U6OXID9gdq6eGLZebmxACYst8CPdpfEsZkfHvbyta11DMUmMIo9U6l2MGk963dG87fRKSQj97rDVE5HBPfvppci1NfdmNmnXgJg2CUXUerAV39J3+93sz7Sa+5/CG4RveCzecmNnbgqVaP8hmEO2yYhjnso/le8XlRF3r+ALeMZjkMeIUmVMDrMRzAGsUQTctlvB+PN+0+yHYTmk7ttOxHl0ZITRxIjmaUkOgc8mLO42in0bmHy4Ty1FRHhiwzjSIethf50uYu/cUCeb/tMDoGDrP73Qu/WBGw5vm1fl0n0KOClTSYPGCy3T8RiubsQ7f84U8g5C2DHTUmYNJ9uT0dZPJWKkM8WKXGk1jplpQt+IJY1VzEv23gE83n1zESYmA02W9SatGlbZbajYEjcPLQnjz9r9oVxEzny1Y+6kMQqK6BGb5TYUqYIsxsfpYWOjuvhPnRkDOlgw+/g1ZIdelA+/BmmIMMMfv2cw0BodxptkA1iNmbaNarUQTEbPtd4PVt+UGkchKs2ueDAH9pV5W0lMFhs5HuZSJ1eOPcRd+ejSstLU99nenpxmSispl5MXIPMHNA1MGVkCgcL6pu88c9LV1vA65KJDVu9ayzuQkhrEM9LN
SELr2tauvaOusdnDC7mcB1phhh7bhc6d3sMnJOWDC/iuSW+XOGR9RPeu6kJbrR9pCH4yd6BrtBSsEzC2iVE6gSiNINHtLBbGkx71NKBssb7v9dVHuB1/usYwE8GSwVK5YsAqi/3ROJrW78bF0zar0eJ+XqrDkb0dSeD0LGqpft3L86vZc3aSWMI+YPuzHUVI4y64chi4Yq9Oqi4CvWUmW8gMMH+ZlEYu2jMnrZfqAXmpjIXdVX3oR4M7mxQ8SOCrU2lRgnl1GaTJOYBk/6egAMcw0W9ero0SLecvG+9MtE+oGxqv7tLR8+yVfE9TELFCJ/sF/pyCUtQV3ouISim2q1xnnJteVWAygO2vsgF5tEEyBavBa4kyNRfSgQ/VcwKSRyPb06YCm5QwMs5UxcZC8aLRT9m+HL//Exo0coATtyhH7IcWGGRvSwkJoIStJS+4OZbYL/Gwb3JHSu4zz1JivZG3iY0HrZAxTnyEYdnhVnFmh0gdC7XepOlHxejBq+pvKAr0DmqVafb2gPSM+kSssOa17QsLSo3NFF22lmMjnN6kyNi1iuewByOEqFORjzKBXqnqM8rw0j4TfjEMUD0n6VlhvHKTufy38eLR5eIdA+AciZQ9mFfeS536fMHFQLaG3GhC7WeUTyAVF5cx69hSCbyyQVEs/1T/dXx/l7dYQ2e9jnv+1FGmvpXOVQD4xJHYQvjeDRf732RFEYmy3xBrFa0DoiDs2vH/7YW+mT64W5RA1LOXSz1Nx9JQFy2s9Y/67vzShOyjF+WFgxxmiA00f/5QqyTTRidMSeqWvtAF8T+bzKKx3rhN4yvgxTt2SCsBgqct5V6pMICdwIofNK7M9dQBAHr1jLYHMOAg4bmcuCXd6gAcns+TwrqRy/mSKLEdWvJoujmTWheBY5Pgd+jWsC/BHRarfCPTgJobs5aF8rfWJ0VVAFpdVNKIPlqTJPYAniVQfZVqlVrWGkujKXb6bv8k3NGRoaui8szyhjMzI8q398gEtExB3InUlo2AxXKZ9oKjctjZ6UAQW0s8Usl5W3M70Hlib2Y9KJqMy+V9RCvWrQDStwlOmHccoauYog5kXQ3h0dztz1qS+tY3i/gTM7pO4V8vmzlkrb9aKrPv2vUtsLfegTKXkBX1s9CWhcMbx+Ip3GgihQV9r4j9E7kLOUg9g77kXvZyf8PRpVbEnTHZ17KlBnkn80Dd/1Wrz2qTi+721p8fV1GPQgN6/1vthjVjCfJtmLLLB+Cw+NqZWcZE9lL8RDBrvLW3DzFSAcmoldyPQfvLjtGR2sdzTxv+NSu5yxLrAI2hyT9iUUtVDGmbSRUsgQTbmobLgcpLdWGjD+Pt5WqA6we1tDGJ1sjufVCKAuiQFK0w5yDkpj7xnB8HO9yPU2LLYhcsR3erOOG8JPdFI7M62YF8+sX2ic5WucRir1mPBanh2E9Zq2fKeK5Pn4BUJQMCFQ3q3TJ2DC2KlKhePE7Mq3+cuxNlJkNd0psU3ym0JV+qb4rqyJRe+EWSHXfpccz6LF4pZ5YYycWokBkh/jOrCtKSfyGF+WP8TkV7f1+voPDb/gwLEg+lcViq5/TlpekEk0s23k+X6DXJBsfNEkNvVuusPUcidhUxyImB0WiIufwbyCx5UpdDBKw4oVFhzmlRX+6KDYBeIJkFhiPGdG3NqfzJ79XoWMWM8WpV0pbiLuaKvXTm4gezouOcPTzA1hycH/aNQ5wxKCQo8l/QhGlD8D6QXcW/LRSSVG1ZRoADVfegGQ8oLWJZJlMfJCbbCQ8fO5lsZMDYv9CgVkyGMF/qpS8aCLIkaKY1lbjT7aqu+6UetLZakynhPsXT1LYymnD/YMcDdCdqs1D5P71rl0jkf+u3pvJzAY3sB7K9OCVFHLgFXWl2OI8u3HAliVUPwtwLQt9xAIKoHlF5PXbl90H66bYRlCVO5XDltZMj2KyfgYaTLV7Ny4GMPlwO5sq4sGokMa1OHdR2SjkWTZQYrpJhg1Zlg7KU
ZyzwQfZNUVcqRQZsRRyAlyICWiKbP38oWGqBJOXgVK1Bm+1nsDSAZkETgrDCWFlm7X9dPBhieSNedWKKc4V0fwSSQXvkOfY0kZsonQh5ZB6jTT/xcbeB9kbO/9Y0iPH9YrOhKyKR1kvWchSEA/V3W694kCq1r8D8N56l09capE3NLNHO8j9gn0adQt3b+HJVQZhFqT4xMerX05tsp8DJuj6w10oUvp9qFJLIjz/8CW183QwIbm6yHSCkTw1NiBOau85fT1x6pdsMqWhCsSIrUmA8BsQ2JZS8ZhonZ06Nxw2OtSVWpmBUHyppBT6mHoXPL/NgMGdnS+bNEWh9gazRvkfR4DhLdly8M4zi/Q2KcCjT9/Q1UMpqalwC9l9e0avcW4eblmzACSu8qa1A6vgvtW0qss1tb4mjYL54b8+T09aP46nNnDdUaoxYhuYzh/nDFCPeY18pqu9+cVtqewFZJQb3Yyb4UYOURSo+6NnVgLH6Yka7kiueVJ9zXJGE0IAz8LzgwDJhQck6+PaFC4dm8d9+ovIO/gXB7g1LPjTT6O5FIJvg+x7/b4fGCT40j+t4etozxJFUaNMZoNy0J1SFNVZUYZB1puEXfJpm6Ux8ru6BRaZBChKiM9kE7OIuTkweRP9aXYfCz3q3E5T/DYFHpxQsonIOAZXc2m4PVeNTdpt8WsQs+P1lyhWS+aAeNtBNzGCiCEYztHX2+pIBdgM7pjBjpXlZRy2RhnGSDz4BEOYbzOQvoj4rKsik4Ov0Ji8hEmvy/jvTvqAVjnBffGFUewWNfnQ8WljkX5gFhWLmZ8YpZgoG0iefC/dMoASdEavN/iZK3dK4lD+b1JUGY+MOHQDTTr03tLZ8y7KuXaOYPFyLmW3eex2rtvovzJaLNjMjXS1T6F63hEDp7ckfQi+GVKhKxr5tXLtFNlI/emYai1FIws0+5RjKLYuj41oOVXuyi6igxAfOh9tfm2gLMxRMqT7NnMP2HnkQiFVbAXqds/6FN/TXZsJK5oafZElMcAiOrcLrsds+yoen8REv7ItVeuB9af8zIr7E1fEsYMM4H8VyAVpUWpcQg6+mCOwlTGqOyDU3i82AeUR7OMJgJ7f7pu8KgxCrOua63JjAVfJha6IGHrGvog5WBjoY+yOjgUCwTytDJSSOBZJtx7L6IdZcSwZJnvDhOhuKCNY6DxPFTPEluvcMTmUS/VZzDzSqvKWPr7QoYsvqo0RVjvtL+Jo/99L3b80dFnc44/36u5U2wEKN8sSDsmzIh91W3fz7sPrkrkGBkm7zbFMdBN/SjOg3yi8nMDED5dNsBotAlMU9ACXiGPj4EtTwhr8IBkFF4L9ByYIWPM4Scwt7gl/LZYGiTd1EawYorQrTRq+Ecz292fZn1gZQrgs85z8H20xFv79Wx5sDa/ltyUh/JnOLFNfnaFlKKdiYJ5nPSn5jLsq9H3NEuqjsLlgVX79Sf7lrumvpMOVKzprFwqHNraRwh/H02hA2QvaMJhBaxwdncdgyqE2Rx8QRSZtvTNaLx0hK9hRaM3PMMrlxjBmiRARenS93g/dBLe+4bP4s3vquEpWFLV1NVPacH5lQ/p46zLgQJMl/6DmqDn6mGHDR5DHcUQ6x4peqp29FCD6URHbBClf1E35O0hSUchgvk3hGjL19DMPUHhnXHULyIxYXlB0eEhqw7XGK8OxMNHOGgijCsKIO7I4u53+msToNLRjYLZiAyEgWBtJVg25HXfgB8WkKZFgP7nMoSrckB1z+X7cWM8P5wfzMDLuwYvETbSsvccaV3m0EXmeaych17jmIyV7bTrdcAmhunmO4jNH9FprwH0g7eRnUlon3ipZihwe6FthGl3sN3ubPqaXl6Vttkt5dncaTyKOoY3ZKjoSLFu5JZBhkbvpiuUpsvmQpQJybMeqq7jwOUvKTmDlnbrcv8agLFst0iGVs2xR3ekPHOJvoglLd9VPuLclpgwf5Ogb9SxocLXZeLHQJrdWVafmx00nD
tF9B31lrTkkzZNt72jgGL8EsPh5QUTku5MlUa2rxC8H3Sfl5xVYCzO6PaTSHTeXY8gC0/W3qj6xRAXuxeF3Fnn4UtYV8ruxrZo9BwRtRGrw/wxUQCvNwfXtTQndOl7hcOnS4dQo+t2Yr36KXr+iL9AINTDJDt9S/soCX7FT0cpWCyhQCuZp4y726N7WjpeUnUItZp08RFshqyxiPz0cbgXsqf/TetXcLfZBdPYYShvt78WxJKIW7M6aSin4W4ExHh+5BnJLiMawBRLLCARbohNmLZwGizJjXAv4Fdzp3bMcKAdlLkvAVVgjp3XIqhT/ZAG7CQXRxLHNsduSbg9jZ+0eVEIswoJV+g3VC0p9si95PJMlRJNJHU4CQucOgcdwbK3vDu0NWAdA1sUUFnxsioE1WTi1SH9nIxwXIqZstbY+LXLfSxKFpfeUAjXx+/dipGCGSSZipQSnP66CF4NNYgTpq+b7rsf7EzzfOORUzJljlANAngDJet0s2lFL6RHQNLIJgdjw29I6Z3cND6i8hlcnBdVlKWGfLHbpiW3kFs4mcvtCymneZx/fcu3NRfjtyRmsAq4GbhDopj+Q0yVvmMISq8KgoOSz+ogB544xDz7VviIoJymIrTign/RCBNieRt3xpcdcKIWWZQ0UX2exLmqKPQclor6qjhcnwHdpzsZlrfDHjlnXJfPOJtG48NT9z6r+yA8A/wlA96LvB1c1y4RXH7p3ZZ5Zh6XYdYQ4hnA0WPvAsdUs+zOlEhiirnLpjZU9tiKCq2q4tibe9CR2KQ0nYypUSfvm/KrFn31Qno77NFBOLB2okYThmdrXOaEaW/KvBzgo57mptOZItOSfB7zSjo381SyTSuN6i2bAG0BiWFMzXNGObazl0tZ8EVQouVOG9HAQAumYeVNU0B0o8lx1pRpP+KPK6j9BYXxGzea6j4MhNYsWfuaTnrV/jCLj6tSwnFlfwwsXMBUQR05CTBJ71qGz6Uoc93XrcNlcrOX9rJwLbSCSpknzYapoNyfOPQXqqyfLHPX8T+H+0pmA+kFf1ABOccw2K+6pdk7LmzFrnaPxugsRJKjbyZSL1muMXdJ51122Z2Zfk3bEyN/P+XmdrqSPQSymWppHA3XdskLPf7DG0UcPMR1ZfJ3mpcMebXknVKz65Nplzw+dB4jHPsCOwy571OLhjj6ZF0JgMKZpWN4a8FihLjUsQO2AlRcjySjcAp20JiCxwxX9JHowqv8Tu6C/LqsIlwxajyFicZ08A49OEmI8YrBcwIO/y534epzdjKXSa4FGCHtuzh8w2b3vZVEKMRypFwxMHeFbo3SK3TphIgs+oAszp/Z1Yom2WT0SH651MnG5MrVKtv7f+KENpfKoPlfD7WtroOKQulXV46Ihs0yCOwUBL45qyMLa7W7yCpBfXjoJ15uyGIrsmjQsXTmO2xTke3ioBzI9MG7/SSTx465/jlkE6ACoLNlU8mWsfHLm4Xv9EB8w6oZdBGxuityCr2WHfynJsX/KTKx1jTGpLD0t8/7KMG0yStwktWoTSOueZAlSoDy1d2llnUaJO2EPJVpVCFpLeTSoGl2ZuLzNxPyhSTGtD4n5GyNtMYDu3IxB3I5eN9c1/msAOXOpxuttglfrx8vogz287aNq+/QucSBW/eljJ2G8MegqspJ+qJULogIQwkUAp45qkFBGWlcEqNhv4JCf2fjFymdDl5J5MkV6qhNatk1vr0Q/QcZzXbMDSJiNdNKvNu0D/gIl3c/5uThCk2U65cRm9RFU+DtsO3Wjyj6Bn9hlIM1jfy8gs9r2DQgnvqnw2am9cgBNfNFtVeTTP2TqRoekphHnOJSERZcVqTcu3wzWrAj6KjfvUra8l4oykrQXeoc5EIhAzCkwjJHp8CWPwXU/AJj6em47BLacPoHTU1RffC5hvilu2qrl9+i10VNw2cXy77a5J4dbPlbNx0C+c7mzw0Uwjf+h1CxyMTKMSdMgZN4u5Sexy0DKMHTOBzsGz
M0ngfOg8Jn6e/q8Sr0YomB0TCt9KkBe5y4hIEJEBRvJ9TYIx4xP/fBPG3LjFoUu7eD1HEhsRbm6yhYlKXFh1QvVby2Jne810DUIcOa3Po854eHLVk6QCWVLfEsEQs9COWAWR54AjPcS1vgnoMssF4KbKbYyaDmU1gkI01Lsu/AfT+PJ1Bmxo5Gk2e9qBbD82EqUnFlZ/e4ghpGVZ8+3mEfXmb598T/vsd3NiPPyG09IoLxACGkZNn7JtDg6bSmpY46naRg8MXvTedQC4QlxUOtkT9S50dpV6pN2bFtqDyWtX2d/aPa6OHgtRD3nmTi3tOcKkktNrfCB3rbf9Esj3jmOECBgSdydkWQpxK8R1WpdsCV2/hXKO73PKFoECmIBdFzlOWrtSLqknErDJPgZry+2+SISKEqN8zqPUBUMok3jYr13JMOWUjuD8wgxTL76G2XLRV/VaIAMLaBdMGzPQZNlKe/Nd1WCgqu3AvJyj6AJDcfB13Fy4DuyEYltBIGVSJGoNWQyLsgDqGwEZcJjwqR2cOjdLRuVWCWC7UvJwhYnUkeZfhdLFg6HUmNvM58CirvFlmicI4D1ssSxKLcvKtVhxYfbmZvW3N2/YGZMjKeasdDD30I7ZeGdhfTYfjaBqDYxQ70Va5e356s02T0/qlD7r5jt8nMCgduiM+qBGjDQnoptk3MacuwGVKtIs39O7ENkmnArzqDnQhar2y324n7xb8h1xSf1QSLIbACPB2scQHOiOmlc9Y4zsr9MoNC8MigIspKrzF1RxPxzgTuSXiEZSGVq4BOwHAcvttb3ioK0CR3zrkrQ6g+060Q8+NxRIguTA76/KFUQ+8BAkq8W0lwAcsB7NZMsa6KiBaBIxdloUivhC/nXU0dccygqhE+vN0yr2FwDAMbeL6o7Ol8s732eYWgei5K6GByZhXdzVDHzLXsK20KMEmMzvXyEwTiJ8fJ/rJCtmbsIB1i/6vN04hRD1Agt9ZYzGIgPp7sn1oqQg650L9jUbxa8mFjWIbpXdwePE5N22+YpYWJLq+Ung963yorPtcliHbOHrxmMIY70NX7rLKGRO+5waEa7rlZ8wwcNg2Pt5R8WeVpZePXq/ChOI1uY04WQKc7RWFP0IrFj+MMmUTa66hTlL5Cn358UOFXEnc5AEydZ1JFoDwYRdJ/cglMJ51aJnS2wxwy+WFfe/pkbKB+qv9P7M3xekNdHq4zhjNiNsL7UirgsN2dzG5DHxr+Xo/q1qnjZbqm8maZU8VXd5/26Yao9uqmfb9OmG01EEZgpetjfCWXOVQSmhdclIJyeOlbyOs+YpHeOB+yYh5Slsxne8fj2y9YOTXfaF87fEBp4lBVeheHuTvDaR7vM9maFA7+oOa3ogib8xKaC8EsLzWEqirzm0sHSlxUfp9bQ9Wj9nmRYy6UmnTNTICKcko46EsY3NAHs/sFSeKj2mz3GXeAIRS4xuP4ZBMSzZ7K65pu44MwOgZb/xE1svS1+BDOBPTtqbaK84lWktO15zU1B5GdaDR1EpX0Te4VFFvpxXV5vadwcXdvSbKtiGlDSPVt8kiwbd0z7hrnpZh11nD2X4/BLvb9tdbR9IbvNnSlSe0G90Ho1bWaufisXZuEYkuKmXLs1Hx/1N8gFd0KayeI4xTQvy3I2NpMtLSC+9du0vIm5JMetDuQp/3oWCUfpOeFHPuAiAMhXoCrjLromk97gjChaR6sk8VwRhEizIjvmMhbp3ve308AMjng9beHYiijw6YF83fOTIqfSMroGTRaSUykfE7ToVq+tSFkrkPBS5R7bYNV3orsYd0AKvuAqxOt8j4vqPIL1OTNbMTkQpTOqstf3mnE1gEjW9Lz5PgMp/+/hVZUrMuFRtEpcibY06s2KaY+xP856/fIbwVtif8yeI8Q/IZFSUhPlyWdaLt7MwmVpx5/2zpsFSA8ODgU1ctiNBqn2ZyAQnvk7ZhsBm4o3/k07pQTLpvLsfVhB5KOYwavjtvrRCqzCh
ob6pTqdj7Z2yHd8SrRcf+6faIdEpw0f8H+jTRp0ER60Nx6oxegBC5zKe8sNr1wtNBjqAKgjloH6dmCPGoEowpeAO0xF/INVXQOfoXkX4jV/bY4GWQCNXVHQFGE1gRP0R0ubxhmd1Iq65gLXwIrOJkrrXwm+936kWQ6CZdNC9MyXd0RfkA2P0yZ01hfd29kIHD+4SEIh6vqpPUuOydjNBmBgSwBFgLI9d+Wp94NVzNVkvpCk8gtnwbu5vIUzfB+ZxHLK5eXS1n0Qua8n9rWzuIVZK8tqHupXQp4G3R3mXqZXerkmLFINTNvWzV+K2dVmHd+ITPDYsSQCakQ2ByUSVknEZDzzfON0996My47V0dh38Nz5yfTL936TYmBHCYZLpGvXrg70HjPfp7oV589axLolTdOMCzKXTxvcuNE3JJm+WL2VNBJmTVptPdhsEQ0bnJmMfdkH+xHc5u60F0oyvxv2YQEoFbJc1Z9/8K2uAi4XBYhKneuzNzWUxV4rLnajzvrPMvncKe6axTNFZF/OShPMOZ5XayVMOWH1fC6bm6rWbbVSmLuajiCngbjr3URcwcqfZn93W+XahLBTA8eC/J/RUWVqouhzfW4maI6J1zHt9083CiI+raayR9OcpGRaO27RPDZghT6Ra8bzGyqtUccOidLaTT01rXElziNhlo3uR+euTOY6bqh7g5bNRaQf75ti5SJFgS09bsnlv5AWtt4kfdJa5g1dkZ9nxbwk8L1vDuUIFdf3ii/suew2WgzkAXE5ZAaTLq/vVtUN5Z8Tpzua7VDHZOMPrp+c89P0h4dYcre3PGsiqeJfdZeF0q2Sc2Pru85535frZy8ZPMgJ7fvhtpDDBdJGUiD2NOOV8taEua987qIaaKpby0A4bW/IuXxDB6cm47vhzVg6flpmvbMB8gM0nDxsWoRxBBCNYxvsl5a9UaBQzkid4NqHdSZGFhAduKxg2VKgvVRzkTA2LBNl1QBAd1YKB4aN3XZHpovtGGG5nnXD+HWTFiSLrruqt7tz9b0xNl+KJXBbzq+nMZUKoS9aFqSl5h5DpGwSsL6VSE8LrBfhqOrtdf+Xa5fQXiTkJgwrrvmnA6kWovnENkOBFD843BAPBV4Xj0opHpzagyQQfytqqLqtZMIsIOkXC5xSPA5JQcFbmFvOAueJ1+Te9XZ/7MxsjrOGf/01wWdpVcBnH9GjPianq5l/+2e4QTpsGPWRR4HdWj+FrMxNeSBq6Iker4FCFXUfV3jiwB+zS4Qcxo2AEDQYya6dl2mFGnZUfok3bpada8jcq4ElcVQ5YoyhLlzHlfl3UlpYeP99GbnsHlEkAu90+5mhhbomJryOb0hTg80Nv9+Wyl5zet7p837wg7AOIrAPW3+PMu29Za0xUsMewWh9vRlH91iCkTiBQw45QgtGetsSSar2yAJOqCwavvD28hJNcjg/yshyz5LnpWGKtDRYpcpnywpRKr3r7mGkMkdNZdyHsRxUAhb8NN8stnnKjU1lI24OME+TyfX8cq2PuRte5lFnj+BmumzX1/QnF+Y6P6kCFILR0IuhBHww8BJ3jjlNMn/MyZS1dAum8qY9pISl53JKF+r0QCXw9B0QiyskAnSzNUfbETk/eVorFx4DXvcSUCN3V71REWunTS1XB2tcJFa8STxusC/SVKl4qhFiDfhE4WrsrCEDlDoMC3PAd48Z2iQMSUaeZWa3fMuuLVIBUaBuoblb9+1PK0OYNZnbxkMMEjTHKvkC/DLzDsIo22suSafUbagig809hbJz/axYP/MxScfK/achErzYKChemofD0c0FenluFqUDBiNCegd53NtCYUm8hPFWYigqfK1t8Uo34WIDCUX74g/unT4mFzCIkYBCdw5i76sSMq8IpUZbR+eSMpsgBHwiNjbsu/8kwyncXj8XYsVt6tkEfZYbvPMpe41kGbeRM/MkwN252um/ms2J9sWuE8cUgbty/u/1YWGnkOyFby4/Fx4VYZSxrtiMU
s8bxWtMcX2yqBJTm42cPlDLpIfuYiGAt+50QPc1NlHKaKjUD2fQBTYfFVKjhlU0azXxd8UvE53d8DkAtAom5kez0YMklzswR2kQS1IN67ymnnGs4ifQI7s/4De0B10nCppSMOf4kNaLJYwDLs5JWOslUPFSAFwfT/LnEnEjohaKOeiVQsB8iEsV5HaIN3DfsIztUJQM/AaaBF1XHZjLCkSVEYaEebVzvuP6tfGqCC5yrKOSePaFyGqvPKqW+xNj/W1bjVP0Ipm1vh0hT1J9kLp0lyUr+nCtGlXd2xBAtfYyPYIZiOP0wzEE3ettxa7AI3iIMEO0DUiZ2583WJNTN25xGeQhO8G9gC28HHlxhla77tjhjTJ4TLG09OLLG2J+nxrMthsfRQtTRb0wGbuK2RZhUp4OqUKqhwgZ2m/lLNqJiCAr56SG/mP85iovOsFOPuMyIxbOdwo9XDR7JUzPqgqRVM5O6wVopV/hB3BZLpxAtsV12gc6vos+9mEHCZs99u+FBm5e98+9LDVRzE3tIh9IrqGhpPbNr0OtJLUwF1DoQ3E/m+L5X2oNBCc0//+XLWf2iOvWYwjrftys2pSgxa2CVKNPcbe75hAt0HgR+Id1FoDAswamEhi+7RPEZh+JPfHu7mEv9y8AydG2igEQaXg8kqvRJmi16p+7V5DeMQK4A3Re1JeFmLTemi9x7Y34bC0nespUMH26odZt+tY0nb4hQFNs4VxtE/dO1y3dq36jmgiLVidBctMPxDy30Od1wfTqO48A5GPQzq/DgTZ/SiHjkeMatZ6vBWAHe+RWi7X6a+qQfEtX8etPE5s0l6FZ53CQW+3VPfddDsvNJJuWFDuevXlsShlPR/I+EYawJTGbu1GzoklWoDYxJfW/dZXy6Hmt1t8MAiwNDRI8OHmrBQOmYZ8VxZg/N5WLTV7m5gChsca5mYRXdQ2J4nP7MBktLAWGry/+8w7OKxE5BBBlSEyTUY5L+f9CE1tZ2jT1JkJD3UCadUm3eerFJU5tzi37zgZN5uPZUbjwxI5KB+SwWoCClazcJBj3KGcERsuhzg5WbRA7PBE7us/jfbeXUpjgx8lzQniWN6Z6g+7B5VFQqc7uxl1CSdp0+YYSh+nBwve+ZrrPiCMPJbVlocL6X2LRK0dx5DFtCcxsWFFybhtLwAzOc/ILy7bRNXm6pJcv/W2k7ybrIncO+CDNSTOas8TUR2hloj4gSu5cHpPDVH3aEa+1JZXuPdR4TTiD6wwO89txvrbaCUF4Uyp3ml6i5FZTDBOS47asNXEHB6zlV4lrUWJfTTPF/5lEWVROu7J8es+rFQ3fe4g5Nwm16Brj4vWD+1l/UkL6sYTxCydk7JHdgmgWn4JQZHY4t7ssoQqYJTxdRFCpTRQwjYT475GPcMn3RUrBbkqJ1puOZeeBdTaVGv0PJA/9kc01Wmrl/XY2C+EPXEKJ6+JXMbrHrJEnPzCq6griYpBHdv2+7fllTImd7cU34rIkJSMFyZBqY+dgjDruvNzV90kHqOvSw3lSGwWwTg2b87ChlP9aTypjmoX/t+q4Flwryeo0Xtg5JyxHOKltK3s62w0730Ke48D9v/z4BXbEInXS/1XfcrZnQ74b10/d0r8ReMmTLhuAHRhqc2+vxko5MwZl81UMqSQjca78KsJ12G4bmj8nzWx8juvSCstWVK+mn106UzvT+WEvrwy4nMugMWmtiGUU+GMc9mi8Lt4nEY0gYNSFFw6wmtKNyjI5/flsBB0iWi1yH9M5nF/d74tziISP8w12BMw2m6hcE2jVeQve8JfDiSo9Ch/crv5URyKWD9pndXz1qWKJ8FFHJE3AanRjqQ/41cP4/Y8iZRFPWX3NeqX99U70k0i9Q+4dS95jq5Vhzo1Wpbitv224qtMdO2+lNB1eo53f72aEr1KX+66JgfVpPrpSx9eHuGv/t5DGEYCpTbQXslspow0QAGnbuz99Lg3tS2hA6C1qa7sDLlHKJFKRdEm52
H2GurnMixbbfpgxu6jm0ZkBfMluKKa1ZSYR4/6mR1fD1cG54/X55d9xEZyGCOdq/5uUQF+FVWu3ZBMCxyEWqIWWWCh9P8u9j0J2Hw5ajgMENdQmyBSfWd5e9ydGWUUNCiGEah7YPFkk6AhXC+dvIfx/m7PmHZMFghlAP2e/qXXeyvddJbyADFimEOsppUf/mL0KjuvvFePpdUfcr8UWRZkLn90fRe7Pq8dvJ3lT32ulGgK6iShkXK/8i8OPxznCEza896IhFdx+by6bEDmyDVvSs2odWBqxM8atmpR0DRGRLvUyMf6zBNVe1Jvk/WVTqSVYh5uR0Qd83Tlmr1oRKacatzJAsuuFfwxQCju+zE3BTxy/riNeTquDMAt9u2zFU3iBnLat/Ro4Rq6iRRJ6Wzv3tYFCEEmXTMADwnl4QC4RhmC2/DBeDq+FV++ULWLpzQXH/hFfm09s7vGJq+4ADXgFNbuHMj9U+90B45T5YonjypAoAABjC0OknbFHbyf/95EtsOB6Ao67KmmAt9rg2NfSpxZqagvn7cXB9sL7GsymrPkPde+HfvHfKGp0ELpep1N179VXD/4xgrH1R/y6EFcrFIJC5ipr7IeF4tXe29KQQslUVSc/LC33kzg90E+53hx+YghOer7hfvMFuHNC8te32vfwtaRCDUFyvYnFgCvrOnaWrheGGlxVXYQ94MWgyJ87CzCUqyTnzyIvANd72tssicn3HCvtNPJiAG295mgYGn7eSNvMU0uN7tnInowEj2pSwTWSm70lX4fx4EwOJNYhx2Pc1LT9+Lo+DdeOpSNQP69kD7fVM33pauXky8jNF2bt2ufKDa0JiGoUbKFJBxYlwSpL/3xI9nPmkt89lf8o6VKjQz42mXZGiwNyV9z6K7zeMVvBDcfsOHyl8iLCpRmRT6sYV9T4zt8cSA9F/OelwJTi1qeh0Hgu5UqhE7KlHZrW8leub4siKskxwzt3ZhaCjNyzP5R8D9enEdfLje7DZ3Nrt8L25014fHZRUPUyhGI2Q6UHiYbPjlKDfBMoxHmN7/JKDT68ZlqG8A0WsesV10cJG6GL7LYlrrvANvTpFYTtm/3NdLDwEL59TNe0F6ZEotVbbXsi9gb4p4T5e0L7GtXBr+w2e2TYbr+o6cybSi9wha3Wt+jXu55oEpKqumu0o/+J1VLxLY5sQSHJV4xHkfpL2YksUV5nfSY5nA917cM5pte9x1k7gZFsCS1pm4P0a6TxDqfS7a+y7PoCJ5h8/prRZWqU2eLUCwEyw5uQ0jH7dDNeAc2042kCB5NIuW4u+WjSAo6euoHNQ36ImNnQgsSBmSmqtBBGXL6+scY6l4LZHIqm6Zc4I/731Sn7poCCf5SKFYK4I2L0ejikAEAZAChva5wVmECT6b04teB5pQXmfbjGgUFO+X8ONTtD/k+WC8jAQe42835x2H01eDg04YFQ+N5Og5ogc/QOT0RSl2c8xXZWp5w+at8OgZGLNqzxCgxYePoLdRRE3sgmo5+hEGf57u6ZgV4aduvtoA8JPcpR9RSDKDfrFwcJOnZ4fJUvuknmbynRWm9MmAh0dCaFEJgoG2bv33AYsi3QIz2+FOIFKEdlWD4Q+48j9t5cSqVxGVHesXsdXd8j9LABanWKVmyChxmjiMcvKgQvZxR10XjVX0wDnMCBwUzxSlJO0SCwgWtCrpEO1/amHNIwQI44LIADDRXjmDlK3KWMS/RS/hr+0ZcMnZ1FbYUaB9w8ZWO3rntmFsMdS/CmAELpJyV5kDWBwAo83CmDJHcXeaau8J2EHzI4vBEnGVnDpImHWPp/c4MZ3VqXCG65OANIo6xqHaiaiDdo2Ex/D355gHBLM3FCDWGFDYIpqDPjUNMn9T4JpT6i+C6WIz91W+BDIyIfu6k3Wq0LMM24VNaHp2zmyemPgSV12w37wgEsPTSXREFi73I5F55IJuyGa+t6V4ppjQCoM7mp00+YH6XZOZO4Rgo+UXyvTscNbgUq/
xrM1rqHGyOJYmOYrwa4J2+XJIe40wOSJ4tzw0Azv8KoCe11aDUPU0MhGwmU5kN1p4K6MneoQuO513sN3WxtPeXzL0hAeXNzRc2p1JMSER+lG4w+qh9F+gtWFQ8SF3OkIMaHSCuBF4rsup3qtEWj1cWRQsEGPGd5/LKPjATAZrAAmJm0g5x/pkaT1HX3tVBeKzWveOiX+9E7CUYitoNsZWWYWk9bioCm4kpiogpXyCF28rh+aOoVMoh0iC5DvG+YJmuTve2P212xBaWZFtR8EfMRx8RbY4bWROnIai/h+ntBl1czARBB2eQC1xQ1svfHOaa1NLsqVe/1tpAsI6meYXwGjwXRdxRJ6J3J7baro0/nmOV90xjTOo5W6lcEGM5iD5/DAMBvixE2dkrQIGB/7LVBJq53YoN8fXL2rmES6GzSS3N4oedVTV4jk2CM6vCgyc6tpVbLtD0FEqbUzKd06tyZMSWEZE/dbV9iYdER5VZKviRzAotPvkaVL59rAIZ7cIpC8dOw7e0cKTzN7uXuxGgPTxrhTuJdm60RYIR88JBAZgd6ApdYvLwClVr6dpNrgLAPmzcqGI/1a2GYrnl+D+x74pvPBkFlIUpHgFQYlg7m0R7siaVkD7h7s1h0arYHrTUZgFPEQftoyHFeQZYhveGSbYmxr7ItBOmgRa9tLyrlrtiCf9uSRY81fIRh0Ly/J7G6g+slZ++nRVXnvtpxPCoycp+K2ZrtHHv5GICveuc5N3kAa5ds71sENtf47D0i2JP6Qr8ehV4Y+wNH1dPmA4dEU9Hq5B8KZNUEs35VR4smSAzOcOXyQCop9Xucwp0uSLbHyzu3WMm0dkIhifeHw71EHUA/H7gpxVZH0/z+sYwt5Mf0qKNP1NusxwnQmqB1ODgPjp0LZJu/8ETsKirP7gqF8QcI2HUPFDMh/vQpLc724oT6Bau8H9X90xo9qpDyBJoYcvA85ZWj3zm5A+oLezCrdwCkzIDaRvR0hoAdXXBmx/ZsHc5argdVzv2jCZhbM02IH2sE4FqqeifzzRf+dcXYG4FeWgq3s9WnIt7w3YjnRtbtuxQz6kdPQqjnW90/tQcocMxanpM9uPBumn0QnBVjydJq8HfWOeSqwSyogWNo8KSSPBSPiQUzoOEM3JCOlHz8IdBqpTNxIBNuUN/xtzdEO156AFN8UR5Kp6PlGONhH9oSBup2wGwITJb2e1k1XpvqOq/LVRj+3KqXoynxUK43xj3d6FLs3tTydIZjL3PRzlHtm/twMgH63lmtcrlHLaTc2YhC2i7tHABxBzu1McJqr9fB6iFQbDCh2saxKa8mCZGmjzAWzbxRevq+GbhDF/WtZz5CP+9kuFnIhTzmlvmM9iv48GKwhLbqbfQXb9Y7bEFlmVK0HtKx3kpse7s95wSLJAc4WRoarGRGB3oECb/adsRMCG6YqKx0CZcyuv9iDCAKubd0S73BC8t+GuejCCWmlSbMX2zmFUaGKStnSuXpJpUFwFq9WBXiP6hjCT1lBFY9SEf4NNgeZjurgdLUCeyCkiNCAOpdiocjkMSBJd8F3BbdTKDCHhY8spBYd3g0KekcztKgov9sGZ3AjVa8QqYsvYBcSZNG6gvBc2MPAgFjk6vnH/G1JKAa2Gx+QLRZVQ6NMWAPAzK6CqByZUkg3hupMuknHoS8bEgffzQOe4I9Z7GPIMyW3xKm1QRq1LnOy2k8zWyXAGldnwmGYx44WSvJ7yTl+/hTjfT99MoX3zgGd3Wi/MHhVN9arUB2nD83Vu8uONIn15YwtllPweh38jYxbvBJeUnYs0/tQeQE6C3C7rMl6t8WHuB1goaXnZU03Uiryc1DIP4G6CPdO0jj+5do9B4xtggXtuQPMZjPf0DLh6BDtpm4a8LrrBfUe7wzMyMAkhsxVbC+1OwK+CRlvNLKrSASgQDKFz76aGKNPdBc+IgrlphQnuNhn2LwUI+PGRezVTHG0wS46K6j3BieM8NTXdHp0C5bCnhQf
5mNkReue7i81nmbqdrcQsdKrxa6jMDx5fBtgDq8a37efVosC+D5aGfKpFsjBHR1Gnala1sIrq5scxxsnTfkGbcS47LXhqNs5i+DrP4HXQZVXkbEt39qiJlfmfwR0FhiXjam9KV61IEwASm9HKlMleu4+V4eaqcwpDSPH+KFyo7qJ4cR7vGECQhlg/Cnjh47RosMVxdpXL38c9WGTyR9yfnkpuEbZ+g/PxfPQwJRqSvbyYmWxdLmTMvjKXlE4JjqiUoq92AscINPCqnkODZ6bu8HlYzSv2HIYGj6TNI311i2GK981zJW6jm/ek4JcStlmWxHbcT/MDNWKbLN5p4pgXZrdwgjiWCWU/V4Mrq23l4SvktwVFz1x4RhKf/rloqOX9ZoK58dyevpcaz+MSF9e6sH+1gC5V9G6GdBR9muVao1AwY7/I5UJ8/BX7uXcPpFxQmRbmyRsL9JOStrNn8Ds9arTGbGhDQwmvzehcJGlgHcSgy/ppRwiGxy5Q49aiFds7HAINNI7Mtu9ToCvIJzX0t8io0LKjAwrL7EafmAXAu8EjwA2TZoGPpzYgw4zhkqM74cxGEJ0IYVHwA4uYpej7RPzisvzk0DkhIt98YJo8PHt9bgjaz2mEaGJaxIWMXKGkkkyENRzIRVGwKcNqJ3R1LEK0oESMymWoKZ6vtu4LIMdfSSCAYYopEz0BOp6p+rdzd7uEigXYqGLg90zD5zeuxFAQCncVw+kSRmuKfQCdQX6IgyPqV001MMXqJ+0r/l5fvZZE6McFEyNz39Pjv0hBcg4tL5l9duDYC61KJ5SvwGfvIqWHbXViU0yptkPJOcBdGlJZHFvX5u3bC9xpw+rFMGDRfjLoHvMYIqHlf/62sTzWyUKkOM05qlcKNz3e5mKpEitWVpIwKNKlhgb48IDVPAmi6YevetAEICAWJbtRgY/hl+oQEnIx5w87R/VuqS+aKvGlb//2zvBXaC3L8hn2Q17u6hAPTYKCTyUCqsn/2GyTd7OHW8xNO0hoYVFJdcLb7mQLC44iYSdMQkJXlm2E8uUJFuvHdPQt25NbHEA1UUs+vGcoXJURosTRJkJypEOtldeD14NsqE76voVgGPtiMlJ7n9TmyiUJYz4FUxTseSos2C3NJi/Ew4Ey5awtWtEA+8Z3Y0qe6700c7ZvQGCaUGjSfQx3CTDroYXnGDRKMgHrHiSj7zjXI0SxpAw8t2uIaTHViGFv+zQlX/65PmF+xy33rN38R4ystuO4ynm86WTaPxnlE/94EfGW70LKad45A/A+kxek99n13Cu8NiQ+t0oZtYPw+1Zt4Ni1TxMvzKDsIQUvQD4vNLzktyAgwZGWYTd3YbpMXY5vj3movN6S1WM966DvbvO0h6F8FK/sx6PIIQdd4uiKA+YjdKeEROFcVei9yfjLLabJg+cr5LAbOYlH9/GgBQDDOpJPTCKZDGkT5W5uQpv5mzaM2E9tyJ6hrScE0hnyLftgT4yVebRqp9oZ+FPbVkptwYFK13Ogb+W0xeCmlsyExXgQ7L9a5+N29M/cCoa5/WZrDqO+lso9RZDqelY5N0/SO66vZC2gA01ZL93i6IUU6trEWjTSum65KWQzLYQOi7lGPO6GpEO3X+HaHbbnT/WSS3C1i37d73stE6B8y+7dBP0doOb+U2tvfrwdWt2XMo5PfBTW0SOzjfwvtvRYcxN4h16azUZMNljijTYL5icsSjNfD87y3gdfSIsWZgqNp/uW+oNEUI65UtmBfCPVXiX3OwXY148Pdh+AI2v5Uw0f0H5pmibbRpKFyC3f/mmmcAPCtk9U4YRrnZMzN4zLrCt3RHoR4T22BoSYNk0gw3MgR0QWFyA+G3trWLDeL2829dHmX947jwqHBt+AnPFC4NvpvnCenuePp5C4zBapQjIJkzQGkNNUHnUhB7a3MVUGyuYgHvPzfoJAR77P8e2Ye/8tXoH4rPvQmKKFZw/wcSlue+qeAXo0ktzlghLrqYOS7lOEin7
u5CE2tfRn9ZgA5VwH/69JqBlS0TH+OPxx51tLE27n/xK0U7ohGKRsRV62gn5yAUrMAJCZKjGEygWqv0yFuXzHXDOFNVDRNWY3kduD6BjCtgf81qnwV5PmeMwBxg97Hw/hjxyckNh6kzaOD3IGzGrYz9wPXfF79879PoWziz2JQdTQ1bEc3GGDwziZcjt4nHoJUMjySTxMzIfO9MIrUvIhA+A/5POXsZyVni9eUJbY7gMn0fbkAXxs6jxTb0Su759rGYxsBGOG287LndzpfKn77Bjfayb3JHzC/vNcTZyH6MVtsr3BWZlCj4Ie1KhiQqeVESR/6NgUZoT0bdk0FguMlYRePr5xe/c1mZqtYG3rECLlfArqzjz+/p4LZOR2YZMCWpjmmHfAhyc9R01qzHeZ3QZXqWqVHu3gimYDSpvMq6Vo5FXlGJsoNWJZmYAiCW93AOVLUjZzedtpmqMNuaso0j1p6RynNu3JAv0JhWpBYuv1xpE2fHsG2qe8bhqSDU5qETGDbgpIxcsNviKGzWCHEezZvvgmBxd4mQ5wKhNnNBKVbeZSuXj/to5T9ZOF+u3Py9hI9Xe/TzXr78UYR06rrp1TySPWfbxY+qFQmP79JBeW73w5xwEpUZ6V/gsfDbigV/RPbDHsmWQH+YCqaHX2W8o0MrtyO2m9MXjXkGsYvRNEYIkZAHFwv86jmYHHOw+Ybw8kXwhPY7HAsw0jp/ZysAiLFwiE6/5cmtuMQISiSr9vIMzuIGMkWJRKe58zAYUjNfBhJ01PG2MSaOnV2gJenY/KJBnge/SOPNYxc2vJbtkSSvT4YqabQZBMw4r9LKEJO3XaYUePxo7pAj+4f/rtwWixWGjGuRaRRsJXrnfpdv5tuGZbb4mIua5Zk1H5Ntdyq30AltrLuW6exM11cvukZHPICFdeHxQ5OCZLawE2hmlPHadYp0eWRG4Wbh7HJbg0UlnzvP6UEjWP1oAimwnQcWlA5VH6bsKKe4daXAykJDSPbWlxElMMzfUfwJJtzHcmOSq2Cjcd3t8JYkTvlL2nT2PO48Rh1j6T16uKTi/xcqpcVkQv5zSIwfVlAEXy0psZMGZ10OCE3AFAzZp58cZh8MC06P/azxZh4JKb8eeSeIbxjxYDkCsG7wbqs6w+2FLOaviJEHzY2z8crLz4sytBi3G07DrFQB8miuEwdXoqyAPx0o14fLGFRlaF37cjRkjioWedwp3NIe7+e3v8KddQ7Enfdb6OfyBVGS9gDBkgNcXxmOC5bFUo7Vs/KzTxtJZsFg/lVp7HpLIAs1Y5nnIzrDbXgtQgkz1HH+ZdJYtlV1UFJ5ytsFX6YDDG1J7Aqh4WbxvPMYlUhtsLjppHx90DeJacKMQFdgOFndX4wl9WD77S+rOZGxPYmG9zeQElpBIQSA+AV65ITAYELGmj2x8PY+aIzuvNoG9/dsdWCZgmWJVxUBtavAzB9te7tUOL8ROTIgWe6vTccdc8VurlhTM4X0kQ8MXswadcX2JiVtCAxyt8DHOOOK3BgnHHJbjgrCj3epn+bgqbIjuNCe9QPgexJX8gKK9XJt9TqEFDF9Hk759zJw2U3Y5s2/WbqY1CM/tpIkqNsM19v0hYcxmgyoT8WZx19ghz7oKqC+x7WZGP8OP1YTpYz+3pyUD4Bf6eJF3Dh60EI5WpIzMpK010YV34lscxKJ+n5m7Qr0T8kT4PcWELOPV7hQkAzs2KUubGFZxD68BMOsvYpVWzt61Fkr/n+LgqQN7JNoR2J5c63r8PTvVwktxzk50L2gSl9K23IVZI6MRQJZYspryrKmLW+5kTi7PCOn/0uQPtmAePzpTJ8DmYm8Kzd+rnP7lVba95TNvt7bx30zUbcPvstVVFb17bf8RZ3pvzwX1NExWovfiTq1pduJbNo6BlQ4ARLE2712O3689b+p6DKyDtvv8ruvhhZCdjORb2N0XME5zazJFJnXqabNIr6Q15GTf09t7MIheTOfkOHAM1vN34lDk
ZBh6Z6Z9De+0Xxsq3hC1gJAYwpRafO4BhLom1KpK9C9Z+7qImRt5o0QEmVfVGXgbEuaz/dME3XHdq1MBdW9cn5kWcQIpvSLFru3fJUylfgskckqR3iIkjsofrDPLTuFrXAgb+jMjN4qUkiIv0sbt86MHFkJ4riVcIhcjiMJfNvalZng5ILiNM2Zc5J10BS3RAoN8P5XRrVx1wykVsTCHsuy96D9BCeXqocG4rv0cqd0dA5a2OUQso2jWRuGiiMnwHhQ1vD5Skl/kcDSFIdDlCObPopo8sePQK/K10dkPOsa8yYmEfv3InWmMgvxwG0IsBo5X6VDp2xpmArI9NyzWGHDl624IBY1XvLlBKXXoY3dzj8o2bUhXWBS42xnXb4mgH3MWyEsal7RDtTHyjhVkSSvUSPY+R3Cp8gidEqc2H6+m7Bzq9b+3qeNzaY1uhdAhFzl6y5u6j+Ft49mKhLd1AM9R6UWYJhfmXA9udN2h0axpUxew+hccjmwcrfhsCeRlWLn+SzzFZAhHl9G7OJ/Lz5gHeXCQiSSUK4y5VjYPSM7aM6KYIUB2lTVWAdXByjNA7HKCDKrYX7IP5ZurygQS/7nR0YToiyYJ9cAJML7j2GiUUdzywTeUwyB99hYYfDfxIFKdeCynxsxsrz6NnmaqkwJIbQXfQ6B3CbsVO/EWDBS/7ogX04L/visbgkpcv1+Lj4WRjKyB+tUhELzkNjBIx5V9x5/SfoY8vH1cVofN9uwOxU/YwWq9S9GCmUDgKWi9WNAv9W4+2oQMmWviBp25dNnqy8y/MU/2Epj40vAvMXTAX5iw9//W8PJqbmOwzbywCl1463cnXXw1Hu6/Kj/Z5G6xcvn3FmxCg8mSWezj+zMtpUC4fiqQgQ0YLm4EjU7ZOy5xerq0VyYPX1jbm1XFRoiRzOvmsUXmq39wAPtPXldTbpdJ1acKVH97Sm6vJ0n1LQeeAvuC+EheCi/VqT3cx6LItjL5S75lXZLYal+vnh9LChnjja3kdZV+Z5MCI4PBWgYeVAGA7AcPPw+3XUb72SN6NuXNiiKoWmzn/iKt6X/ZhzcD7yxAduWmSfwPxSZDhdfN4AO1cjwXgKZw0KAcUioZCM4h1SO6GRDIJGlMsq/MLUKZLxcN2+GbT9kROf8kTOx5MrjyKedw2+SPezPhbLHdxSyajABTmXQIAoQSE4cNJvymZAKxfz/zOy0aD2+vl6vZgl0KfGNImi1uJxcxZrdGmGphyzW0noDPdt96a9dJ16d2KGgAuOqB31WAem5WzwcWVRPcdm4dWN4681PIxqzz5HVjz+MDBfhrtTWHdRRy/3H74nZ3NpluBqEgDaD7PPyDxYrM27GrgwUvsZiuwgl1zWhoDCJX+cfoLqcWpntjXhkvfQnPCR6QP5HNwKAMEzOi7RHBFBEHn1nawoeSOkKR7AtafvNEs/ntD3UmDsb0fDHIjkHlnq/fS5Vj0WLdgkBSUN2pDtNZhD3sedRTeP02BI9ZCbhNpqPaXbcPqqAiUMfEyIYLgemLvTLWu9fDtOdC4qI6MC2S2loluQIoLAbP27peUWpQPvlrre1t6+JXQtt0Tl//5zZPm9IM2lSZ6k8seWK3S4TZXQA/JNuvx271TAdhMF3EBYfykqj8j0JfGeHmzOi0z03z/lor95p/uhbq+8PF9N1qmEivKU9DJYqW0ynJVKhIUoFB4BwSrGSeB8uobXBF4UbXwnF+9LhCn8HOUpFnwP4vJTXcSSDZeMu9pBNEBEl2NCJrw7B3FLjMelIp8LLk1i+4a2XkGEW+2VYwV3LsYmiux9wHwqSa5ybQOg2taPQR8MFjw63wWA3htIZ9ruPR10TfuAgLDp/PQyVjvJOCP/WzTKh3da8XCfy+TqezE7W4GVEBp+YhKuw2a81EhJzdO1RpbRbYwcC/cJGyf/1Ur1SUrUx4tQhGMtkck2+RAVT3WpVfe5tT8yfdf3NYTVD6Ve5xFde7j6Drm/yi2UsSmS8KVhyW
bYYImH4tHhZBj/rDDpUqow9WLxl/HUw3yviDEhcmppkvLxWvQDu9yuJYu04C2TYTJT1O5yYnTttm1tra83H+ZyID5cDUVjlmrQ8dJYKD40LI6qd7CEgUrxRoZ5wg10j82GvqOxClUTWTXoHssLUWav0SAcZcZfDnyXWr1J4kSD55sgYxX291vNKcq0b8f1HwWHoqVxn2qGTS0qNeVK/368QgJo5AYOfxhRY6PHX4cc8EuvZDRUABUtVcb5TUo/bhOcjERaECiRg7xdmpS9LTz684mXHpwRb/chHJt7C4Nw/stqIeWiVT3HV1vTs4lHTGlX2a62f+PQJqZcd1OKcfkuTJm78vRxIdvgE+17I+wBzGh7FiM5ksVeXUWxBGoZ10AIIWt6zlU1OJrzSPPBWWoIojyrdIwdEXKFC9HqvyvRzPbvJTxJN+7QKre3P/dolhl8EuhY3LDMXS89WfT72ZJsb359l2QCjDpD5NoJA6auaR1VK1gNKiVY2BV0s2RdoMwkJv6jDzONRSAhI1//wB2dWc1NZkNtJbu9fkq3WyeCPyYihAxK6GlEEaZddQcL1nSc/0f+9FVvxMUhAGyhrUkb41cuOpv/lTMnMA0oegRiPPIXusWKq9Xd/ZJ55dY4mrhJmHnx5vX0+amGvl1ZPELvU2oeKgBW3HqEsOYhRSRCYkXg2ixJ60JC/RPZuNCUP/zp2lC4kpOoZsuHOBe22AG1FECIYSmDn+r1oCdynNS4EhdKXSIy9vxH9or+st2kcOZIg5MSVQayVY+VovzKN0MKMHVfbV+FNlsC1fa3ZhldUOnMpf0yiqoqZEYz1twTd+wqOsicp+AZBHx++Wv3Yga4AZ6AVgc/sEEXftV7NBbBB7M5F+Y7/YnTYN7l+2h2r218G0uw6b9HfJqdZ5wQT197ODA534t8jwTor/u2MpVwpVoqrZWyNl+a2m8epLUVM7x5WOAl5iglbFo0mrO6N3acMGUYpJQeQfaCVBH9Lef6FIT3dSbEwIcttMs6CwWdA5JKzDmKKj1LoNwKyisLIrw14wElpsDvLKcsTEf2oMjD9Seehs/tjwcK2mIF16YirF3KDCJr/E8KW7qwtrwyGaRE0HCsbHpIzc0xeY2c1XUkynqaB5VOSmbaR6/8cxrfxJFfKZQN7OUeMiJkct5Pe9QsgXjFRseQuYEZj0ckV1XkwGmSCipl1jFv4iWKMzeJ84El5ZGxceVANdYBcq5aWOb+nRbqEVkqLOKmi0b0O87m28wW6e3QvwUxQqXZ7I+cYOgGK4dIBbEP78d/fSLZTcTW9zeS94mnM/+rw8Y5iOnTW8SkXN3vgA46b2h3bI91cFcRF5xSOVy1vUG6QlRS70it9j64sOINcJU/HHkAUcw4nVCuwYrW8qF81ySHcb87J2E5h5N/tjC5p5DgJvChUWlMgQb2FO815tLBFlpBMJW1VX99xcabSQp+2NUBj/8n2a+FyHa7Qj2WFImynZICjMBMTyuglbvb927fodJ153dlhIHGnJtYDOU3MwN7YAIwSEyEGbO+Z264IfK8XhNn2G4h0Xf4n55Fv7ulAEG6ut2xaFK4uyX9sJeWeejMhcICmWsosUy4b1brDTXl0kTpaGWGaXwTMsNBYQSZp36ABmp3Dx3AXOYO//j0nyEHSKa0v5UN4r559lvheAa5+hjqABEZJ1ORNtLgmFcLa/4/lLajmKdOQAsI7p3483axTUV11gSAhN5n0+plIvf63ZlpH5EJ3W9cxsCsOngZiRXIqEAciZP9Oq7a2gMq21Fxvhs85QMByTtiqxwFHjcrqPaX4LCSKqvBBMpkXJiXeH0F9X+cTNV87I0xFJmgXUU95eYy8DnRI6JyWHzHYk48bwz1On3RKkMj76/WnEOFsC5rBRKQCzQOCfdWC9tYVC9F/hYfKTV1IdaFzKFqlj0gAi/p0rnGKwadW0FRTDAwyIPeBN2tx4rHSXfYhx9HwfNBoyuLtCZwyENwyFAGBYC6sD
ZQqIxczxvRcYlLwADYKyAmr8L/TWa6eUzkWj/Zo9m6VoRjk1r8qC3+8clp/PCNROBDXRs3dpJLQGjHVzK2EpXEVckgBoh6FsG+vc8WBeMuSM3KqPV9aPw8Q8UY9ySPy+L84qJev2Ro8DlcOJ50waayxzIsSV687tl7O8MfyOCJEwZfz8prjpnW+DDqIk1uc72G/tiq60s41zPXJxsF9oVHXW1mWPmHrzZny4vcTGlhjVlC4cQ0glFovoalADgsDvcM8J33MA6nmrSPHe+Iu+bRIGH+uGnnLF0+zIr9M49C77aQWuHl/ZaHww1Wj0Ww6gxAm25pAoiQ1yXjvhPDclvP+GVk3GmyUA7H7K/Y+GfpiAEWUZn48aIbxWyTrcTbTufncuc2i0LuVRvPPcKjoGHpjQcSEPOHK1OSqxDn69hTzT+jfyrxjcP0JIreBHnUpMDJYFwwD+uB94fAjwwvmmStAkfezB+RmqIUR7s6pbKPjfP4L/jQVxray+4ijVrq9sd2pBYExkCTWykqeiyLhZpBdpOtjeMQ3RvrrmBQ5sNehsdkrQwBBK9PVajxbC+sZSLkZtyVvmkDItw2rgA5/6urYXFVKt45U4Kpjg+WqmnmDf1i0ufs2iENTphScLBfWRhboYs/mpn0+SfCa/GvWMHrGxAVK/Ry/E063x/aJwr2fRhFC2XKpyYmTT1ln7YA73x0NEpi4ZvoLl5KdwWXZjvsmSbewPVC/XgMH3aA5K0wRLfa7gdQzG8phQ2k9CypsAO3X09a/3K4i8Sj9teku0HqcT0Hiw4SKChHxcve3eH+6KXc6E3khZaGOJajsG81xA7KlSHEgYLjWbfnZXKmJEiXL8myaRW08dczjaFGan1/qZBHv+4iR0ay9exsJNKXxz6yLC1S8dNCgoUXiDidh6kD7R7GqCmMvRVcvR+0CS6I4tgjmANxBUs/tmu1Ushf1bqhKp/3/XYOeXSxXpF7Y9AYM1XN5Ql6j1JADTstb2LuEBnHE3M3fQB/F0rtxfh27lw6LdBRBE1PAGYGNZ2aa+ypnV3XjlATG71AoGqYBeGxX1XYAiOMOuPPF0K7IMQ0PILHzW+9YO7hiC2ka6zZSxuLbnGJVEzSLmg/xMHVWPzwKCO4WRPCW66Fu9EJ4pMRxDh2tez/v8L1QhljyJoDc04pc9wYDOWRD+P+nvhSKcjpWqhfQanczpiAVGcGC4RiYqoU6QArb5eX4ms/+1oiHFT7a7EuQy3MrGq62WJmZJCXzNLr4zGJNycnVi/vFQrDQU9g9V01Hla/88cSwSObvlT27lh5rm1HgqIRUZHlYq2XhKKyTh4cOD5Hd41X5YmfaaqyOeeAZRtF5pu04Ii3A5WHg1m5usxpWEpZIXOqZ9/zLpB0h7+US/notWNXv0+cv1XINgPNxQVEWCyQXf84GsQ1cDpVCFigReisRtMQ6S2UrzBsF4427t2UEnST2CKALDIrFJzTEv/YPCKXdGWHPytvMJqkFghya10GnuJjNUMtoUIt4DD9JuK+HdYhZ5I7l3UzTlKqdolDej4swPruxt6/ePr6gxWcaeA8EzwftTGVtHH5XYIUMpnJah6CUK/DawjS5YQMAO+WM9WSJDMeIRYkCxjxbNqDORYlY40nbaaZomlSLKbR4u64964R3Q7SfGQ7FcaSEJqF9R3zhGRRYwxU4AAuXEJx/P42NZPFzMQJpb/OH5Fv9U76j3Q71X6Enn+KqZIcGfR1jnBMUw/kIlY4FIbAVRjnKgz4IlTXEYjHlINuMVkPSV73Y0onFISXW7U/jLLbBDPsww30OlKmd4TNk7FZ235t7VjqBoF86IusSCoyxf4KA3fr0WZhjArm3SG1HXXHp3L66G9ZYayi29D1MTrCJpeAMO0Ygqms5ah40CYnpzdDptPgT4ZJs2OZDsDnCD8L2fRtd0EE7xzhdFHNK1iLhJFSMid2+p/7YxPRjwlAm5ufG0zbq7aUEoHq4Yx4+HyOOkG8MOq3pU
H5uhLKvqXyq9oGGr8Z4KNJwi/3DAqGvv+nlQ6VvWQYxKk4TJHFuK+i5DLmy7EnLoA5fSsP5mXNIrdE5i1tl8lgdd+ImnCF1SFXbc+22aZCciI+YjJXdG3eC7i2LFgR2cPvFrt4mHybwa8pjO0wcDb4kr8hBCTo120VQx9W9HFRnNooHvAy58IDJgYW2Q4xVSrMxSc9BECIsnoZPFTzbcQZqtudJTaD6rNyrfksnRfR4GO3puh7IlIlzEy90oVd8LewFJMEB5rVRgVLEGl2H33nwwNIqSE+a1gVPoy8G+II0VrwUIzfN+f0wHkSOqFZO7GJJVT5tSd4gvyfVnwtN7oREoGkJEX4NFSBR2lzeuhHLmIwSXi/F6+I95kLXKQFRP+sWPyYshb33bZvhraXGkmyQ3mkwYUOUPSn/x8UhYMSrI7mAnc0+5Kg3WzE3Mh/fQTslwvilbx5KPM3C5yIu5iMFWWqCtAw/EKxI5gTtv+fht0eGbdxReXBMyhWTWe1YJs/pw4SjMYDAkkTTnN10DprLQdVQrSwqqotaG8vgw9wmo3Ebu4rHMGiNOF3pF3M308cVWPrkYfCd0zoMqdTBZL17CXpv274i6ZRB12ncqzSflLAfth3Ph/0+W6vNJhLRipilH9Q1bN0MmN06AA1BVmP1sIJgmiyKpekVAH7LnZVbHkuWLkedLQWUz5irasWOjrU82griy5PPtjwjOA056RvQLjOS3s8P4LFH6sgqleeC7XnEtYBCRgeEmDVVjrhXfmCJ51qbAaHg8ctWiT55P5tItQzjyQ402/0u0fiKuWjE2SIixlIUm8kfAkjE9ZmL4FxajhPrRPjcpSD+EWFyXE6cqNmt1EcJumHIWczPkH+4iUeyVKCqOSi41YBWGZ+K2/ZTmc4tt8o9KT1mhAI543+bGwAAqkLZtoliKTLhOBWQvAkm4MI5JEmEYytgy4cn/qdpQIu+6SJoNqLt4jx3DWeo4Gz6+tOUNjltEguDFkV+uPQkAWpDvB23vydmYpx4d8hHdzCVpzxvTQk0R4+NrW9Vx0/a6/WuKYpyBvh357z+jg6lCM7kHyJfsstLRA2EIuAm2P6jZJQpSmESMQW040gfpN7RzMPVwKhK616+B5y0P+nvBG3qcUiSjH8W/WaFYnoxDdS8W7zEAcDZ4//2f5JsooR4MmBeWhEwqYKmmYkdaj54I90tZj9M5Bsc6rLPV/cJNbdUVlK4Kx3H6WeaO9GoprHzbcwQuA7OuQgLoU0z/KvoafdTYRMvza4c8Dy5/tDFz0jxRm5iFTFb7HUIlobW0jLxtqpyGXxMdinKvlNmWqRQnkNbA7zr1JAU7LoeOrFdenE0V+seVq8JgcVeH1+ZvfvNToONjdJJ9tMHNn3dzB6uesglbXHHK7Tj+uEneIOncYugm8BdDSeYv0OE4ULHpAR4GH52h84axg0fLx1NZnReyopZnfRXYI9MYdRewXnyr4S+CTM/i+XAVEwiLvAljFOISu0rxWtVjTBZH94dHDLyh2rXTWUwZHuXHFmNlVFilrz2a6NzqRy1wXCm2BesU+pcLw7sJdJTkIEzYvvf5Bqand4AJeCki9k4nKk8hdgHm7yH7zkpJCwbMHkm3jzsDZwOaxsRns3ijspj25TnuNKQPNd5/1d6kWx3HPnZZ1mmBkdSoRVE8tsqVS1/RF9yXjUggAQH8BQ464tWqGqlHkQd3Wfrj9sUQSWCULm4lwIlde28Wa8Iq+KgET3sAXEF84zNuJD9mJ8RGuZ2aD+uCnkGBM4a7b7w4iR12b1ebmtGHLNoLlNQoXgHf819nX/nzJPMRpE3/2q7PeHGt2V9e1WokmSrtQBwkweFJrKb5WvTbGwrmUsoreCa6ArkFv/ZY3/Y9UWthlArgj0DFnNzjI6wYrZ7WRNsFX/7TdM32rkHLAk3z5yx5ryLyStX6LTYBCbQDRFDqiepwESlNPb5Iwkob05A84aEJVTRKsga/qYYUIxwXpmdqZ25i
8KiwMZpzghFLDjDhAoDOneIqQdxLaUOiKq43jRdH1tfCVZjaF0FmD9MImLOZc9lU8v6Ti0oEkzATqkGB6hncFMCmsAfQFg3h4ZNyZfn3b2SMmfkB9V0fl83NUXXzKBInblxiOzxX9pGG6Xpj7gbDEG2ijbDWtN47GO7gabqRgvjeusH4++g4uJA1HrofnB1oJCUYlaUnt6NkXzy+doylOusGaQDnjGq20vdlVSMzvhHrL3bfrLe1HG6y1rsNVnv67wA92vCnGVj4ZhcZPZxS+riGLDJ2R8MP5nEqXrQn7XzSJdt7kq5+Mch/WzxAidp9nfqt0TtxR0VBcb2V8l+Nx7rsGwk3cMFlB7DT72IuOveOzWm6J+maOUQYDMQQc6DpUQoE1Wf2h1k9Q6LvxK0ifSa/oJ7WTArcHKRZpJHtsOf5c7gftixOFwLY6V9kJWpoFBmi6KiD2pnQQF8ZqoMw9ge0cMvDd63rKhILqjJKgktXklUg10TY2SZwYWUsq2EKfwHHhEJ4WnzrYLZ4tfLIKPRfKU7hqQzi17WBJ3qt45bd7FUIXTH8N+hWf1lPTEQo5DgOBqd9n9csF5SFJpJ7iLsLGySe7fq/BDlrtSn3/9TG4kPzV9ldlWbTFQumIWxuTZ5HPiHH+e8M/XnmX/E6DcjFBu3gwA1+X8vkY9yDSgb8T0nWgStoFZp0AYUXD9G98xBxzstc+mUS3wLmZd4lbM/i5nbfla+np1dbExZkWchk3tqYGcjFAeF/FKvA/NelFJN9dFFUPan12pgtSyGbiOVKJbp3605ErJiM9n8KCsRna6t88AtLZI5zKgEXK1aVwSj/m7UaPr+pwMlynyMSs2ELyM+IyPGOM86XizMAhhBrNR5I6oeiuXrDsZXTXkwBLz0jYF2MiA5tQw1v39DZCSfuquZzLjwbLgYvKXQG/lCksVHJbn4LArFKcflp6AcU6xVj6F4/EKBzSH22G91BNQtnO/FHrGi2C+5YwbLQld2BR/wJZFGI79oKKd7QvM4uh2dsbhPiSHpO9tW8DnNvD/UYeTkXnnPgjyKcw+f//M5Syi9MKRL4j/q7MGqdVQ+qZhEZUVMHMJPxWSwMDnbnVwiLFTzcae3+Xz9WtFI/fqC1gFb0JjT/of/XQVQRZdq7jcqUJAeOPkaJcQOwuuVN6uC+rDfYr8p8/9EB0GwiGjVPwgZX245FXu0W/CPx3FIcwGiDbdv4uz7P4HPXFbEjAvoveGJ7Q6ee2dxd5bIczXOlXzayxQ9PhIVbrpieO9ECUFuDXKalJWQP0VAM4h+LSDRH7TJPW0PNnNOJM9Rz024DNlUFtY4uaY1W0lsmC7RkMCB83cSxcgR16HFOdIKgx169WsD1pZTCGkB0BTWoJUXe9RFTyz1LLwKIo8mGN6WihRErhDqpyK8Z1ixleE9xtUvS2wiwLTSlAmUp17X1edxRl/vSPKdxd+Szky8Ho+7YpJwgxTQuwA4WiWg77BbBb3kBGFgKxEmtOzM1jX5gpPKYZKDm/v4zsu7tfVbAGvdPoCdgDqTMdKuvOCljW78HZfp5ruat9+6mRyafpdOep3cwmMjd7yb0vPOB3AZeF6g8wrhpx6Zy5cy6aBFlP4zgFkFcey9iefoeQZ6aiVqFWsFnVs7AbEXHlJcXEeUhxalGVccs30G+QnVhs9rbmK/yT3Yrb8WJEqOUfF7U+XnHj3n8T5/F9rhGsYqCSzrC0SMrpvJAUu8bac/+4aeRWNyDWeeZb+c0aXxDGwszCl3krNtd/LAroIpNXZJnruupBWS8a4vhdrJnvhqpl3H5kKc936XTO3On8RZJlQZSi7JoLrmjfgs6rklCZAzf2TWIWtWOJcO0JjUtbhHr1zJ9/kZcIr5gViccLyfJk2C4TiuspuOHzVAKku1ZxnJWnmPrq3/AOmZ86fljd+TUCPdzCM7ts20O6HejxLRwzehwPWzXj0DkC34/ruY3Um6BTYVD3R+CEsFnYS7emx1wukXe
FQvfqNt6063orbaNCaMC6zfczF4IQqtiu5RNy3/IfCn1N/NQ6/FXunedaUragXK6oSHRNCO5l85kw8ekGDk3253D6PI2wOKapCfwS32fYT96810rOJUBnmKWxZ/XDSQPpFl1dfF5ZjhCbN9MB/jMDCYYdBbPnEmxBLM/6yM71wdchPMynS3xYswVGpvCtOHqf38lEmH6PQs50rtiFQMeaHLVxsM9qkj2brs1zEv4VktZo9jO/HlTEqNpfzptlo6Oq04/P8cMxquKotbp2TKkMtSJbRzzTz8zMP7lBmQ8fASZYRCb5YjHtl0fIgIDVbBBTGLneEkjFkZRHMXAfg3BRyNStqPSm52DeaQGOwh1z4TF0XM7zOHBK7wVJPovod6LX7kHYz5jHk3qtsYkV2t5RPydnNY9e3+z5MdF0QMuNpYfmSIkgrWD84wAKY4y8+jsawEUfFpjmjOQb9t9fBOolu5mD4niu1w3d8Dep6wwl/7WXriXaZZGcka0iFdJEefQGSZt4pZ5REsMVgVrp7P4bAD0+QyAPMa0YjGvREQFjhnK/WOB/noxb23/pIzhbgZjS5S4HbGdpFQM+sICEyUAOFEdv6ttRTITOmM3tt2cEFjlWHoCvPjHrwrW7DUiRvxvUIaXzH7qvbH1ZBDoqy++W/G+77CQ5NJOsWQLM55cjcbEuZGa4y9IiNm/pnE2DG+HC60gTuRkApQzUA2BNhiArSqLy5KwcNZowBNgfMUDJVvsUs14qbUKhE6jY/xPNZpGNStSzAGkxBYb5yq3nYSI9O/qIj7WhaMBKAYBvxN2KwAgYqIeXkeg0TRRu3LwQUF9yFTRgtEC4eVvHKeslYMKburVEcdcko0TjY7WmI6gnjAQ2/9eaZPnRC2ZIit6IyQNsBJzIvBg/LZ7NOH00Nv/kx41w/IggijTLCDQWC9OiwikTSWR3QhG5wSpm2E7JFqnHL+cCgpHUtgkInv5rgpZpZPKbBFHjxSeduYe7pbfkk/rIGKL0bqFgyQvLtqincmHkyua/0eeSKcNbDRUuPq2A9XAHtJlcYttJDKAF7xtTnneOwsb73wtREpF5UASkHdJF/WoWR0OL1E/AXOgO+61aPjAgCilQIK6YPZ8Ocarz9gZF1r3P8IIviF/CLLLX/aH5Nnu97xg+09bSZKgD+HF9GzEwG1XvzvcKW/Go6hDgPTP2aCA+Rw9enqPV6IKhJC6dU0WOgBcLNiaJ8S7vAxTw4KT7gU4TD72gKQyG8KjMhdFCYAEV90KgULcrFmTmN8OdIusfWXazU6SF5B4z3BZ/lrNheM83egxHdifcktk+WshOG5IsU+xgMo2xSASHd/q4FIrzSAO7qm8/AhXYeRYBCNDPk6ekkvtygu/hnFiCq0Fr+jPJPHg7J2nww44rfm1eQMxUka183lrdhOOpYwf3AOJIvHyF/PbPu8ZIom26wo+ukni9rCz/Ch2OpGqGNDG2W6EobSIKwic1prYeBycaLPK+AtgY366A2cfsl0hoqg4ePI7yw++bV1546VPVsXOplLfRnyGvRhjkr84wa5k09g4J6/60N9U5axC75TJmdPzhoiiBcrNrrBJenehk4Ek0D78oDG5q4tE9iIVXRgZRwwiLwzhDJ/vrcWzitK2Y3D5UOqN3i+6fd3lyXEToKjboGeo5cv/uEtV9wBGQzkjphBnJPvHwM2E7WSw5bxIiWKJfJZuvNAJR6nLKbeYJmBcnGHtJCwPZ6xbk1byoRZG1tBuJWB/Xbkp7ixx5iUa7kEzOvhHs8a5YWZawYWzgYyqd3Ur2xfEr4Z+n1qI1geDt6+XMEY/jOOPcJaqf6DX4zCcu2p/smR43tPa4CEfZVb6Oxg5q3c4iigxp5f95JKf3se/PKhiKnVS946Zg+wkqn8Dh0fRFgpp75bVuB+5UdUba9OJ+d6h4DgP8AilseWnqemL0maLBUCDEpiKctoJXkZtuO5MsJNSKxLsykanpUJPN0xiGbugkoyN4mEi
Jma/BJTkmkiXSm00DEylNn7urXQOCYkiHPsEJK/8MLiOFRSivI40x/BmIMZXRjcVmgC/NcFiI1Y5xArjc5BbAwQhk+GH4dt+yaVP664J13alQZQRATPYgzzkylYW/wZKzwFpMcB6mijjzkjZ/ZpeSSZRE7OdINMGAV+z3cm/Ak+BF7jCOCtSUQi/VrgV44kQGrzDVvTBeF2C6iy6qlvpjuDiUstQ8VvBvw2Tf8olzJ/Ij8+5eCs+ThjEaxEoRgFa468eXkcb7HtIebEtyuW+DEIAkDBrkuYXXVjftzCwiRvRuNoBmHZ1OFYAdp3KGi5JFvohe9KAY+1u5Ac4efcZgpjqCSyLqDl5+4WGhcFsIM41P93U2XGN4LmtVXuzxXzUKgQjt2TGTfovnzPC6SiuQVSHUexdQL1QnNMp7QtCkoszAXaswGPaWthcW34PL+xD3B3hzXDsBZSyHdiNrinrQ4aNpX+xLl5X5ET3L+P1vA0AdqNZ7/W4tve6KEGtwSlRkvg4dYRYU6oweSad9zu71t0t8CV9L0NE9nss0Pf3CEr6RAYUdI48dXM9VSMtmsKyF9ZT9+NtPtJuyhvrltG/qgV4iLOyi9+TqomKMMHThwcHn1hLtmE9LiR1c2OnAuO3U5lGiQ7hoLe1mycwOqsvLF3sf7kr7m5wcyQtQ0FwFieRZ8wjd8gXscpztngLuRczNZapCyEkBDVc920lGT9hu1PgKRHlHJasqJlQ9mikcHSLPJtpgVfIK3ZQGPAbk9xzRpuvOcSkFlWHI/ZK9EjLNU25oiKPchZSEiDBDk43X3ZmP4cwptmsp8MO2C3CI79xrlxejgM13oCC3mYtTWgHUbQvf7QcQbGmPD4OynQOy6nrGYkhK5gsWt3QEaCe3pt+iGfP90LZUSB0JK8kmk2DxYBXw/gyAm3scixiFZ/lPvrsJR2X1dU/EUKP4QDpV2cdi1sWY66qdu3wbPuFHr/L4Q/oRU6GngSK1MagY2WyB1R+O/x8Hb83qcXKbljZnVK9GcGLXVR+QWovlBDSl37z8Ae+upfrGpJAXhGdf+2e76Olukg/Nq3Y0/yXBn/JqJPWjfblhI4rKQBTHBpwBgW8zknsOOGg7bIGARZLLVMMmBUQEk9dHBcBQRka8lHVHrhoZJD+D5HTcCF643XADhcLkMjyG6lo/NmsPyO6QCBJe7GXTr9/6PcT7SlHrAH8uZmwTdRYkiD0QsoHotDWuNNwUQKamww818uKl/GJryuIN+pXm6gwNgpl67usuGQQX8+yZnzMgmJT9OVV0uVN4j/gkoxr01CbYiFZ8/lid3yZpqW09MxFFHzRHKyofAMOkTXX7vInHqVuHPSOAxb6ekNkudl8NN7Fefr5qCGT+uFEPT/S0SdmV12ceIi9PZpTfS6GPL1NiPsduCAniCsmlegddMeY3djOjKNeg4cm6j/YpRBqflyBtgPJkX4T7e7G8SYSORq5xjee0Nibq0eIoinhlQDooopA6ODIq1QgQ6GKUuFj/X0huPkMsfdvHCTCtWraW1zPUOeDmhg7ztrNzMUMCpg/XlBdMPZbFlGzv2WjiE4ZpIbavg1eMIa979ZMvRNxA48G+oFUIZErSEi0oYRlGFyYO/QJUB0ukhmzfPjZOuOteuFuEKng3DejMv35BOZ8eAR2OF9iQGE16gIHu9WjVwg1hJPpef0iVRS61Wp7rl0lsGGYggbfb+cca8m7I/RFZBqnybapUp/F9ZsSZ3Pb/9qYMgEu19JStsPhGz1gbVyL2muTs6TyO5QkeqHt1eqyGq/oby9FXUrO9RXXAw9zxtgVoMnXKdvjpF/wfJyvPxAKeW7p0x6tQfyb7n9LLdrqPUyYyJjVb3xvQ7ps5hQtgCs9aS3sMpDg2bAXrp6lROL8u4nReEXWb55vly4cryfwZQd7wr5jb7WV+emcSK/t5X23F8DbVzDMMVOClKS316BmYSmzyO88RSpsGoLznPLY49HH8OYbbvX/5icb
G2frDZyXKepvPVWL8sG6qZyoggu8LdZP4e87o+uX/KZB2u2LCirOL90OuCsKInC1pvjn9b+1aQ5mOdqfxyClqBRjOd8edkgr0Yopb56uQHq16oCltw4KIj82HHMMTBvLKfYGLD5GyC3rCH0lmKADOQMajjyrlq6XY4wgBaf1O77CkG1M8ClSA7wCA0+f6TjGYoOFpUSOFo9H4b0ulrIKWhJ5/HpGmWmn8DmLS1X+dSkEo379YmjaUlPrlY6ZBEMEJB4gM7/T4OGRIx/f1oPwNvyq+LU90Afu7XeiSjZP4uH4ew3OLTfpmkps+KF/qVXY3x9WJPA5byv7BU1H4h9mMN9i0hrUzrsjzAGA0uyg/IyrTNbL53IXQQ+V7vXnXqbCpowvG01fTyBJWfrBCHVx0Ypdide0xW3E9IHZ2gEmvjLovwWXSudDyFS+yGGahUWJK8nHLJzJx43vNsrixHTIhxiIOW79ThC2MX5Yjjd54E+9c+kuIlemEQeO0OeWf4xGc1XMZf3lHlODKmmWqWETcSfojvZ7eMKvB1gd1mTkodIO2ycCQPkrhiz9Dertw+7fk9mh/MVdaFVjiYc0zRPWl1YF3FxEhBNWiVz5QPFMmCDLGmrjl55k1xCUDsiLBiSGW2jS9cQz01DNT5MFi20I4Mhil3sC+NhNIPBTEV3urOcBoH0uTtKqRBnO6+BfvxLSHkaZ6g2x1S2AwGzNULa00VEJeIspvB0FFON1QlVFcqXt2xSvuIJvH8PtUQZ/SVSNvs+hT0r0VC/TQS/QpA9t5hdd20FZAyw7pPf13N/AvYpGybVeMtoOvy7wd9xHqmJf8swndgdrXRZ/y2qgr3kSwtSpbdapJWSho6cmSyCLQIeyBTsDF2rgC0egJZT7ad8TvdbS253faoWzV+hXT3iwoV2j+4dnPlBQzyRHgFpTnZpZ1E5GXzDACKr6OfvO7t0/cJfNa+0xUKfqlEHwTpQ2iyTdeyXBp9rYRsPB9CBzg86CHVJvzO0oo7pQ7RoXLdfHJfDmANQxCbeehU6sity1GzWM9AAyBJhaLMzBCvJvJ6QNjRZyEzPcxDblvTfmHJKu1Gtj+Uy+C3Faz6IoOk5RV8PsLIAWbR4JwW1QrHRUL9+DPRl685nyuCL1lHaAALMOxRoZjUt8JAhDzjNCJC10RPi/ajRBuaKhaTeLZD0F3mn6zbLVDj6eQMFvbFkXODtvinQmJlVwNHKMxUz3CCqP11ogPJij5PQtoTjQIvXWvsBt32DRtH7Tyziwt8ityRo3sOFOjIqz0/VBfuJwjZdbUfMkOax7OlXsDxTIMXA29p/6uNQcYy6kWnVREnMKlHGSuVB5ThZUswU24CQvM4oOBkHftvKfOn6p/cmwH5CvN621gs2DAXBc2oxIUh8OS17cyYuqlxHM0yqxqW/CFbNR0U0Y1u1sQCSnfu89Cay34x0ZjMdr/I0v/aDrQNtQRHVi2WEXG1PsHypubeWCcu64NmEzRYEIdvwVaaEkDlG8yuwq0Gag15yDxtfVHEdCG5GdwIx6YVvyQbfbOGJFiTvfKLTLv/fflrtzZoQqXucgCstrihOxPB3rMB4ukk3iygd7juri4J+Vlem6aHxsBGzws0m3+teiTz2wpxBQAJORrqAZ1B63WLsOaTRLbV4lyoIHTnoDKx7/IGrndNu3f0bnU8lc1mWatkM00riVLD/IcYPBPphR4Ru0e3XerZYeXIz6lxcgIYZZIDTWl0TyYt3ib/u/itOdMXhl0SC3JHOq4+ihZ/uV2K9qJdSI6trHuNss8dMlA5UI5N6GODZWNDP9bckC70/X0V7UC3O78PoJJ1G+fj49of18Xo4MtUf6hLsQwiRNrwJPfghFCSTqda07Vd072//ZwrMHlWvtJRKXCZ5KprKCYjgxAovdCg9q6jh/qNV5iXC5RMRDc37gtJ3vwRN1Q60pqCB+QktLWe//+5rsWcb/IihMK/bftGZQDCXKfPRjM3xdINpWUei1
0m8tYEIwhFZwTwOoFhefkqJhMwbHqwMNeK+79EogWWWJeXbyk/0kkWIrFJiALngRQ7nBPSjwHvjyPka8fgojoT1g3LzHXxBpF31TvJestOlWIpnZJ+zkUsLu7VlPDXaeNnIdScQLtwHK5ADy4m7my++JMGlGuJOnFwg67rQYHGoPBaRCkpL1sPfJ2oG8qydasb9QcnMwQgl8na4kzxJCFSuf/JWkztVX1VoZ7D/c8Ihm9e6aMlESnR0+ikEp8O+nPtj12vZPatOjqzYFwlCPpUUvMOyjHgb0IEXfU/GTVNjrCi2m1G/n/YZJEuDc8M0MlrR0j6hf1OXjWuLMjaHCMrqb2bGh7QWc0uAO8R1tgaYcnLzScOXRmILeXNDpk7NgXO8gv4+UNNaXZ8rNNM+jz6RBchJyuXDnUTpXksGMwgojMNrrsH1T6jMQTUCRSGgnAdDl04pEqKbXhXXcWy85pANMj3G4KSgIomQCcVawjFZEBYLAgI73+vQGlDKpBQ7ZuiN7RYkdDE/Z1HL6kItAlmSKLf58B6p9MzYP4MzqLhY8rZOnQ+JJljreqaaSFXBvgPSJc2ciVDYFM8OSePmoPGROS0sB5hkO5/c1Pe6r8b5I8N5Sx7J8bY62/nAJuNHb+FWud1MNjlUIPReVGjz/txpfCDrXo29iy2r1AeWLxd0etaHBQL6YLbQgNjptctN2p+JtZQfPLjKkhmKE1c/NeoZzMlECtPrx7+cvUpgxrYzCu8nw3Qt4GFO4zVKQdDTQCSXrFjxyHlnQ48ZAnGTvTKEOqezoIq6frjKnHpJ7JsLrgRjz3L9tA430f9xQFdOnfVbO4EjPGH/ssapWAUlXwLQ8jkFnWgJRs8Vi8tRQ7BAMhX9YpjAsirWyVnjoisH/oWp6pUL1Qt3xQ8V61Yl1FAlsr2oNIjCLyZpCnVBMatXq8qrpn6USuA/cf+rjZ3Tx5N1VbgoKvtgPm1V4JZESAwX+a6Vld8XjZ7TEzzlu3pTXUfF/blvcwx30PtE5b/rwyX4tfFDujuhtJxZccWoTuJsyh4nl+NTJR1gn4YFjLEfEJnjqwN513fCim6xp35Q0dicxZOCLsQ2FHPLHG6Zy9NqEMAT3cUWqSbUn5OoR30ADYSbZe5xiNdOloFvW6nRp1Z47jLRuJsGR80bcw76wzp2HnXAgwkD7IwrJVDvcACDFjECdDBuh0ASsed4jhC1VE4NA/0sJHn/wDI5TNSe9R9rvHAu+u/vKt2ZHRlJSST60flTuvgNalBiZ203wbXhOF6gbw8Yzo6OgLDQdY7oPcefNoE+ChtK8PV45v+zLdwdWkojhLA0HAxuHY0zkiuQudNrVFySYkLQdHkSUVGctPCd8t4xY7y4gGSZ6cixUcQjId4AqjrMns3cRko9sX4ixLahOXpj/PJMMYZ0bRLq0POwGJrQn+C4h29ZKPYfQlYRTQPmZWibVbvoEXYWrPMPs5CRT9U/qWKY/CofYrdOQnBO/8Uhw9Ht6H16T8m6tiwj9lcf5DwW7l9+Z7QX0XqQsDGOFsM/joV/vwuFRSn/CYiiAhg3H2NDJQhTzUbgTfUd1vJJQj5t8ck8qhbWdLtWxjhAeKcNuwY7zspQOGAv+Ysb9ibo/Vtfj5+h3ist8hsrm2iVuvgsjEarCbbWeaQHRcS0abKcLfctV4ZDEcr0J+fVGZ0AOhvIF1mZimiM0WLZVHf90BJcyWOA8vL8PfVc4DfO9LJ0WtKHYTSuEDmnuyfJa5z60hQRh5F6MOSTHDmBdOw3x5as1TIx0LfnKTv9gfXTCHEwQPcw52xGEYitrR3HPJ+Y06zJXBfzLB0nVKMa4JVuVH/HSkDXNCc+KzAirsuzRZwPQPKHXC3CEyFilcociUGXk9yQ8qpBDTmGX9iDbib8J8DPiMk8yHzsLWa0FJJ0PKxAhObsDKAcAgOI3bS/23oEIBJCT1BMpaLNbCjU4JMXLsikVm56hQu1gFRE1p5k/qo+p13ed0JD1
Zr5WjC+Ldt0Kawv4E9ZXLdiZAax8kqIemjqSCyftzYgygagEsxBSpBFcuWkMnpdqTe4DT7aBXBc8G/hGJjzLegDk7P/b1ANlGPgaDkTAzBNSmqrtOesxyoD0fUpdCkdXGQAL5T/BqYImbFM5k1NtbUipT5kXpPjtVZw7tqAcmxTJS0UuNQtySDg9JEKM68TefbHKcJcsHywJP2WWrA9LAPTAAG0kCHcF2XgWle5KG4xq0ih/u15bGzLDNm5enk9OQqBfaNxVkeh/uaaVm0Zc0vMeou8dZSVdFFo98LP+p1DqYQi4aKWmmSWEe1IMS7GgxsE0TAGAzu4U31jWnL21qSylq7zb/ChG9N42F+fApbqoWvuKbIUJ63bT3PpBQwJ1nyqK8CaIIm9TeYhgWisYOL7Y6wWUnVTzIVPGBDD/mZ2jzSmvNxP2I1jpruqdetCjSjXVCJhxbuDhQk2L+0yeb+81PoeFVIus/6eL/+VN1kiq+rXnGZAXyb3Zdh+bIXbeCfJHOwWlJdR/7XpMsoSGhiAeqeA==$4f386b50df3fcd9132589a934851faaff16709ff", "!@#$%^&*"},
#endif
{NULL}
};
/* taken from LUKS on disk format specification */
/*
 * On-disk LUKS (version 1) partition header.  Integer fields are stored
 * big-endian on disk; every use in this file converts them through
 * john_ntohl()/john_htonl().  Field widths are fixed by the LUKS_* length
 * macros so this struct can be filled by a straight byte copy (see
 * get_salt(), which hex-decodes sizeof(struct luks_phdr) bytes into it).
 */
struct luks_phdr {
char magic[LUKS_MAGIC_L];              /* partition header magic */
uint16_t version;                      /* LUKS version (big-endian) */
char cipherName[LUKS_CIPHERNAME_L];    /* e.g. cipher name string */
char cipherMode[LUKS_CIPHERMODE_L];    /* cipher mode string */
char hashSpec[LUKS_HASHSPEC_L];        /* hash used for PBKDF2/AF */
uint32_t payloadOffset;                /* start of bulk data, in sectors */
uint32_t keyBytes;                     /* master key length in bytes */
char mkDigest[LUKS_DIGESTSIZE];        /* PBKDF2 digest of the master key */
char mkDigestSalt[LUKS_SALTSIZE];      /* salt for the master-key digest */
uint32_t mkDigestIterations;           /* iterations for the master-key digest */
char uuid[UUID_STRING_L];              /* volume UUID as text */
/* One slot per possible passphrase. */
struct {
uint32_t active;                       /* 0x00ac71f3 marks an enabled slot (tested in get_salt) */
uint32_t passwordIterations;           /* PBKDF2 iterations for this slot */
char passwordSalt[LUKS_SALTSIZE];      /* PBKDF2 salt for this slot */
uint32_t keyMaterialOffset;            /* sector offset of the AF-split key */
uint32_t stripes;                      /* anti-forensic stripe count */
} keyblock[LUKS_NUMKEYS];
};
/*
 * Per-hash state produced by get_salt() and consumed by crypt_all():
 * the decoded LUKS header plus the encrypted AF-split key material and
 * the "best" (cheapest active) key slot to attack.
 */
static struct custom_salt {
struct luks_phdr myphdr;     /* decoded on-disk header */
int loaded;                  /* unused here; presumably a load flag -- TODO confirm */
unsigned char *cipherbuf;    /* encrypted AF-split key material (afsize bytes) */
int afsize;                  /* size of cipherbuf, sector-aligned (see af_sectors) */
int bestslot;                /* index of the slot chosen in get_salt() */
int bestiter;                /* passwordIterations of that slot (host order) */
char path[8192];             /* on-disk source path for non-inlined hashes */
} *cur_salt;
/* Byte-wise XOR of two n-byte buffers into dst (dst may alias either input). */
static void XORblock(char *src1, char *src2, char *dst, int n)
{
	int i = 0;

	while (i < n) {
		dst[i] = src1[i] ^ src2[i];
		i++;
	}
}
/*
 * LUKS anti-forensic "diffuse" primitive.  Rewrites dst as a SHA-1 mix of
 * src: each full 20-byte block is replaced by SHA1(IV || block) where IV is
 * the 4-byte big-endian block index (john_htonl), and a trailing partial
 * block of (size % 20) bytes is hashed the same way.  src and dst may alias
 * (AF_merge passes the same buffer for both).  Always returns 0.
 */
static int diffuse(unsigned char *src, unsigned char *dst, int size)
{
uint32_t i;
uint32_t IV; /* host byte order independent hash IV */
SHA_CTX ctx;
int fullblocks = (size) / 20;
int padding = size % 20;
/* NOTE(review): signed/unsigned comparison (uint32_t i vs int fullblocks);
 * harmless for realistic sizes but worth cleaning up. */
for (i = 0; i < fullblocks; i++) {
IV = john_htonl(i);
SHA1_Init(&ctx);
SHA1_Update(&ctx, &IV, 4);
SHA1_Update(&ctx, src + 20 * i, 20);
SHA1_Final(dst + 20 * i, &ctx);
}
/* Partial final block: same construction, IV is the next block index. */
if (padding) {
IV = john_htonl(fullblocks);
SHA1_Init(&ctx);
SHA1_Update(&ctx, &IV, 4);
SHA1_Update(&ctx, src + 20 * fullblocks, padding);
SHA1_Final(dst + 20 * fullblocks, &ctx);
}
return 0;
}
/*
 * LUKS anti-forensic merge: collapse 'stripes' consecutive stripes of src
 * (each afsize/stripes bytes) back into a single key block in dst.
 * The first stripes-1 stripes are XOR-accumulated into bufblock, with a
 * diffuse() pass after each one; the final stripe is XORed straight into
 * dst without diffusion.  Always returns 0.
 */
static int AF_merge(unsigned char *src, unsigned char *dst, int afsize,
int stripes)
{
int i;
char *bufblock;
int blocksize = afsize / stripes;
/* +20 slack -- presumably headroom for diffuse()'s 20-byte SHA-1
 * granularity; TODO confirm it is actually needed. */
bufblock = mem_calloc(blocksize + 20);
for (i = 0; i < (stripes - 1); i++) {
XORblock((char *) (src + (blocksize * i)), bufblock, bufblock,
blocksize);
diffuse((unsigned char *) bufblock, (unsigned char *) bufblock,
blocksize);
}
XORblock((char *) (src + blocksize * (stripes - 1)), bufblock,
(char *) dst, blocksize);
MEM_FREE(bufblock);
return 0;
}
/*
 * Total size of the anti-forensic split area: blocksize * blocknumbers
 * bytes, rounded up to a whole number of 512-byte sectors.
 */
static int af_sectors(int blocksize, int blocknumbers)
{
	int total = blocksize * blocknumbers;

	return ((total + 511) / 512) * 512;
}
/*
 * Decrypt 'size' bytes of src into dst, 512-byte sector at a time, using
 * AES-CBC with ESSIV sector IVs: IV(a) = AES-256-Enc_{SHA256(key)}(a),
 * where 'a' is the little-endian sector number padded to 16 bytes.
 *
 * The key length comes from the LUKS header (cs->myphdr.keyBytes, stored
 * big-endian).  Performance fix vs. the original: SHA256(key) and both AES
 * key schedules depend only on 'key', so they are computed once before the
 * loop instead of once per sector.
 */
static void decrypt_aes_cbc_essiv(unsigned char *src, unsigned char *dst,
    unsigned char *key, int size, struct custom_salt *cs)
{
	AES_KEY enckey;                 /* schedule for the ESSIV generator */
	AES_KEY deckey;                 /* schedule for the bulk decryption */
	unsigned char essiv[16];
	unsigned char essivhash[32];
	int a;
	SHA256_CTX ctx;
	unsigned char sectorbuf[16];
	unsigned char zeroiv[16];

	/* Loop-invariant setup: ESSIV key = SHA256(master key candidate). */
	SHA256_Init(&ctx);
	SHA256_Update(&ctx, key, john_ntohl(cs->myphdr.keyBytes));
	SHA256_Final(essivhash, &ctx);
	AES_set_encrypt_key(essivhash, 256, &enckey);
	AES_set_decrypt_key(key, john_ntohl(cs->myphdr.keyBytes) * 8, &deckey);

	for (a = 0; a < (size / 512); a++) {
		memset(sectorbuf, 0, 16);
		memset(zeroiv, 0, 16);
		memset(essiv, 0, 16);
		/* Sector number, little-endian, in the first 4 bytes. */
#if ARCH_LITTLE_ENDIAN
		memcpy(sectorbuf, &a, 4);
#else
		{ int b = JOHNSWAP(a); memcpy(sectorbuf, &b, 4); }
#endif
		/* ESSIV: encrypt the sector number under SHA256(key); note
		 * AES_cbc_encrypt updates the IV buffer, hence fresh zeroiv
		 * and essiv each iteration. */
		AES_cbc_encrypt(sectorbuf, essiv, 16, &enckey, zeroiv, AES_ENCRYPT);
		AES_cbc_encrypt((src + a * 512), (dst + a * 512), 512, &deckey,
		    essiv, AES_DECRYPT);
	}
}
/*
 * Load the encrypted AF-split key material (cs->afsize bytes) from
 * 'filename' into cs->cipherbuf.  Returns 0 on success, -1 on failure;
 * if is_critical is set, a failure terminates the program instead.
 *
 * Bug fix vs. the original: fread() returns the number of complete items
 * read (0 or 1 here), never a negative value, so the old 'readbytes < 0'
 * check was dead code and a short/failed read went undetected.  We now
 * require exactly one complete item.
 */
static int hash_plugin_parse_hash(char *filename, struct custom_salt *cs, int is_critical)
{
	FILE *myfile;
	int readbytes;

	myfile = jtr_fopen(filename, "rb");
	if (!myfile) {
		fprintf(stderr, "\n%s : %s!\n", filename, strerror(errno));
		return -1;
	}
	// can this go over 4gb?
	cs->cipherbuf = mem_alloc_tiny(cs->afsize + 1, MEM_ALIGN_NONE);
	if (!cs->cipherbuf)
		goto bad;
	/* One item of cs->afsize bytes; anything but 1 means a short read. */
	readbytes = fread(cs->cipherbuf, cs->afsize, 1, myfile);
	if (readbytes != 1) {
		fprintf(stderr, "%s : unable to read required data\n",
		filename);
		goto bad;
	}
	fclose(myfile);
	return 0;
bad:
	fclose(myfile);
	if (is_critical) {
		fprintf(stderr, "\nLUKS plug-in is unable to continue due to errors!\n");
		exit(-1);
	}
	return -1;
}
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
/*
 * One-time format initialization: scale keys-per-crypt for OpenMP and
 * allocate the per-candidate key and digest buffers.  Also prints a
 * one-shot warning that the LUKS hash representation will change.
 */
static void init(struct fmt_main *self)
{
static int warned = 0;
// extern struct fmt_main fmt_luks;
#ifdef _OPENMP
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
/* Candidate passwords and their computed mkDigest values, indexed by
 * key slot; sized after the (possibly OMP-scaled) max_keys_per_crypt. */
saved_key = mem_calloc_tiny(sizeof(*saved_key) *
self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
/*
 * LUKS format will need to be redesigned to address the issues mentioned in
 * https://github.com/magnumripper/JohnTheRipper/issues/557.
 * This will require a change in john's hash representation for LUKS format.
 * The redesign will happen after the next official jumbo release.
 * To avoid having to support the current LUKS hash representation forever,
 * just print a warning that the hash representation will change in future releases.
 *
 * So far, no "official" jumbo release supports the LUKS format, currently only
 * users of bleeding-jumbo may have used LUKS format. These users should be able
 * to re-run luks2john and retry the passwords that have been stored for the current LUKS hashes
 * once the redesign of john's LUKS format implementation has been completed.)
 */
/* warned++ ensures the warning is emitted at most once per process. */
if (!options.listconf && !(options.flags & FLG_TEST_CHK) && warned++ == 0) {
fprintf(stderr,
"WARNING, LUKS format hash representation will change in future releases,\n"
"see doc/README.LUKS\n"); // FIXME: address github issue #557 after 1.8.0-jumbo-1
fflush(stderr);
}
// This printf will 'help' debug a system that truncates that monster hash, but does not cause compiler to die.
// printf ("length=%d end=%s\n", strlen(fmt_luks.params.tests[0].ciphertext), &((fmt_luks.params.tests[0].ciphertext)[strlen(fmt_luks.params.tests[0].ciphertext)-30]));
}
/*
 * Return nonzero iff every character of the NUL-terminated string q is a
 * hex digit (per the atoi16 table; 0x7F marks a non-hex byte).  An empty
 * string yields nonzero.
 */
static int ishex(char *q)
{
	char *p = q;

	while (*p != '\0' && atoi16[ARCH_INDEX(*p)] != 0x7F)
		p++;
	return *p == '\0';
}
/*
 * Sanity-check a "$luks$..." ciphertext line.  Returns 1 if the fields
 * tokenize and size-check correctly, 0 otherwise.  Two layouts exist:
 * inlined (header + base64 data in the hash line) and non-inlined (a
 * path to a file holding the key material).
 *
 * NOTE(review): uses strtok(), which is not reentrant -- verify no caller
 * runs valid() concurrently.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
char *ctcopy;
char *keeptr;
char *p, *q;
int is_inlined;
int res;
static struct custom_salt cs;
if (strncmp(ciphertext, "$luks$", 6) != 0)
return 0;
ctcopy = strdup(ciphertext);
keeptr = ctcopy;
ctcopy += 6; /* skip the "$luks$" tag */
if ((p = strtok(ctcopy, "$")) == NULL) /* is_inlined */
goto err;
is_inlined = atoi(p);
if (is_inlined) {
if ((p = strtok(NULL, "$")) == NULL) /* header size in bytes */
goto err;
res = atoi(p);
if (res != sizeof(struct luks_phdr))
goto err;
if ((p = strtok(NULL, "$")) == NULL) /* hex-encoded header */
goto err;
if (res * 2 != strlen(p))
goto err;
if ((p = strtok(NULL, "$")) == NULL) /* afsize */
goto err;
res = atoi(p);
if ((p = strtok(NULL, "$")) == NULL) /* base64 key material */
goto err;
if ((p = strtok(NULL, "$")) == NULL) /* mkDigest */
goto err;
if (strlen(p) != LUKS_DIGESTSIZE * 2)
goto err;
if (!ishex(p))
goto err;
}
else {
if ((p = strtok(NULL, "$")) == NULL) /* path */
goto err;
q = p;
if ((p = strtok(NULL, "$")) == NULL) /* mkDigest */
goto err;
/* more tests */
/* Non-critical load: a bad file only makes this hash invalid. */
if (hash_plugin_parse_hash(q, &cs, 0) != 0) {
/* NOTE(review): cipherbuf comes from mem_alloc_tiny() in
 * hash_plugin_parse_hash(); freeing tiny-pool memory with
 * MEM_FREE looks wrong -- confirm against memory.h semantics. */
MEM_FREE(cs.cipherbuf);
return 0;
}
MEM_FREE(cs.cipherbuf);
}
MEM_FREE(keeptr);
return 1;
err:
MEM_FREE(keeptr);
return 0;
}
/*
 * Parse a valid()-checked ciphertext into the static custom_salt: decode
 * the hex LUKS header, load the encrypted key material (inline base64 or
 * from the referenced file), and pick the cheapest active key slot
 * (lowest passwordIterations with active == 0x00ac71f3).
 */
static void *get_salt(char *ciphertext)
{
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
char *p;
int is_inlined;
int res;
int i;
int cnt;
unsigned char *out;
static struct custom_salt cs;
unsigned int bestiter = 0xFFFFFFFF;
size_t size;
ctcopy += 6; /* skip "$luks$" */
memset(&cs, 0, sizeof(cs));
out = (unsigned char*)&cs.myphdr;
p = strtok(ctcopy, "$");
is_inlined = atoi(p);
/* common handling */
p = strtok(NULL, "$");
res = atoi(p);
/* NOTE(review): assert() on data parsed from the hash line aborts the
 * process on malformed input in non-NDEBUG builds -- valid() should
 * guarantee this, but an explicit error path would be safer. */
assert(res == sizeof(struct luks_phdr));
p = strtok(NULL, "$");
/* Hex-decode the on-disk header straight into cs.myphdr. */
for (i = 0; i < res; i++) {
out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
p += 2;
}
p = strtok(NULL, "$");
res = atoi(p);
/* cs.bestslot is still 0 here (memset above); this assumes all key
 * slots share the same stripe count -- TODO confirm. */
cs.afsize = af_sectors(john_ntohl(cs.myphdr.keyBytes),
john_ntohl(cs.myphdr.keyblock[cs.bestslot].stripes));
assert(res == cs.afsize);
if (is_inlined) {
p = strtok(NULL, "$");
/* Worst-case decoded size for the base64 payload, plus slack. */
size = (strlen(p) + 20) / 4 * 3 + 1;
cs.cipherbuf = mem_alloc_tiny(size, MEM_ALIGN_NONE);
base64_decode(p, strlen(p), (char*)cs.cipherbuf);
}
else {
p = strtok(NULL, "$");
p = strtok(NULL, "$");
strcpy(cs.path, p);
/* Critical load: exits on failure. */
hash_plugin_parse_hash(cs.path, &cs, 1);
}
/* Choose the active slot with the fewest PBKDF2 iterations (> 1). */
for (cnt = 0; cnt < LUKS_NUMKEYS; cnt++) {
if ((john_ntohl(cs.myphdr.keyblock[cnt].passwordIterations) < bestiter)
&& (john_ntohl(cs.myphdr.keyblock[cnt].passwordIterations) > 1) &&
(john_ntohl(cs.myphdr.keyblock[cnt].active) == 0x00ac71f3)) {
cs.bestslot = cnt;
cs.bestiter =
john_ntohl(cs.myphdr.keyblock[cnt].passwordIterations);
}
}
MEM_FREE(keeptr);
return (void *)&cs;
}
/*
 * Decode the trailing hex field of the ciphertext (the mkDigest) into a
 * static, word-aligned LUKS_DIGESTSIZE-byte buffer and return it.
 */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[LUKS_DIGESTSIZE];
		ARCH_WORD dummy;
	} buf;
	unsigned char *bin = buf.c;
	char *hex;
	int i;

	/* The mkDigest is everything after the last '$'. */
	hex = strrchr(ciphertext, '$') + 1;
	for (i = 0; i < LUKS_DIGESTSIZE; i++, hex += 2)
		bin[i] = (atoi16[ARCH_INDEX(hex[0])] << 4) |
			atoi16[ARCH_INDEX(hex[1])];
	return bin;
}
/* Standard john hash-table lookup helpers: progressively wider low-bit
 * masks of the first 32-bit word of the computed master-key digest. */
static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; }
static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; }
static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; }
static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; }
static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; }
static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; }
static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; }
/* Select the parsed LUKS salt to be used by subsequent crypt_all() calls. */
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
/*
 * Compute the LUKS master-key digest for each candidate password:
 * PBKDF2(password) -> slot key, AES-CBC-ESSIV decrypt of the AF-split
 * material, AF_merge -> master key candidate, then PBKDF2(master key)
 * -> crypt_out[index], which cmp_* compare against the stored mkDigest.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int index = 0;
/* NOTE(review): without _OPENMP the 'for' is compiled out and only
 * index 0 is processed -- presumably keys-per-crypt is 1 in that
 * configuration; confirm against the format's key-count macros. */
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index++)
#endif
{
unsigned char keycandidate[255];
unsigned char masterkeycandidate[255];
unsigned char *af_decrypted = mem_alloc(cur_salt->afsize + 20);
char *password = saved_key[index];
int iterations = cur_salt->bestiter;
int dklen = john_ntohl(cur_salt->myphdr.keyBytes);
// printf("itertations %d %d %d\n", iterations, dklen, cur_salt->bestslot);
// Get pbkdf2 of the password to obtain decryption key
derive_key((const uint8_t*)password, strlen(password),
(const uint8_t*)(cur_salt->myphdr.keyblock[cur_salt->bestslot].passwordSalt),
LUKS_SALTSIZE,
iterations,
keycandidate,
dklen);
// Decrypt the blocksi
decrypt_aes_cbc_essiv(cur_salt->cipherbuf, af_decrypted, keycandidate, cur_salt->afsize, cur_salt);
// AFMerge the blocks
AF_merge(af_decrypted, masterkeycandidate, cur_salt->afsize,
john_ntohl(cur_salt->myphdr.keyblock[cur_salt->bestslot].stripes));
// pbkdf2 again
derive_key(masterkeycandidate,
john_ntohl(cur_salt->myphdr.keyBytes),
(const uint8_t*)cur_salt->myphdr.mkDigestSalt,
LUKS_SALTSIZE,
john_ntohl(cur_salt->myphdr.mkDigestIterations),
(unsigned char*)crypt_out[index],
LUKS_DIGESTSIZE);
MEM_FREE(af_decrypted);
}
return count;
}
/*
 * Return 1 if any computed digest matches the candidate binary.
 *
 * Fix: the loop used to be wrapped in #ifdef _OPENMP, so non-OpenMP
 * builds compared only index 0 (relying on count always being 1 there).
 * Looping unconditionally is equivalent when count == 1 and correct for
 * any count, so the guard is removed.
 */
static int cmp_all(void *binary, int count)
{
	int index;

	for (index = 0; index < count; index++)
		if (!memcmp(binary, crypt_out[index], LUKS_DIGESTSIZE))
			return 1;
	return 0;
}
/* Exact digest comparison for a single candidate index. */
static int cmp_one(void *binary, int index)
{
	return memcmp(binary, crypt_out[index], LUKS_DIGESTSIZE) == 0;
}
/* Nothing beyond cmp_one() to verify: the full digest was already
 * compared, so every match is exact. */
static int cmp_exact(char *source, int index)
{
	(void) source;
	(void) index;
	return 1;
}
/* Store a candidate password, truncated to PLAINTEXT_LENGTH bytes and
 * NUL-terminated. */
static void luks_set_key(char *key, int index)
{
	size_t length = strlen(key);

	if (length > PLAINTEXT_LENGTH)
		length = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, length);
	saved_key[index][length] = 0;
}
/* Return the stored plaintext candidate for this index. */
static char *get_key(int index)
{
	return saved_key[index];
}
/*
 * Format descriptor: registers the LUKS routines above with the John the
 * Ripper cracker core.
 */
struct fmt_main fmt_luks = {
	{ /* format parameters */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
		{ NULL }, /* no tunable cost parameters reported */
#endif
		luks_tests
	}, { /* format methods */
		init,
		fmt_default_done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
#if FMT_MAIN_VERSION > 11
		{ NULL }, /* no tunable cost functions */
#endif
		fmt_default_source,
		{ /* binary_hash functions, paired with get_hash below */
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		set_salt,
		luks_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{ /* get_hash functions */
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
distort.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD IIIII SSSSS TTTTT OOO RRRR TTTTT %
% D D I SS T O O R R T %
% D D I SSS T O O RRRR T %
% D D I SS T O O R R T %
% DDDD IIIII SSSSS T OOO R R T %
% %
% %
% MagickCore Image Distortion Methods %
% %
% Software Design %
% Cristy %
% Anthony Thyssen %
% June 2007 %
% %
% %
% Copyright @ 2007 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/distort.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/image.h"
#include "MagickCore/linked-list.h"
#include "MagickCore/list.h"
#include "MagickCore/matrix.h"
#include "MagickCore/matrix-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/registry.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/shear.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
/*
Numerous internal routines for image distortions.
*/
static inline void AffineArgsToCoefficients(double *affine)
{
  /* Permute the external affine argument order into the internal
     coefficient order c0,c2,c4,c1,c3,c5; slots 0 and 5 are unchanged. */
  const double a1 = affine[1];
  const double a2 = affine[2];
  const double a3 = affine[3];
  const double a4 = affine[4];

  affine[1]=a2;
  affine[2]=a4;
  affine[3]=a1;
  affine[4]=a3;
}
static inline void CoefficientsToAffineArgs(double *coeff)
{
  /* Inverse permutation of AffineArgsToCoefficients(): map the internal
     coefficient order c0..c5 back to the external argument order; slots
     0 and 5 are unchanged. */
  const double c1 = coeff[1];
  const double c2 = coeff[2];
  const double c3 = coeff[3];
  const double c4 = coeff[4];

  coeff[1]=c3;
  coeff[2]=c1;
  coeff[3]=c4;
  coeff[4]=c2;
}
static void InvertAffineCoefficients(const double *coeff,double *inverse)
{
  /* Closed-form inverse of a 2x3 affine map ("Digital Image Warping",
     G. Wolberg, p. 50).  PerceptibleReciprocal() guards against a zero
     determinant. */
  const double det=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[1]*coeff[3]);

  /* linear part (adjugate scaled by 1/det) */
  inverse[0]=det*coeff[4];
  inverse[1]=det*(-coeff[1]);
  inverse[3]=det*(-coeff[3]);
  inverse[4]=det*coeff[0];
  /* translation part */
  inverse[2]=det*(coeff[1]*coeff[5]-coeff[2]*coeff[4]);
  inverse[5]=det*(coeff[2]*coeff[3]-coeff[0]*coeff[5]);
}
static void InvertPerspectiveCoefficients(const double *coeff,
  double *inverse)
{
  /* Inverse of a perspective (homogeneous 3x3) transform via its
     adjugate ("Digital Image Warping", G. Wolberg, p. 53).
     PerceptibleReciprocal() guards against a zero determinant. */
  const double det=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[3]*coeff[1]);

  inverse[0]=det*(coeff[4]-coeff[7]*coeff[5]);
  inverse[1]=det*(coeff[7]*coeff[2]-coeff[1]);
  inverse[2]=det*(coeff[1]*coeff[5]-coeff[4]*coeff[2]);
  inverse[3]=det*(coeff[6]*coeff[5]-coeff[3]);
  inverse[4]=det*(coeff[0]-coeff[6]*coeff[2]);
  inverse[5]=det*(coeff[3]*coeff[2]-coeff[0]*coeff[5]);
  inverse[6]=det*(coeff[3]*coeff[7]-coeff[6]*coeff[4]);
  inverse[7]=det*(coeff[6]*coeff[1]-coeff[0]*coeff[7]);
}
/*
* Polynomial Term Defining Functions
*
* Order must either be an integer, or 1.5 to produce
 * the 2 dimensional polynomial function...
* affine 1 (3) u = c0 + c1*x + c2*y
* bilinear 1.5 (4) u = '' + c3*x*y
* quadratic 2 (6) u = '' + c4*x*x + c5*y*y
* cubic 3 (10) u = '' + c6*x^3 + c7*x*x*y + c8*x*y*y + c9*y^3
* quartic 4 (15) u = '' + c10*x^4 + ... + c14*y^4
* quintic 5 (21) u = '' + c15*x^5 + ... + c20*y^5
* number in parenthesis minimum number of points needed.
* Anything beyond quintic, has not been implemented until
* a more automated way of determining terms is found.
* Note the slight re-ordering of the terms for a quadratic polynomial
* which is to allow the use of a bi-linear (order=1.5) polynomial.
* All the later polynomials are ordered simply from x^N to y^N
*/
static size_t poly_number_terms(double order)
{
  /* Return the number of terms for a 2d polynomial of the given order,
     or 0 for an invalid order.  Valid orders are the integers 1..5 and
     exactly 1.5 (the bilinear special case).

     Fix: the fractional-order test compared the signed difference
     (order-1.5) > MagickEpsilon, which accepted any fractional order
     below 1.5 (e.g. 1.2); use the magnitude so only 1.5 passes, matching
     the caller's "integer 1 to 5, or 1.5" error message. */
  if ( order < 1 || order > 5 ||
       ( order != floor(order) && fabs(order-1.5) > MagickEpsilon) )
    return 0; /* invalid polynomial order */
  return((size_t) floor((order+1)*(order+2)/2));
}
static double poly_basis_fn(ssize_t n, double x, double y)
{
  /* Evaluate the n'th 2d polynomial basis term x^a * y^b at (x,y).
     Terms are ordered constant, affine, bilinear, quadratic, ... quintic;
     within each order the x exponent falls as the y exponent rises. */
  static const int x_power[21] =
    { 0, 1,0, 1, 2,0, 3,2,1,0, 4,3,2,1,0, 5,4,3,2,1,0 };
  static const int y_power[21] =
    { 0, 0,1, 1, 0,2, 0,1,2,3, 0,1,2,3,4, 0,1,2,3,4,5 };

  double
    product;

  ssize_t
    j;

  if ((n < 0) || (n > 20))
    return( 0.0 );  /* should never happen */
  product=1.0;
  for (j=0; j < x_power[n]; j++)
    product*=x;
  for (j=0; j < y_power[n]; j++)
    product*=y;
  return(product);
}
static const char *poly_basis_str(ssize_t n)
{
  /* Return the symbolic "*ii"/"*jj" factor string for the n'th 2d
     polynomial basis term (same ordering as poly_basis_fn()). */
  static const char *term_strings[21] =
    {
      "",                                                      /* constant */
      "*ii", "*jj",                                            /* affine */
      "*ii*jj",                                                /* bilinear */
      "*ii*ii", "*jj*jj",                                      /* quadratic */
      "*ii*ii*ii", "*ii*ii*jj", "*ii*jj*jj", "*jj*jj*jj",      /* cubic */
      "*ii*ii*ii*ii", "*ii*ii*ii*jj", "*ii*ii*jj*jj",
      "*ii*jj*jj*jj", "*jj*jj*jj*jj",                          /* quartic */
      "*ii*ii*ii*ii*ii", "*ii*ii*ii*ii*jj", "*ii*ii*ii*jj*jj",
      "*ii*ii*jj*jj*jj", "*ii*jj*jj*jj*jj", "*jj*jj*jj*jj*jj"  /* quintic */
    };

  if ((n < 0) || (n > 20))
    return( "UNKNOWN" );  /* should never happen */
  return(term_strings[n]);
}
static double poly_basis_dx(ssize_t n, double x, double y)
{
  /* x-derivative of the n'th polynomial basis term: the term with one
     factor of x removed, or 0 when the term has no x factor.
     NOTE(review): the integer multiplier of a true derivative (e.g. 2
     for x*x, 3 for x^3) is omitted, exactly as in the original switch --
     callers appear to need this only up to that constant; confirm. */
  static const int x_power[21] =
    { 0, 1,0, 1, 2,0, 3,2,1,0, 4,3,2,1,0, 5,4,3,2,1,0 };
  static const int y_power[21] =
    { 0, 0,1, 1, 0,2, 0,1,2,3, 0,1,2,3,4, 0,1,2,3,4,5 };

  double
    product;

  ssize_t
    j;

  if ((n < 0) || (n > 20) || (x_power[n] == 0))
    return( 0.0 );
  product=1.0;
  for (j=0; j < x_power[n]-1; j++)
    product*=x;
  for (j=0; j < y_power[n]; j++)
    product*=y;
  return(product);
}
static double poly_basis_dy(ssize_t n, double x, double y)
{
  /* y-derivative of the n'th polynomial basis term (like poly_basis_dx(),
     the integer multiplier of a true derivative is omitted).
     For n > 5 it delegates to poly_basis_dx(n-1,...): within each order
     the terms run from x^N down to y^N, so term n-1 has one more x and
     one fewer y than term n, making its x-derivative equal this term's
     y-derivative. */
  switch(n) {
    case 0: return( 0.0 ); /* constant */
    case 1: return( 0.0 );
    case 2: return( 1.0 ); /* affine order = 1 terms = 3 */
    case 3: return( x ); /* bilinear order = 1.5 terms = 4 */
    case 4: return( 0.0 );
    case 5: return( y ); /* quadratic order = 2 terms = 6 */
    default: return( poly_basis_dx(n-1,x,y) ); /* weird but true */
  }
  /* NOTE: the only reason that last is not true for 'quadratic'
     is due to the re-arrangement of terms to allow for 'bilinear'
  */
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A f f i n e T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AffineTransformImage() transforms an image as dictated by the affine matrix.
% It allocates the memory necessary for the new Image structure and returns
% a pointer to the new image.
%
% The format of the AffineTransformImage method is:
%
% Image *AffineTransformImage(const Image *image,
% AffineMatrix *affine_matrix,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o affine_matrix: the affine matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AffineTransformImage(const Image *image,
  const AffineMatrix *affine_matrix,ExceptionInfo *exception)
{
  double
    distort[6];

  Image
    *deskew_image;

  /*
    Affine transform image: forward the six affine parameters
    (sx,rx,ry,sy,tx,ty) to DistortImage() as an AffineProjection.
    Fix: assert image is non-NULL before dereferencing it, consistent
    with the NULL asserts already done for affine_matrix and exception.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(affine_matrix != (AffineMatrix *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  distort[0]=affine_matrix->sx;
  distort[1]=affine_matrix->rx;
  distort[2]=affine_matrix->ry;
  distort[3]=affine_matrix->sy;
  distort[4]=affine_matrix->tx;
  distort[5]=affine_matrix->ty;
  deskew_image=DistortImage(image,AffineProjectionDistortion,6,distort,
    MagickTrue,exception);
  return(deskew_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e n e r a t e C o e f f i c i e n t s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GenerateCoefficients() takes user provided input arguments and generates
% the coefficients, needed to apply the specific distortion for either
% distorting images (generally using control points) or generating a color
% gradient from sparsely separated color points.
%
% The format of the GenerateCoefficients() method is:
%
% Image *GenerateCoefficients(const Image *image,DistortMethod method,
% const size_t number_arguments,const double *arguments,
% size_t number_values, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be distorted.
%
% o method: the method of image distortion/ sparse gradient
%
% o number_arguments: the number of arguments given.
%
% o arguments: the arguments for this distortion method.
%
% o number_values: the style and format of given control points, (caller type)
% 0: 2 dimensional mapping of control points (Distort)
% Format: u,v,x,y where u,v is the 'source' of the
% the color to be plotted, for DistortImage()
% N: Interpolation of control points with N values (usually r,g,b)
% Format: x,y,r,g,b mapping x,y to color values r,g,b
% IN future, variable number of values may be given (1 to N)
%
% o exception: return any errors or warnings in this structure
%
% Note that the returned array of double values must be freed by the
% calling method using RelinquishMagickMemory(). This however may change in
% the future to require a more 'method' specific method.
%
% Because of this, this method should not be classed as stable or used
% outside other MagickCore library methods.
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
static double *GenerateCoefficients(const Image *image,
DistortMethod *method,const size_t number_arguments,const double *arguments,
size_t number_values,ExceptionInfo *exception)
{
double
*coeff;
size_t
i;
size_t
number_coefficients, /* number of coefficients to return (array size) */
cp_size, /* number floating point numbers per control point */
cp_x,cp_y, /* the x,y indexes for control point */
cp_values; /* index of values for this control point */
/* number_values Number of values given per control point */
if ( number_values == 0 ) {
/* Image distortion using control points (or other distortion)
That is generate a mapping so that x,y->u,v given u,v,x,y
*/
number_values = 2; /* special case: two values of u,v */
cp_values = 0; /* the values i,j are BEFORE the destination CP x,y */
cp_x = 2; /* location of x,y in input control values */
cp_y = 3;
/* NOTE: cp_values, also used for later 'reverse map distort' tests */
}
else {
cp_x = 0; /* location of x,y in input control values */
cp_y = 1;
cp_values = 2; /* and the other values are after x,y */
/* Typically in this case the values are R,G,B color values */
}
cp_size = number_values+2; /* each CP defintion involves this many numbers */
/* If not enough control point pairs are found for specific distortions
fall back to Affine distortion (allowing 0 to 3 point pairs)
*/
if ( number_arguments < 4*cp_size &&
( *method == BilinearForwardDistortion
|| *method == BilinearReverseDistortion
|| *method == PerspectiveDistortion
) )
*method = AffineDistortion;
number_coefficients=0;
switch (*method) {
case AffineDistortion:
case RigidAffineDistortion:
/* also BarycentricColorInterpolate: */
number_coefficients=3*number_values;
break;
case PolynomialDistortion:
/* number of coefficents depend on the given polynomal 'order' */
i = poly_number_terms(arguments[0]);
number_coefficients = 2 + i*number_values;
if ( i == 0 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : '%s'","Polynomial",
"Invalid order, should be interger 1 to 5, or 1.5");
return((double *) NULL);
}
if ( number_arguments < 1+i*cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
"Polynomial", (double) i);
return((double *) NULL);
}
break;
case BilinearReverseDistortion:
number_coefficients=4*number_values;
break;
/*
The rest are constants as they are only used for image distorts
*/
case BilinearForwardDistortion:
number_coefficients=10; /* 2*4 coeff plus 2 constants */
cp_x = 0; /* Reverse src/dest coords for forward mapping */
cp_y = 1;
cp_values = 2;
break;
#if 0
case QuadraterialDistortion:
number_coefficients=19; /* BilinearForward + BilinearReverse */
#endif
break;
case ShepardsDistortion:
number_coefficients=1; /* The power factor to use */
break;
case ArcDistortion:
number_coefficients=5;
break;
case ScaleRotateTranslateDistortion:
case AffineProjectionDistortion:
case Plane2CylinderDistortion:
case Cylinder2PlaneDistortion:
number_coefficients=6;
break;
case PolarDistortion:
case DePolarDistortion:
number_coefficients=8;
break;
case PerspectiveDistortion:
case PerspectiveProjectionDistortion:
number_coefficients=9;
break;
case BarrelDistortion:
case BarrelInverseDistortion:
number_coefficients=10;
break;
default:
perror("unknown method given"); /* just fail assertion */
}
/* allocate the array of coefficients needed */
coeff=(double *) AcquireQuantumMemory(number_coefficients,sizeof(*coeff));
if (coeff == (double *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","%s",
"GenerateCoefficients");
return((double *) NULL);
}
/* zero out coefficients array */
for (i=0; i < number_coefficients; i++)
coeff[i] = 0.0;
switch (*method)
{
case AffineDistortion:
{
/* Affine Distortion
v = c0*x + c1*y + c2
for each 'value' given
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
"Affine", 1.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* handle special cases of not enough arguments */
if ( number_arguments == cp_size ) {
/* Only 1 CP Set Given */
if ( cp_values == 0 ) {
/* image distortion - translate the image */
coeff[0] = 1.0;
coeff[2] = arguments[0] - arguments[2];
coeff[4] = 1.0;
coeff[5] = arguments[1] - arguments[3];
}
else {
/* sparse gradient - use the values directly */
for (i=0; i<number_values; i++)
coeff[i*3+2] = arguments[cp_values+i];
}
}
else {
/* 2 or more points (usally 3) given.
Solve a least squares simultaneous equation for coefficients.
*/
double
**matrix,
**vectors,
terms[3];
MagickBooleanType
status;
/* create matrix, and a fake vectors matrix */
matrix=AcquireMagickMatrix(3UL,3UL);
vectors=(double **) AcquireQuantumMemory(number_values,
sizeof(*vectors));
if (matrix == (double **) NULL || vectors == (double **) NULL)
{
matrix = RelinquishMagickMatrix(matrix, 3UL);
vectors = (double **) RelinquishMagickMemory(vectors);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x3 vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[i*3]);
/* Add given control point pairs for least squares solving */
for (i=0; i < number_arguments; i+=cp_size) {
terms[0] = arguments[i+cp_x]; /* x */
terms[1] = arguments[i+cp_y]; /* y */
terms[2] = 1; /* 1 */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),3UL,number_values);
}
if ( number_arguments == 2*cp_size ) {
/* Only two pairs were given, but we need 3 to solve the affine.
Fake extra coordinates by rotating p1 around p0 by 90 degrees.
x2 = x0 - (y1-y0) y2 = y0 + (x1-x0)
*/
terms[0] = arguments[cp_x]
- ( arguments[cp_size+cp_y] - arguments[cp_y] ); /* x2 */
terms[1] = arguments[cp_y] +
+ ( arguments[cp_size+cp_x] - arguments[cp_x] ); /* y2 */
terms[2] = 1; /* 1 */
if ( cp_values == 0 ) {
/* Image Distortion - rotate the u,v coordients too */
double
uv2[2];
uv2[0] = arguments[0] - arguments[5] + arguments[1]; /* u2 */
uv2[1] = arguments[1] + arguments[4] - arguments[0]; /* v2 */
LeastSquaresAddTerms(matrix,vectors,terms,uv2,3UL,2UL);
}
else {
/* Sparse Gradient - use values of p0 for linear gradient */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[cp_values]),3UL,number_values);
}
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,3UL,number_values);
matrix = RelinquishMagickMatrix(matrix, 3UL);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
}
return(coeff);
}
case RigidAffineDistortion:
{
double
inverse[6],
**matrix,
terms[5],
*vectors[1];
MagickBooleanType
status;
/*
Rigid affine (also known as a Euclidean transform), restricts affine
coefficients to 4 (S, R, Tx, Ty) with Sy=Sx and Ry = -Rx so that one has
only scale, rotation and translation. No skew.
*/
if (((number_arguments % cp_size) != 0) || (number_arguments < cp_size))
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
CommandOptionToMnemonic(MagickDistortOptions,*method),2.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/*
Rigid affine requires a 4x4 least-squares matrix (zeroed).
*/
matrix=AcquireMagickMatrix(4UL,4UL);
if (matrix == (double **) NULL)
{
coeff=(double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","%s",
CommandOptionToMnemonic(MagickDistortOptions,*method));
return((double *) NULL);
}
/*
Add control points for least squares solving.
*/
vectors[0]=(&(coeff[0]));
for (i=0; i < number_arguments; i+=4)
{
terms[0]=arguments[i+0];
terms[1]=(-arguments[i+1]);
terms[2]=1.0;
terms[3]=0.0;
LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+2]),4UL,1UL);
terms[0]=arguments[i+1];
terms[1]=arguments[i+0];
terms[2]=0.0;
terms[3]=1.0;
LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+3]),4UL,1UL);
}
/*
Solve for least-squares coefficients.
*/
status=GaussJordanElimination(matrix,vectors,4UL,1UL);
matrix=RelinquishMagickMatrix(matrix,4UL);
if (status == MagickFalse)
{
coeff=(double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions,*method));
return((double *) NULL);
}
/*
Convert (S, R, Tx, Ty) to an affine projection.
*/
inverse[0]=coeff[0];
inverse[1]=coeff[1];
inverse[2]=(-coeff[1]);
inverse[3]=coeff[0];
inverse[4]=coeff[2];
inverse[5]=coeff[3];
AffineArgsToCoefficients(inverse);
InvertAffineCoefficients(inverse,coeff);
*method=AffineDistortion;
return(coeff);
}
case AffineProjectionDistortion:
{
/*
Arguments: Affine Matrix (forward mapping)
Arguments sx, rx, ry, sy, tx, ty
Where u = sx*x + ry*y + tx
v = rx*x + sy*y + ty
Returns coefficients (in there inverse form) ordered as...
sx ry tx rx sy ty
AffineProjection Distortion Notes...
+ Will only work with a 2 number_values for Image Distortion
+ Can not be used for generating a sparse gradient (interpolation)
*/
double inverse[8];
if (number_arguments != 6) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Needs 6 coeff values'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* FUTURE: trap test for sx*sy-rx*ry == 0 (determinant = 0, no inverse) */
for(i=0; i<6UL; i++ )
inverse[i] = arguments[i];
AffineArgsToCoefficients(inverse); /* map into coefficents */
InvertAffineCoefficients(inverse, coeff); /* invert */
*method = AffineDistortion;
return(coeff);
}
case ScaleRotateTranslateDistortion:
{
/* Scale, Rotate and Translate Distortion
An alternative Affine Distortion
Argument options, by number of arguments given:
7: x,y, sx,sy, a, nx,ny
6: x,y, s, a, nx,ny
5: x,y, sx,sy, a
4: x,y, s, a
3: x,y, a
2: s, a
1: a
Where actions are (in order of application)
x,y 'center' of transforms (default = image center)
sx,sy scale image by this amount (default = 1)
a angle of rotation (argument required)
nx,ny move 'center' here (default = x,y or no movement)
And convert to affine mapping coefficients
ScaleRotateTranslate Distortion Notes...
+ Does not use a set of CPs in any normal way
+ Will only work with a 2 number_valuesal Image Distortion
+ Cannot be used for generating a sparse gradient (interpolation)
*/
double
cosine, sine,
x,y,sx,sy,a,nx,ny;
/* set default center, and default scale */
x = nx = (double)(image->columns)/2.0 + (double)image->page.x;
y = ny = (double)(image->rows)/2.0 + (double)image->page.y;
sx = sy = 1.0;
switch ( number_arguments ) {
case 0:
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Needs at least 1 argument'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
case 1:
a = arguments[0];
break;
case 2:
sx = sy = arguments[0];
a = arguments[1];
break;
default:
x = nx = arguments[0];
y = ny = arguments[1];
switch ( number_arguments ) {
case 3:
a = arguments[2];
break;
case 4:
sx = sy = arguments[2];
a = arguments[3];
break;
case 5:
sx = arguments[2];
sy = arguments[3];
a = arguments[4];
break;
case 6:
sx = sy = arguments[2];
a = arguments[3];
nx = arguments[4];
ny = arguments[5];
break;
case 7:
sx = arguments[2];
sy = arguments[3];
a = arguments[4];
nx = arguments[5];
ny = arguments[6];
break;
default:
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Too Many Arguments (7 or less)'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
break;
}
/* Trap if sx or sy == 0 -- image is scaled out of existance! */
if ( fabs(sx) < MagickEpsilon || fabs(sy) < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Zero Scale Given'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* Save the given arguments as an affine distortion */
a=DegreesToRadians(a); cosine=cos(a); sine=sin(a);
*method = AffineDistortion;
coeff[0]=cosine/sx;
coeff[1]=sine/sx;
coeff[2]=x-nx*coeff[0]-ny*coeff[1];
coeff[3]=(-sine)/sy;
coeff[4]=cosine/sy;
coeff[5]=y-nx*coeff[3]-ny*coeff[4];
return(coeff);
}
case PerspectiveDistortion:
{ /*
Perspective Distortion (a ratio of affine distortions)
p(x,y) c0*x + c1*y + c2
u = ------ = ------------------
r(x,y) c6*x + c7*y + 1
q(x,y) c3*x + c4*y + c5
v = ------ = ------------------
r(x,y) c6*x + c7*y + 1
c8 = Sign of 'r', or the denominator affine, for the actual image.
This determines what part of the distorted image is 'ground'
side of the horizon, the other part is 'sky' or invalid.
Valid values are +1.0 or -1.0 only.
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
Perspective Distortion Notes...
+ Can be thought of as ratio of 3 affine transformations
+ Not separatable: r() or c6 and c7 are used by both equations
+ All 8 coefficients must be determined simultaniously
+ Will only work with a 2 number_valuesal Image Distortion
+ Can not be used for generating a sparse gradient (interpolation)
+ It is not linear, but is simple to generate an inverse
+ All lines within an image remain lines.
+ but distances between points may vary.
*/
double
**matrix,
*vectors[1],
terms[8];
size_t
cp_u = cp_values,
cp_v = cp_values+1;
MagickBooleanType
status;
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size*4 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* fake 1x8 vectors matrix directly using the coefficients array */
vectors[0] = &(coeff[0]);
/* 8x8 least-squares matrix (zeroed) */
matrix = AcquireMagickMatrix(8UL,8UL);
if (matrix == (double **) NULL) {
coeff=(double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* Add control points for least squares solving */
for (i=0; i < number_arguments; i+=4) {
terms[0]=arguments[i+cp_x]; /* c0*x */
terms[1]=arguments[i+cp_y]; /* c1*y */
terms[2]=1.0; /* c2*1 */
terms[3]=0.0;
terms[4]=0.0;
terms[5]=0.0;
terms[6]=-terms[0]*arguments[i+cp_u]; /* 1/(c6*x) */
terms[7]=-terms[1]*arguments[i+cp_u]; /* 1/(c7*y) */
LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_u]),
8UL,1UL);
terms[0]=0.0;
terms[1]=0.0;
terms[2]=0.0;
terms[3]=arguments[i+cp_x]; /* c3*x */
terms[4]=arguments[i+cp_y]; /* c4*y */
terms[5]=1.0; /* c5*1 */
terms[6]=-terms[3]*arguments[i+cp_v]; /* 1/(c6*x) */
terms[7]=-terms[4]*arguments[i+cp_v]; /* 1/(c7*y) */
LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_v]),
8UL,1UL);
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,8UL,1UL);
matrix = RelinquishMagickMatrix(matrix, 8UL);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/*
Calculate 9'th coefficient! The ground-sky determination.
What is sign of the 'ground' in r() denominator affine function?
Just use any valid image coordinate (first control point) in
destination for determination of what part of view is 'ground'.
*/
coeff[8] = coeff[6]*arguments[cp_x]
+ coeff[7]*arguments[cp_y] + 1.0;
coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0;
return(coeff);
}
case PerspectiveProjectionDistortion:
{
/*
Arguments: Perspective Coefficents (forward mapping)
*/
if (number_arguments != 8) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'Needs 8 coefficient values'",
CommandOptionToMnemonic(MagickDistortOptions, *method));
return((double *) NULL);
}
/* FUTURE: trap test c0*c4-c3*c1 == 0 (determinate = 0, no inverse) */
InvertPerspectiveCoefficients(arguments, coeff);
/*
Calculate 9'th coefficient! The ground-sky determination.
What is sign of the 'ground' in r() denominator affine function?
Just use any valid image cocodinate in destination for determination.
For a forward mapped perspective the images 0,0 coord will map to
c2,c5 in the distorted image, so set the sign of denominator of that.
*/
coeff[8] = coeff[6]*arguments[2]
+ coeff[7]*arguments[5] + 1.0;
coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0;
*method = PerspectiveDistortion;
return(coeff);
}
case BilinearForwardDistortion:
case BilinearReverseDistortion:
{
/* Bilinear Distortion (Forward mapping)
v = c0*x + c1*y + c2*x*y + c3;
for each 'value' given
This is actually a simple polynomial Distortion! The difference
however is when we need to reverse the above equation to generate a
BilinearForwardDistortion (see below).
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
double
**matrix,
**vectors,
terms[4];
MagickBooleanType
status;
/* check the number of arguments */
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size*4 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* create matrix, and a fake vectors matrix */
matrix=AcquireMagickMatrix(4UL,4UL);
vectors=(double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
if (matrix == (double **) NULL || vectors == (double **) NULL)
{
matrix = RelinquishMagickMatrix(matrix, 4UL);
vectors = (double **) RelinquishMagickMemory(vectors);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x4 vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[i*4]);
/* Add given control point pairs for least squares solving */
for (i=0; i < number_arguments; i+=cp_size) {
terms[0] = arguments[i+cp_x]; /* x */
terms[1] = arguments[i+cp_y]; /* y */
terms[2] = terms[0]*terms[1]; /* x*y */
terms[3] = 1; /* 1 */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),4UL,number_values);
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,4UL,number_values);
matrix = RelinquishMagickMatrix(matrix, 4UL);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
if ( *method == BilinearForwardDistortion ) {
/* Bilinear Forward Mapped Distortion
The above least-squares solved for coefficents but in the forward
direction, due to changes to indexing constants.
i = c0*x + c1*y + c2*x*y + c3;
j = c4*x + c5*y + c6*x*y + c7;
where i,j are in the destination image, NOT the source.
Reverse Pixel mapping however needs to use reverse of these
functions. It required a full page of algbra to work out the
reversed mapping formula, but resolves down to the following...
c8 = c0*c5-c1*c4;
c9 = 2*(c2*c5-c1*c6); // '2*a' in the quadratic formula
i = i - c3; j = j - c7;
b = c6*i - c2*j + c8; // So that a*y^2 + b*y + c == 0
c = c4*i - c0*j; // y = ( -b +- sqrt(bb - 4ac) ) / (2*a)
r = b*b - c9*(c+c);
if ( c9 != 0 )
y = ( -b + sqrt(r) ) / c9;
else
y = -c/b;
x = ( i - c1*y) / ( c1 - c2*y );
NB: if 'r' is negative there is no solution!
NB: the sign of the sqrt() should be negative if image becomes
flipped or flopped, or crosses over itself.
NB: techniqually coefficient c5 is not needed, anymore,
but kept for completness.
See Anthony Thyssen <A.Thyssen@griffith.edu.au>
or Fred Weinhaus <fmw@alink.net> for more details.
*/
coeff[8] = coeff[0]*coeff[5] - coeff[1]*coeff[4];
coeff[9] = 2*(coeff[2]*coeff[5] - coeff[1]*coeff[6]);
}
return(coeff);
}
#if 0
case QuadrilateralDistortion:
{
/* Map a Quadrilateral to a unit square using BilinearReverse
Then map that unit square back to the final Quadrilateral
using BilinearForward.
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
/* UNDER CONSTRUCTION */
return(coeff);
}
#endif
case PolynomialDistortion:
{
/* Polynomial Distortion
First two coefficents are used to hole global polynomal information
c0 = Order of the polynimial being created
c1 = number_of_terms in one polynomial equation
Rest of the coefficients map to the equations....
v = c0 + c1*x + c2*y + c3*x*y + c4*x^2 + c5*y^2 + c6*x^3 + ...
for each 'value' (number_values of them) given.
As such total coefficients = 2 + number_terms * number_values
Input Arguments are sets of control points...
For Distort Images order [u,v, x,y] ...
For Sparse Gradients order [x,y, r,g,b] ...
Polynomial Distortion Notes...
+ UNDER DEVELOPMENT -- Do not expect this to remain as is.
+ Currently polynomial is a reversed mapped distortion.
+ Order 1.5 is fudged to map into a bilinear distortion.
though it is not the same order as that distortion.
*/
double
**matrix,
**vectors,
*terms;
size_t
nterms; /* number of polynomial terms per number_values */
ssize_t
j;
MagickBooleanType
status;
/* first two coefficients hold polynomial order information */
coeff[0] = arguments[0];
coeff[1] = (double) poly_number_terms(arguments[0]);
nterms = (size_t) coeff[1];
/* create matrix, a fake vectors matrix, and least sqs terms */
matrix=AcquireMagickMatrix(nterms,nterms);
vectors=(double **) AcquireQuantumMemory(number_values,
sizeof(*vectors));
terms=(double *) AcquireQuantumMemory(nterms,sizeof(*terms));
if ((matrix == (double **) NULL) || (vectors == (double **) NULL) ||
(terms == (double *) NULL))
{
matrix = RelinquishMagickMatrix(matrix, nterms);
vectors = (double **) RelinquishMagickMemory(vectors);
terms = (double *) RelinquishMagickMemory(terms);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x3 vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[2+i*nterms]);
/* Add given control point pairs for least squares solving */
for (i=1; i < number_arguments; i+=cp_size) { /* NB: start = 1 not 0 */
for (j=0; j < (ssize_t) nterms; j++)
terms[j] = poly_basis_fn(j,arguments[i+cp_x],arguments[i+cp_y]);
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),nterms,number_values);
}
terms = (double *) RelinquishMagickMemory(terms);
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,nterms,number_values);
matrix = RelinquishMagickMatrix(matrix, nterms);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
return(coeff);
}
case ArcDistortion:
{
/* Arc Distortion
Args: arc_width rotate top_edge_radius bottom_edge_radius
All but first argument are optional
arc_width The angle over which to arc the image side-to-side
rotate Angle to rotate image from vertical center
top_radius Set top edge of source image at this radius
bottom_radius Set bootom edge to this radius (radial scaling)
By default, if the radii arguments are nor provided the image radius
is calculated so the horizontal center-line is fits the given arc
without scaling.
The output image size is ALWAYS adjusted to contain the whole image,
and an offset is given to position image relative to the 0,0 point of
the origin, allowing users to use relative positioning onto larger
background (via -flatten).
The arguments are converted to these coefficients
c0: angle for center of source image
c1: angle scale for mapping to source image
c2: radius for top of source image
c3: radius scale for mapping source image
c4: centerline of arc within source image
Note the coefficients use a center angle, so asymptotic join is
furthest from both sides of the source image. This also means that
for arc angles greater than 360 the sides of the image will be
trimmed equally.
Arc Distortion Notes...
+ Does not use a set of CPs
+ Will only work with Image Distortion
+ Can not be used for generating a sparse gradient (interpolation)
*/
if ( number_arguments >= 1 && arguments[0] < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Arc Angle Too Small'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
if ( number_arguments >= 3 && arguments[2] < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Outer Radius Too Small'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
coeff[0] = -MagickPI2; /* -90, place at top! */
if ( number_arguments >= 1 )
coeff[1] = DegreesToRadians(arguments[0]);
else
coeff[1] = MagickPI2; /* zero arguments - center is at top */
if ( number_arguments >= 2 )
coeff[0] += DegreesToRadians(arguments[1]);
coeff[0] /= Magick2PI; /* normalize radians */
coeff[0] -= MagickRound(coeff[0]);
coeff[0] *= Magick2PI; /* de-normalize back to radians */
coeff[3] = (double)image->rows-1;
coeff[2] = (double)image->columns/coeff[1] + coeff[3]/2.0;
if ( number_arguments >= 3 ) {
if ( number_arguments >= 4 )
coeff[3] = arguments[2] - arguments[3];
else
coeff[3] *= arguments[2]/coeff[2];
coeff[2] = arguments[2];
}
coeff[4] = ((double)image->columns-1.0)/2.0;
return(coeff);
}
case PolarDistortion:
case DePolarDistortion:
{
/* (De)Polar Distortion (same set of arguments)
Args: Rmax, Rmin, Xcenter,Ycenter, Afrom,Ato
DePolar can also have the extra arguments of Width, Height
Coefficients 0 to 5 is the sanatized version first 6 input args
Coefficient 6 is the angle to coord ratio and visa-versa
Coefficient 7 is the radius to coord ratio and visa-versa
WARNING: It is possible for Radius max<min and/or Angle from>to
*/
if ( number_arguments == 3
|| ( number_arguments > 6 && *method == PolarDistortion )
|| number_arguments > 8 ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument", "%s : number of arguments",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* Rmax - if 0 calculate appropriate value */
if ( number_arguments >= 1 )
coeff[0] = arguments[0];
else
coeff[0] = 0.0;
/* Rmin - usally 0 */
coeff[1] = number_arguments >= 2 ? arguments[1] : 0.0;
/* Center X,Y */
if ( number_arguments >= 4 ) {
coeff[2] = arguments[2];
coeff[3] = arguments[3];
}
else { /* center of actual image */
coeff[2] = (double)(image->columns)/2.0+image->page.x;
coeff[3] = (double)(image->rows)/2.0+image->page.y;
}
/* Angle from,to - about polar center 0 is downward */
coeff[4] = -MagickPI;
if ( number_arguments >= 5 )
coeff[4] = DegreesToRadians(arguments[4]);
coeff[5] = coeff[4];
if ( number_arguments >= 6 )
coeff[5] = DegreesToRadians(arguments[5]);
if ( fabs(coeff[4]-coeff[5]) < MagickEpsilon )
coeff[5] += Magick2PI; /* same angle is a full circle */
/* if radius 0 or negative, its a special value... */
if ( coeff[0] < MagickEpsilon ) {
/* Use closest edge if radius == 0 */
if ( fabs(coeff[0]) < MagickEpsilon ) {
coeff[0]=MagickMin(fabs(coeff[2]-image->page.x),
fabs(coeff[3]-image->page.y));
coeff[0]=MagickMin(coeff[0],
fabs(coeff[2]-image->page.x-image->columns));
coeff[0]=MagickMin(coeff[0],
fabs(coeff[3]-image->page.y-image->rows));
}
/* furthest diagonal if radius == -1 */
if ( fabs(-1.0-coeff[0]) < MagickEpsilon ) {
double rx,ry;
rx = coeff[2]-image->page.x;
ry = coeff[3]-image->page.y;
coeff[0] = rx*rx+ry*ry;
ry = coeff[3]-image->page.y-image->rows;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
rx = coeff[2]-image->page.x-image->columns;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
ry = coeff[3]-image->page.y;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
coeff[0] = sqrt(coeff[0]);
}
}
/* IF Rmax <= 0 or Rmin < 0 OR Rmax < Rmin, THEN error */
if ( coeff[0] < MagickEpsilon || coeff[1] < -MagickEpsilon
|| (coeff[0]-coeff[1]) < MagickEpsilon ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : Invalid Radius",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* converstion ratios */
if ( *method == PolarDistortion ) {
coeff[6]=(double) image->columns/(coeff[5]-coeff[4]);
coeff[7]=(double) image->rows/(coeff[0]-coeff[1]);
}
else { /* *method == DePolarDistortion */
coeff[6]=(coeff[5]-coeff[4])/image->columns;
coeff[7]=(coeff[0]-coeff[1])/image->rows;
}
return(coeff);
}
case Cylinder2PlaneDistortion:
case Plane2CylinderDistortion:
{
/* 3D Cylinder to/from a Tangential Plane
Projection between a clinder and flat plain from a point on the
center line of the cylinder.
The two surfaces coincide in 3D space at the given centers of
distortion (perpendicular to projection point) on both images.
Args: FOV_arc_width
Coefficents: FOV(radians), Radius, center_x,y, dest_center_x,y
FOV (Field Of View) the angular field of view of the distortion,
across the width of the image, in degrees. The centers are the
points of least distortion in the input and resulting images.
These centers are however determined later.
Coeff 0 is the FOV angle of view of image width in radians
Coeff 1 is calculated radius of cylinder.
Coeff 2,3 center of distortion of input image
Coefficents 4,5 Center of Distortion of dest (determined later)
*/
if ( arguments[0] < MagickEpsilon || arguments[0] > 160.0 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : Invalid FOV Angle",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
coeff[0] = DegreesToRadians(arguments[0]);
if ( *method == Cylinder2PlaneDistortion )
/* image is curved around cylinder, so FOV angle (in radians)
* scales directly to image X coordinate, according to its radius.
*/
coeff[1] = (double) image->columns/coeff[0];
else
/* radius is distance away from an image with this angular FOV */
coeff[1] = (double) image->columns / ( 2 * tan(coeff[0]/2) );
coeff[2] = (double)(image->columns)/2.0+image->page.x;
coeff[3] = (double)(image->rows)/2.0+image->page.y;
coeff[4] = coeff[2];
coeff[5] = coeff[3]; /* assuming image size is the same */
return(coeff);
}
case BarrelDistortion:
case BarrelInverseDistortion:
{
/* Barrel Distortion
Rs=(A*Rd^3 + B*Rd^2 + C*Rd + D)*Rd
BarrelInv Distortion
Rs=Rd/(A*Rd^3 + B*Rd^2 + C*Rd + D)
Where Rd is the normalized radius from corner to middle of image
Input Arguments are one of the following forms (number of arguments)...
3: A,B,C
4: A,B,C,D
5: A,B,C X,Y
6: A,B,C,D X,Y
8: Ax,Bx,Cx,Dx Ay,By,Cy,Dy
10: Ax,Bx,Cx,Dx Ay,By,Cy,Dy X,Y
Returns 10 coefficent values, which are de-normalized (pixel scale)
Ax, Bx, Cx, Dx, Ay, By, Cy, Dy, Xc, Yc
*/
/* Radius de-normalization scaling factor */
double
rscale = 2.0/MagickMin((double) image->columns,(double) image->rows);
/* sanity check number of args must = 3,4,5,6,8,10 or error */
if ( (number_arguments < 3) || (number_arguments == 7) ||
(number_arguments == 9) || (number_arguments > 10) )
{
coeff=(double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument", "%s : number of arguments",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* A,B,C,D coefficients */
coeff[0] = arguments[0];
coeff[1] = arguments[1];
coeff[2] = arguments[2];
if ((number_arguments == 3) || (number_arguments == 5) )
coeff[3] = 1.0 - coeff[0] - coeff[1] - coeff[2];
else
coeff[3] = arguments[3];
/* de-normalize the coefficients */
coeff[0] *= pow(rscale,3.0);
coeff[1] *= rscale*rscale;
coeff[2] *= rscale;
/* Y coefficients: as given OR same as X coefficients */
if ( number_arguments >= 8 ) {
coeff[4] = arguments[4] * pow(rscale,3.0);
coeff[5] = arguments[5] * rscale*rscale;
coeff[6] = arguments[6] * rscale;
coeff[7] = arguments[7];
}
else {
coeff[4] = coeff[0];
coeff[5] = coeff[1];
coeff[6] = coeff[2];
coeff[7] = coeff[3];
}
/* X,Y Center of Distortion (image coodinates) */
if ( number_arguments == 5 ) {
coeff[8] = arguments[3];
coeff[9] = arguments[4];
}
else if ( number_arguments == 6 ) {
coeff[8] = arguments[4];
coeff[9] = arguments[5];
}
else if ( number_arguments == 10 ) {
coeff[8] = arguments[8];
coeff[9] = arguments[9];
}
else {
/* center of the image provided (image coodinates) */
coeff[8] = (double)image->columns/2.0 + image->page.x;
coeff[9] = (double)image->rows/2.0 + image->page.y;
}
return(coeff);
}
case ShepardsDistortion:
{
/* Shepards Distortion input arguments are the coefficents!
Just check the number of arguments is valid!
Args: u1,v1, x1,y1, ...
OR : u1,v1, r1,g1,c1, ...
*/
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'requires CP's (4 numbers each)'",
CommandOptionToMnemonic(MagickDistortOptions, *method));
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* User defined weighting power for Shepard's Method */
{ const char *artifact=GetImageArtifact(image,"shepards:power");
if ( artifact != (const char *) NULL ) {
coeff[0]=StringToDouble(artifact,(char **) NULL) / 2.0;
if ( coeff[0] < MagickEpsilon ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument","%s", "-define shepards:power" );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
}
else
coeff[0]=1.0; /* Default power of 2 (Inverse Squared) */
}
return(coeff);
}
default:
break;
}
/* you should never reach this point */
perror("no method handler"); /* just fail assertion */
return((double *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i s t o r t R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DistortResizeImage() resize image using the equivalent but slower image
% distortion operator. The filter is applied using an EWA cylindrical
% resampling. But like resize the final image size is limited to whole pixels
% with no effects by virtual-pixels on the result.
%
% Note that images containing a transparency channel will be twice as slow to
% resize as images without transparency.
%
% The format of the DistortResizeImage method is:
%
% Image *DistortResizeImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *DistortResizeImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define DistortResizeImageTag  "Distort/Image"

  Image
    *resize_image,
    *tmp_image;

  RectangleInfo
    crop_area;

  double
    distort_args[12];

  VirtualPixelMethod
    vp_save;

  /*
    Distort resize image: resize via an affine distortion (EWA resampling)
    from (image->columns x image->rows) to (columns x rows), then crop the
    result back to exact whole-pixel dimensions.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    return((Image *) NULL);
  /* Do not short-circuit this resize if final image size is unchanged */
  /*
    Affine distortion arguments: scale X (old width -> new width) and
    scale Y (old height -> new height); all other coefficients zero.
  */
  (void) memset(distort_args,0,sizeof(distort_args));
  distort_args[4]=(double) image->columns;
  distort_args[6]=(double) columns;
  distort_args[9]=(double) image->rows;
  distort_args[11]=(double) rows;
  vp_save=GetImageVirtualPixelMethod(image);
  tmp_image=CloneImage(image,0,0,MagickTrue,exception);
  if (tmp_image == (Image *) NULL)
    return((Image *) NULL);
  /* edge pixels must resample against transparency, not replicated edges */
  (void) SetImageVirtualPixelMethod(tmp_image,TransparentVirtualPixelMethod,
    exception);
  if (image->alpha_trait == UndefinedPixelTrait)
    {
      /*
        Image has no alpha channel, so we are free to use it.
      */
      (void) SetImageAlphaChannel(tmp_image,SetAlphaChannel,exception);
      resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if (resize_image == (Image *) NULL)
        return((Image *) NULL);
      (void) SetImageAlphaChannel(resize_image,OffAlphaChannel,exception);
    }
  else
    {
      /*
        Image has transparency so handle colors and alpha separatly.
        Basically we need to separate Virtual-Pixel alpha in the resized
        image, so only the actual original images alpha channel is used.

        distort alpha channel separately
      */
      Image
        *resize_alpha;

      (void) SetImageAlphaChannel(tmp_image,ExtractAlphaChannel,exception);
      (void) SetImageAlphaChannel(tmp_image,OpaqueAlphaChannel,exception);
      resize_alpha=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if (resize_alpha == (Image *) NULL)
        return((Image *) NULL);
      /* distort the actual image containing alpha + VP alpha */
      tmp_image=CloneImage(image,0,0,MagickTrue,exception);
      if (tmp_image == (Image *) NULL)
        {
          /* fix leak: resize_alpha was already allocated at this point */
          resize_alpha=DestroyImage(resize_alpha);
          return((Image *) NULL);
        }
      (void) SetImageVirtualPixelMethod(tmp_image,
        TransparentVirtualPixelMethod,exception);
      resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if (resize_image == (Image *) NULL)
        {
          resize_alpha=DestroyImage(resize_alpha);
          return((Image *) NULL);
        }
      /* replace resize images alpha with the separally distorted alpha */
      (void) SetImageAlphaChannel(resize_image,OffAlphaChannel,exception);
      (void) SetImageAlphaChannel(resize_alpha,OffAlphaChannel,exception);
      (void) CompositeImage(resize_image,resize_alpha,CopyAlphaCompositeOp,
        MagickTrue,0,0,exception);
      resize_alpha=DestroyImage(resize_alpha);
      resize_image->alpha_trait=image->alpha_trait;
      resize_image->compose=image->compose;
    }
  (void) SetImageVirtualPixelMethod(resize_image,vp_save,exception);
  /*
    Clean up the results of the Distortion: crop the 'bestfit' canvas back
    to the exact requested size and strip the virtual-canvas page geometry.
  */
  crop_area.width=columns;
  crop_area.height=rows;
  crop_area.x=0;
  crop_area.y=0;
  tmp_image=resize_image;
  resize_image=CropImage(tmp_image,&crop_area,exception);
  tmp_image=DestroyImage(tmp_image);
  if (resize_image != (Image *) NULL)
    {
      resize_image->page.width=0;
      resize_image->page.height=0;
    }
  return(resize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D i s t o r t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DistortImage() distorts an image using various distortion methods, by
% mapping color lookups of the source image to a new destination image
% usually of the same size as the source image, unless 'bestfit' is set to
% true.
%
% If 'bestfit' is enabled, and distortion allows it, the destination image is
% adjusted to ensure the whole source 'image' will just fit within the final
% destination image, which will be sized and offset accordingly. Also in
% many cases the virtual offset of the source image will be taken into
% account in the mapping.
%
% If the '-verbose' control option has been set print to standard error the
% equivalent '-fx' formula with coefficients for the function, if practical.
%
% The format of the DistortImage() method is:
%
% Image *DistortImage(const Image *image,const DistortMethod method,
% const size_t number_arguments,const double *arguments,
% MagickBooleanType bestfit, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be distorted.
%
% o method: the method of image distortion.
%
% ArcDistortion always ignores source image offset, and always
% 'bestfit' the destination image with the top left corner offset
% relative to the polar mapping center.
%
% Affine, Perspective, and Bilinear, do least squares fitting of the
% distortion when more than the minimum number of control point pairs
% are provided.
%
% Perspective, and Bilinear, fall back to a Affine distortion when less
% than 4 control point pairs are provided. While Affine distortions
% let you use any number of control point pairs, that is Zero pairs is
% a No-Op (viewport only) distortion, one pair is a translation and
% two pairs of control points do a scale-rotate-translate, without any
% shearing.
%
% o number_arguments: the number of arguments given.
%
% o arguments: an array of floating point arguments for this method.
%
% o bestfit: Attempt to 'bestfit' the size of the resulting image.
% This also forces the resulting image to be a 'layered' virtual
% canvas image. Can be overridden using 'distort:viewport' setting.
%
% o exception: return any errors or warnings in this structure
%
% Extra Controls from Image meta-data (artifacts)...
%
% o "verbose"
% Output to stderr alternatives, internal coefficients, and FX
% equivalents for the distortion operation (if feasible).
% This forms an extra check of the distortion method, and allows users
% access to the internal constants IM calculates for the distortion.
%
% o "distort:viewport"
% Directly set the output image canvas area and offset to use for the
% resulting image, rather than use the original images canvas, or a
% calculated 'bestfit' canvas.
%
% o "distort:scale"
% Scale the size of the output canvas by this amount to provide a
% method of Zooming, and for super-sampling the results.
%
% Other settings that can effect results include
%
% o 'interpolate' For source image lookups (scale enlargements)
%
% o 'filter' Set filter to use for area-resampling (scale shrinking).
% Set to 'point' to turn off and use 'interpolate' lookup
% instead
%
*/
MagickExport Image *DistortImage(const Image *image, DistortMethod method,
const size_t number_arguments,const double *arguments,
MagickBooleanType bestfit,ExceptionInfo *exception)
{
#define DistortImageTag "Distort/Image"
double
*coeff,
output_scaling;
Image
*distort_image;
RectangleInfo
geometry; /* geometry of the distorted space viewport */
MagickBooleanType
viewport_given;
PixelInfo
invalid; /* the color to assign when distort result is invalid */
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/*
Handle Special Compound Distortions
*/
if ( method == ResizeDistortion )
{
if ( number_arguments != 2 )
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : '%s'","Resize",
"Invalid number of args: 2 only");
return((Image *) NULL);
}
distort_image=DistortResizeImage(image,(size_t)arguments[0],
(size_t)arguments[1], exception);
return(distort_image);
}
/*
Convert input arguments (usually as control points for reverse mapping)
into mapping coefficients to apply the distortion.
Note that some distortions are mapped to other distortions,
and as such do not require specific code after this point.
*/
coeff = GenerateCoefficients(image, &method, number_arguments,
arguments, 0, exception);
if ( coeff == (double *) NULL )
return((Image *) NULL);
/*
Determine the size and offset for a 'bestfit' destination.
Usally the four corners of the source image is enough.
*/
/* default output image bounds, when no 'bestfit' is requested */
geometry.width=image->columns;
geometry.height=image->rows;
geometry.x=0;
geometry.y=0;
if ( method == ArcDistortion ) {
bestfit = MagickTrue; /* always calculate a 'best fit' viewport */
}
/* Work out the 'best fit', (required for ArcDistortion) */
if ( bestfit ) {
PointInfo
s,d,min,max; /* source, dest coords --mapping--> min, max coords */
MagickBooleanType
fix_bounds = MagickTrue; /* enlarge bounds for VP handling */
s.x=s.y=min.x=max.x=min.y=max.y=0.0; /* keep compiler happy */
/* defines to figure out the bounds of the distorted image */
#define InitalBounds(p) \
{ \
/* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \
min.x = max.x = p.x; \
min.y = max.y = p.y; \
}
#define ExpandBounds(p) \
{ \
/* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \
min.x = MagickMin(min.x,p.x); \
max.x = MagickMax(max.x,p.x); \
min.y = MagickMin(min.y,p.y); \
max.y = MagickMax(max.y,p.y); \
}
switch (method)
{
case AffineDistortion:
case RigidAffineDistortion:
{ double inverse[6];
InvertAffineCoefficients(coeff, inverse);
s.x = (double) image->page.x;
s.y = (double) image->page.y;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
InitalBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
s.x = (double) image->page.x;
s.y = (double) image->page.y+image->rows;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y+image->rows;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
break;
}
case PerspectiveDistortion:
{ double inverse[8], scale;
InvertPerspectiveCoefficients(coeff, inverse);
s.x = (double) image->page.x;
s.y = (double) image->page.y;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
InitalBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
s.x = (double) image->page.x;
s.y = (double) image->page.y+image->rows;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y+image->rows;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
break;
}
case ArcDistortion:
{ double a, ca, sa;
/* Forward Map Corners */
a = coeff[0]-coeff[1]/2; ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
InitalBounds(d);
d.x = (coeff[2]-coeff[3])*ca;
d.y = (coeff[2]-coeff[3])*sa;
ExpandBounds(d);
a = coeff[0]+coeff[1]/2; ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
ExpandBounds(d);
d.x = (coeff[2]-coeff[3])*ca;
d.y = (coeff[2]-coeff[3])*sa;
ExpandBounds(d);
/* Orthogonal points along top of arc */
for( a=(double) (ceil((double) ((coeff[0]-coeff[1]/2.0)/MagickPI2))*MagickPI2);
a<(coeff[0]+coeff[1]/2.0); a+=MagickPI2 ) {
ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
ExpandBounds(d);
}
/*
Convert the angle_to_width and radius_to_height
to appropriate scaling factors, to allow faster processing
in the mapping function.
*/
coeff[1] = (double) (Magick2PI*image->columns/coeff[1]);
coeff[3] = (double)image->rows/coeff[3];
break;
}
case PolarDistortion:
{
if (number_arguments < 2)
coeff[2] = coeff[3] = 0.0;
min.x = coeff[2]-coeff[0];
max.x = coeff[2]+coeff[0];
min.y = coeff[3]-coeff[0];
max.y = coeff[3]+coeff[0];
/* should be about 1.0 if Rmin = 0 */
coeff[7]=(double) geometry.height/(coeff[0]-coeff[1]);
break;
}
case DePolarDistortion:
{
/* direct calculation as it needs to tile correctly
* for reversibility in a DePolar-Polar cycle */
fix_bounds = MagickFalse;
geometry.x = geometry.y = 0;
geometry.height = (size_t) ceil(coeff[0]-coeff[1]);
geometry.width = (size_t) ceil((coeff[0]-coeff[1])*
(coeff[5]-coeff[4])*0.5);
/* correct scaling factors relative to new size */
coeff[6]=(coeff[5]-coeff[4])*PerceptibleReciprocal(geometry.width); /* changed width */
coeff[7]=(coeff[0]-coeff[1])*PerceptibleReciprocal(geometry.height); /* should be about 1.0 */
break;
}
case Cylinder2PlaneDistortion:
{
/* direct calculation so center of distortion is either a pixel
* center, or pixel edge. This allows for reversibility of the
* distortion */
geometry.x = geometry.y = 0;
geometry.width = (size_t) ceil( 2.0*coeff[1]*tan(coeff[0]/2.0) );
geometry.height = (size_t) ceil( 2.0*coeff[3]/cos(coeff[0]/2.0) );
/* correct center of distortion relative to new size */
coeff[4] = (double) geometry.width/2.0;
coeff[5] = (double) geometry.height/2.0;
fix_bounds = MagickFalse;
break;
}
case Plane2CylinderDistortion:
{
/* direct calculation center is either pixel center, or pixel edge
* so as to allow reversibility of the image distortion */
geometry.x = geometry.y = 0;
geometry.width = (size_t) ceil(coeff[0]*coeff[1]); /* FOV * radius */
geometry.height = (size_t) (2*coeff[3]); /* input image height */
/* correct center of distortion relative to new size */
coeff[4] = (double) geometry.width/2.0;
coeff[5] = (double) geometry.height/2.0;
fix_bounds = MagickFalse;
break;
}
case ShepardsDistortion:
case BilinearForwardDistortion:
case BilinearReverseDistortion:
#if 0
case QuadrilateralDistortion:
#endif
case PolynomialDistortion:
case BarrelDistortion:
case BarrelInverseDistortion:
default:
/* no calculated bestfit available for these distortions */
bestfit = MagickFalse;
fix_bounds = MagickFalse;
break;
}
/* Set the output image geometry to calculated 'bestfit'.
Yes this tends to 'over do' the file image size, ON PURPOSE!
Do not do this for DePolar which needs to be exact for virtual tiling.
*/
if ( fix_bounds ) {
geometry.x = (ssize_t) floor(min.x-0.5);
geometry.y = (ssize_t) floor(min.y-0.5);
geometry.width=(size_t) ceil(max.x-geometry.x+0.5);
geometry.height=(size_t) ceil(max.y-geometry.y+0.5);
}
} /* end bestfit destination image calculations */
/* The user provided a 'viewport' expert option which may
overrides some parts of the current output image geometry.
This also overrides its default 'bestfit' setting.
*/
{ const char *artifact=GetImageArtifact(image,"distort:viewport");
viewport_given = MagickFalse;
if ( artifact != (const char *) NULL ) {
MagickStatusType flags=ParseAbsoluteGeometry(artifact,&geometry);
if (flags==NoValue)
(void) ThrowMagickException(exception,GetMagickModule(),
OptionWarning,"InvalidSetting","'%s' '%s'",
"distort:viewport",artifact);
else
viewport_given = MagickTrue;
}
}
/* Verbose output */
if (IsStringTrue(GetImageArtifact(image,"verbose")) != MagickFalse) {
ssize_t
i;
char image_gen[MagickPathExtent];
const char *lookup;
/* Set destination image size and virtual offset */
if ( bestfit || viewport_given ) {
(void) FormatLocaleString(image_gen,MagickPathExtent,
" -size %.20gx%.20g -page %+.20g%+.20g xc: +insert \\\n",
(double) geometry.width,(double) geometry.height,(double) geometry.x,
(double) geometry.y);
lookup="v.p{xx-v.page.x-0.5,yy-v.page.y-0.5}";
}
else {
image_gen[0] = '\0'; /* no destination to generate */
lookup = "p{xx-page.x-0.5,yy-page.y-0.5}"; /* simplify lookup */
}
switch (method)
{
case AffineDistortion:
case RigidAffineDistortion:
{
double
*inverse;
inverse=(double *) AcquireQuantumMemory(6,sizeof(*inverse));
if (inverse == (double *) NULL)
{
coeff=(double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","%s","DistortImages");
return((Image *) NULL);
}
InvertAffineCoefficients(coeff, inverse);
CoefficientsToAffineArgs(inverse);
(void) FormatLocaleFile(stderr, "Affine projection:\n");
(void) FormatLocaleFile(stderr,
" -distort AffineProjection \\\n '");
for (i=0; i < 5; i++)
(void) FormatLocaleFile(stderr, "%.*g,",GetMagickPrecision(),
inverse[i]);
(void) FormatLocaleFile(stderr, "%.*g'\n",GetMagickPrecision(),
inverse[5]);
(void) FormatLocaleFile(stderr,
"Equivalent scale, rotation(deg), translation:\n");
(void) FormatLocaleFile(stderr," %.*g,%.*g,%.*g,%.*g\n",
GetMagickPrecision(),sqrt(inverse[0]*inverse[0]+
inverse[1]*inverse[1]),GetMagickPrecision(),
RadiansToDegrees(atan2(inverse[1],inverse[0])),
GetMagickPrecision(),inverse[4],GetMagickPrecision(),inverse[5]);
inverse=(double *) RelinquishMagickMemory(inverse);
(void) FormatLocaleFile(stderr,"Affine distort, FX equivalent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr," xx=%+.*g*ii %+.*g*jj %+.*g;\n",
GetMagickPrecision(),coeff[0],GetMagickPrecision(),coeff[1],
GetMagickPrecision(),coeff[2]);
(void) FormatLocaleFile(stderr," yy=%+.*g*ii %+.*g*jj %+.*g;\n",
GetMagickPrecision(),coeff[3],GetMagickPrecision(),coeff[4],
GetMagickPrecision(),coeff[5]);
(void) FormatLocaleFile(stderr," %s' \\\n",lookup);
break;
}
case PerspectiveDistortion:
{
double
*inverse;
inverse=(double *) AcquireQuantumMemory(8,sizeof(*inverse));
if (inverse == (double *) NULL)
{
coeff=(double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","%s",
"DistortCoefficients");
return((Image *) NULL);
}
InvertPerspectiveCoefficients(coeff, inverse);
(void) FormatLocaleFile(stderr,"Perspective Projection:\n");
(void) FormatLocaleFile(stderr,
" -distort PerspectiveProjection \\\n '");
for (i=0; i < 4; i++)
(void) FormatLocaleFile(stderr, "%.*g, ",GetMagickPrecision(),
inverse[i]);
(void) FormatLocaleFile(stderr, "\n ");
for ( ; i < 7; i++)
(void) FormatLocaleFile(stderr, "%.*g, ",GetMagickPrecision(),
inverse[i]);
(void) FormatLocaleFile(stderr, "%.*g'\n",GetMagickPrecision(),
inverse[7]);
inverse=(double *) RelinquishMagickMemory(inverse);
(void) FormatLocaleFile(stderr,"Perspective Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%.1024s",image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr," rr=%+.*g*ii %+.*g*jj + 1;\n",
GetMagickPrecision(),coeff[6],GetMagickPrecision(),coeff[7]);
(void) FormatLocaleFile(stderr,
" xx=(%+.*g*ii %+.*g*jj %+.*g)/rr;\n",
GetMagickPrecision(),coeff[0],GetMagickPrecision(),coeff[1],
GetMagickPrecision(),coeff[2]);
(void) FormatLocaleFile(stderr,
" yy=(%+.*g*ii %+.*g*jj %+.*g)/rr;\n",
GetMagickPrecision(),coeff[3],GetMagickPrecision(),coeff[4],
GetMagickPrecision(),coeff[5]);
(void) FormatLocaleFile(stderr," rr%s0 ? %s : blue' \\\n",
coeff[8] < 0.0 ? "<" : ">", lookup);
break;
}
case BilinearForwardDistortion:
{
(void) FormatLocaleFile(stderr,"BilinearForward Mapping Equations:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr," i = %+lf*x %+lf*y %+lf*x*y %+lf;\n",
coeff[0],coeff[1],coeff[2],coeff[3]);
(void) FormatLocaleFile(stderr," j = %+lf*x %+lf*y %+lf*x*y %+lf;\n",
coeff[4],coeff[5],coeff[6],coeff[7]);
#if 0
/* for debugging */
(void) FormatLocaleFile(stderr, " c8 = %+lf c9 = 2*a = %+lf;\n",
coeff[8], coeff[9]);
#endif
(void) FormatLocaleFile(stderr,
"BilinearForward Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n",0.5-coeff[3],0.5-
coeff[7]);
(void) FormatLocaleFile(stderr," bb=%lf*ii %+lf*jj %+lf;\n",
coeff[6], -coeff[2], coeff[8]);
/* Handle Special degenerate (non-quadratic) or trapezoidal case */
if (coeff[9] != 0)
{
(void) FormatLocaleFile(stderr,
" rt=bb*bb %+lf*(%lf*ii%+lf*jj);\n",-2*coeff[9],coeff[4],
-coeff[0]);
(void) FormatLocaleFile(stderr,
" yy=( -bb + sqrt(rt) ) / %lf;\n",coeff[9]);
}
else
(void) FormatLocaleFile(stderr," yy=(%lf*ii%+lf*jj)/bb;\n",
-coeff[4],coeff[0]);
(void) FormatLocaleFile(stderr,
" xx=(ii %+lf*yy)/(%lf %+lf*yy);\n",-coeff[1],coeff[0],
coeff[2]);
if ( coeff[9] != 0 )
(void) FormatLocaleFile(stderr," (rt < 0 ) ? red : %s'\n",
lookup);
else
(void) FormatLocaleFile(stderr," %s' \\\n", lookup);
break;
}
case BilinearReverseDistortion:
{
#if 0
(void) FormatLocaleFile(stderr, "Polynomial Projection Distort:\n");
(void) FormatLocaleFile(stderr, " -distort PolynomialProjection \\\n");
(void) FormatLocaleFile(stderr, " '1.5, %lf, %lf, %lf, %lf,\n",
coeff[3], coeff[0], coeff[1], coeff[2]);
(void) FormatLocaleFile(stderr, " %lf, %lf, %lf, %lf'\n",
coeff[7], coeff[4], coeff[5], coeff[6]);
#endif
(void) FormatLocaleFile(stderr,
"BilinearReverse Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr,
" xx=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n",coeff[0],coeff[1],
coeff[2], coeff[3]);
(void) FormatLocaleFile(stderr,
" yy=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n",coeff[4],coeff[5],
coeff[6], coeff[7]);
(void) FormatLocaleFile(stderr," %s' \\\n", lookup);
break;
}
case PolynomialDistortion:
{
size_t nterms = (size_t) coeff[1];
(void) FormatLocaleFile(stderr,
"Polynomial (order %lg, terms %lu), FX Equivelent\n",coeff[0],
(unsigned long) nterms);
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " xx =");
for (i=0; i < (ssize_t) nterms; i++)
{
if ((i != 0) && (i%4 == 0))
(void) FormatLocaleFile(stderr, "\n ");
(void) FormatLocaleFile(stderr," %+lf%s",coeff[2+i],
poly_basis_str(i));
}
(void) FormatLocaleFile(stderr,";\n yy =");
for (i=0; i < (ssize_t) nterms; i++)
{
if ((i != 0) && (i%4 == 0))
(void) FormatLocaleFile(stderr,"\n ");
(void) FormatLocaleFile(stderr," %+lf%s",coeff[2+i+nterms],
poly_basis_str(i));
}
(void) FormatLocaleFile(stderr,";\n %s' \\\n", lookup);
break;
}
case ArcDistortion:
{
(void) FormatLocaleFile(stderr,"Arc Distort, Internal Coefficients:\n");
for (i=0; i < 5; i++)
(void) FormatLocaleFile(stderr,
" c%.20g = %+lf\n",(double) i,coeff[i]);
(void) FormatLocaleFile(stderr,"Arc Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr," -fx 'ii=i+page.x; jj=j+page.y;\n");
(void) FormatLocaleFile(stderr," xx=(atan2(jj,ii)%+lf)/(2*pi);\n",
-coeff[0]);
(void) FormatLocaleFile(stderr," xx=xx-round(xx);\n");
(void) FormatLocaleFile(stderr," xx=xx*%lf %+lf;\n",coeff[1],
coeff[4]);
(void) FormatLocaleFile(stderr,
" yy=(%lf - hypot(ii,jj)) * %lf;\n",coeff[2],coeff[3]);
(void) FormatLocaleFile(stderr," v.p{xx-.5,yy-.5}' \\\n");
break;
}
case PolarDistortion:
{
(void) FormatLocaleFile(stderr,"Polar Distort, Internal Coefficents\n");
for (i=0; i < 8; i++)
(void) FormatLocaleFile(stderr," c%.20g = %+lf\n",(double) i,
coeff[i]);
(void) FormatLocaleFile(stderr,"Polar Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n",-coeff[2],-coeff[3]);
(void) FormatLocaleFile(stderr," xx=(atan2(ii,jj)%+lf)/(2*pi);\n",
-(coeff[4]+coeff[5])/2 );
(void) FormatLocaleFile(stderr," xx=xx-round(xx);\n");
(void) FormatLocaleFile(stderr," xx=xx*2*pi*%lf + v.w/2;\n",
coeff[6] );
(void) FormatLocaleFile(stderr," yy=(hypot(ii,jj)%+lf)*%lf;\n",
-coeff[1],coeff[7] );
(void) FormatLocaleFile(stderr," v.p{xx-.5,yy-.5}' \\\n");
break;
}
case DePolarDistortion:
{
(void) FormatLocaleFile(stderr,
"DePolar Distort, Internal Coefficents\n");
for (i=0; i < 8; i++)
(void) FormatLocaleFile(stderr," c%.20g = %+lf\n",(double) i,
coeff[i]);
(void) FormatLocaleFile(stderr,"DePolar Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr," -fx 'aa=(i+.5)*%lf %+lf;\n",
coeff[6],+coeff[4]);
(void) FormatLocaleFile(stderr," rr=(j+.5)*%lf %+lf;\n",
coeff[7],+coeff[1]);
(void) FormatLocaleFile(stderr," xx=rr*sin(aa) %+lf;\n",
coeff[2]);
(void) FormatLocaleFile(stderr," yy=rr*cos(aa) %+lf;\n",
coeff[3]);
(void) FormatLocaleFile(stderr," v.p{xx-.5,yy-.5}' \\\n");
break;
}
case Cylinder2PlaneDistortion:
{
(void) FormatLocaleFile(stderr,
"Cylinder to Plane Distort, Internal Coefficents\n");
(void) FormatLocaleFile(stderr," cylinder_radius = %+lf\n",coeff[1]);
(void) FormatLocaleFile(stderr,
"Cylinder to Plane Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n",-coeff[4],
-coeff[5]);
(void) FormatLocaleFile(stderr," aa=atan(ii/%+lf);\n",coeff[1]);
(void) FormatLocaleFile(stderr," xx=%lf*aa%+lf;\n",
coeff[1],coeff[2]);
(void) FormatLocaleFile(stderr," yy=jj*cos(aa)%+lf;\n",coeff[3]);
(void) FormatLocaleFile(stderr," %s' \\\n", lookup);
break;
}
case Plane2CylinderDistortion:
{
(void) FormatLocaleFile(stderr,
"Plane to Cylinder Distort, Internal Coefficents\n");
(void) FormatLocaleFile(stderr," cylinder_radius = %+lf\n",coeff[1]);
(void) FormatLocaleFile(stderr,
"Plane to Cylinder Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr,"%s", image_gen);
(void) FormatLocaleFile(stderr,
" -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n",-coeff[4],
-coeff[5]);
(void) FormatLocaleFile(stderr," ii=ii/%+lf;\n",coeff[1]);
(void) FormatLocaleFile(stderr," xx=%lf*tan(ii)%+lf;\n",coeff[1],
coeff[2] );
(void) FormatLocaleFile(stderr," yy=jj/cos(ii)%+lf;\n",coeff[3]);
(void) FormatLocaleFile(stderr," %s' \\\n", lookup);
break;
}
case BarrelDistortion:
case BarrelInverseDistortion:
{
double
xc,
yc;
/*
NOTE: This does the barrel roll in pixel coords not image coords
The internal distortion must do it in image coordinates,
so that is what the center coeff (8,9) is given in.
*/
xc=((double)image->columns-1.0)/2.0+image->page.x;
yc=((double)image->rows-1.0)/2.0+image->page.y;
(void) FormatLocaleFile(stderr, "Barrel%s Distort, FX Equivelent:\n",
method == BarrelDistortion ? "" : "Inv");
(void) FormatLocaleFile(stderr, "%s", image_gen);
if ( fabs(coeff[8]-xc-0.5) < 0.1 && fabs(coeff[9]-yc-0.5) < 0.1 )
(void) FormatLocaleFile(stderr," -fx 'xc=(w-1)/2; yc=(h-1)/2;\n");
else
(void) FormatLocaleFile(stderr," -fx 'xc=%lf; yc=%lf;\n",coeff[8]-
0.5,coeff[9]-0.5);
(void) FormatLocaleFile(stderr,
" ii=i-xc; jj=j-yc; rr=hypot(ii,jj);\n");
(void) FormatLocaleFile(stderr,
" ii=ii%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n",
method == BarrelDistortion ? "*" : "/",coeff[0],coeff[1],coeff[2],
coeff[3]);
(void) FormatLocaleFile(stderr,
" jj=jj%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n",
method == BarrelDistortion ? "*" : "/",coeff[4],coeff[5],coeff[6],
coeff[7]);
(void) FormatLocaleFile(stderr," v.p{fx*ii+xc,fy*jj+yc}' \\\n");
}
default:
break;
}
}
/*
The user provided a 'scale' expert option will scale the output image size,
by the factor given allowing for super-sampling of the distorted image
space. Any scaling factors must naturally be halved as a result.
*/
{ const char *artifact;
artifact=GetImageArtifact(image,"distort:scale");
output_scaling = 1.0;
if (artifact != (const char *) NULL) {
output_scaling = fabs(StringToDouble(artifact,(char **) NULL));
geometry.width=(size_t) (output_scaling*geometry.width+0.5);
geometry.height=(size_t) (output_scaling*geometry.height+0.5);
geometry.x=(ssize_t) (output_scaling*geometry.x+0.5);
geometry.y=(ssize_t) (output_scaling*geometry.y+0.5);
if ( output_scaling < 0.1 ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s", "-set option:distort:scale" );
return((Image *) NULL);
}
output_scaling = 1/output_scaling;
}
}
#define ScaleFilter(F,A,B,C,D) \
ScaleResampleFilter( (F), \
output_scaling*(A), output_scaling*(B), \
output_scaling*(C), output_scaling*(D) )
/*
Initialize the distort image attributes.
*/
distort_image=CloneImage(image,geometry.width,geometry.height,MagickTrue,
exception);
if (distort_image == (Image *) NULL)
{
coeff=(double *) RelinquishMagickMemory(coeff);
return((Image *) NULL);
}
/* if image is ColorMapped - change it to DirectClass */
if (SetImageStorageClass(distort_image,DirectClass,exception) == MagickFalse)
{
coeff=(double *) RelinquishMagickMemory(coeff);
distort_image=DestroyImage(distort_image);
return((Image *) NULL);
}
if ((IsPixelInfoGray(&distort_image->background_color) == MagickFalse) &&
(IsGrayColorspace(distort_image->colorspace) != MagickFalse))
(void) SetImageColorspace(distort_image,sRGBColorspace,exception);
if (distort_image->background_color.alpha_trait != UndefinedPixelTrait)
distort_image->alpha_trait=BlendPixelTrait;
distort_image->page.x=geometry.x;
distort_image->page.y=geometry.y;
ConformPixelInfo(distort_image,&distort_image->matte_color,&invalid,
exception);
{ /* ----- MAIN CODE -----
Sample the source image to each pixel in the distort image.
*/
CacheView
*distort_view;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
zero;
ResampleFilter
**magick_restrict resample_filter;
ssize_t
j;
status=MagickTrue;
progress=0;
GetPixelInfo(distort_image,&zero);
resample_filter=AcquireResampleFilterTLS(image,UndefinedVirtualPixelMethod,
MagickFalse,exception);
distort_view=AcquireAuthenticCacheView(distort_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,distort_image,distort_image->rows,1)
#endif
for (j=0; j < (ssize_t) distort_image->rows; j++)
{
const int
id = GetOpenMPThreadId();
double
validity; /* how mathematically valid is this the mapping */
MagickBooleanType
sync;
PixelInfo
pixel; /* pixel color to assign to distorted image */
PointInfo
d,
s; /* transform destination image x,y to source image x,y */
ssize_t
i;
Quantum
*magick_restrict q;
q=QueueCacheViewAuthenticPixels(distort_view,0,j,distort_image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=zero;
/* Define constant scaling vectors for Affine Distortions
Other methods are either variable, or use interpolated lookup
*/
switch (method)
{
case AffineDistortion:
case RigidAffineDistortion:
ScaleFilter( resample_filter[id],
coeff[0], coeff[1],
coeff[3], coeff[4] );
break;
default:
break;
}
/* Initialize default pixel validity
* negative: pixel is invalid output 'matte_color'
* 0.0 to 1.0: antialiased, mix with resample output
* 1.0 or greater: use resampled output.
*/
validity = 1.0;
for (i=0; i < (ssize_t) distort_image->columns; i++)
{
/* map pixel coordinate to distortion space coordinate */
d.x = (double) (geometry.x+i+0.5)*output_scaling;
d.y = (double) (geometry.y+j+0.5)*output_scaling;
s = d; /* default is a no-op mapping */
switch (method)
{
case AffineDistortion:
case RigidAffineDistortion:
{
s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2];
s.y=coeff[3]*d.x+coeff[4]*d.y+coeff[5];
/* Affine partial derivitives are constant -- set above */
break;
}
case PerspectiveDistortion:
{
double
p,n,r,abs_r,abs_c6,abs_c7,scale;
/* perspective is a ratio of affines */
p=coeff[0]*d.x+coeff[1]*d.y+coeff[2];
n=coeff[3]*d.x+coeff[4]*d.y+coeff[5];
r=coeff[6]*d.x+coeff[7]*d.y+1.0;
/* Pixel Validity -- is it a 'sky' or 'ground' pixel */
validity = (r*coeff[8] < 0.0) ? 0.0 : 1.0;
/* Determine horizon anti-alias blending */
abs_r = fabs(r)*2;
abs_c6 = fabs(coeff[6]);
abs_c7 = fabs(coeff[7]);
if ( abs_c6 > abs_c7 ) {
if ( abs_r < abs_c6*output_scaling )
validity = 0.5 - coeff[8]*r/(coeff[6]*output_scaling);
}
else if ( abs_r < abs_c7*output_scaling )
validity = 0.5 - coeff[8]*r/(coeff[7]*output_scaling);
/* Perspective Sampling Point (if valid) */
if ( validity > 0.0 ) {
/* divide by r affine, for perspective scaling */
scale = 1.0/r;
s.x = p*scale;
s.y = n*scale;
/* Perspective Partial Derivatives or Scaling Vectors */
scale *= scale;
ScaleFilter( resample_filter[id],
(r*coeff[0] - p*coeff[6])*scale,
(r*coeff[1] - p*coeff[7])*scale,
(r*coeff[3] - n*coeff[6])*scale,
(r*coeff[4] - n*coeff[7])*scale );
}
break;
}
case BilinearReverseDistortion:
{
/* Reversed Mapped is just a simple polynomial */
s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2]*d.x*d.y+coeff[3];
s.y=coeff[4]*d.x+coeff[5]*d.y
+coeff[6]*d.x*d.y+coeff[7];
/* Bilinear partial derivitives of scaling vectors */
ScaleFilter( resample_filter[id],
coeff[0] + coeff[2]*d.y,
coeff[1] + coeff[2]*d.x,
coeff[4] + coeff[6]*d.y,
coeff[5] + coeff[6]*d.x );
break;
}
case BilinearForwardDistortion:
{
/* Forward mapped needs reversed polynomial equations
* which unfortunatally requires a square root! */
double b,c;
d.x -= coeff[3]; d.y -= coeff[7];
b = coeff[6]*d.x - coeff[2]*d.y + coeff[8];
c = coeff[4]*d.x - coeff[0]*d.y;
validity = 1.0;
/* Handle Special degenerate (non-quadratic) case
* Currently without horizon anti-alising */
if ( fabs(coeff[9]) < MagickEpsilon )
s.y = -c/b;
else {
c = b*b - 2*coeff[9]*c;
if ( c < 0.0 )
validity = 0.0;
else
s.y = ( -b + sqrt(c) )/coeff[9];
}
if ( validity > 0.0 )
s.x = ( d.x - coeff[1]*s.y) / ( coeff[0] + coeff[2]*s.y );
/* NOTE: the sign of the square root should be -ve for parts
where the source image becomes 'flipped' or 'mirrored'.
FUTURE: Horizon handling
FUTURE: Scaling factors or Deritives (how?)
*/
break;
}
#if 0
case BilinearDistortion:
/* Bilinear mapping of any Quadrilateral to any Quadrilateral */
/* UNDER DEVELOPMENT */
break;
#endif
case PolynomialDistortion:
{
/* multi-ordered polynomial */
ssize_t
k;
ssize_t
nterms=(ssize_t)coeff[1];
PointInfo
du,dv; /* the du,dv vectors from unit dx,dy -- derivatives */
s.x=s.y=du.x=du.y=dv.x=dv.y=0.0;
for(k=0; k < nterms; k++) {
s.x += poly_basis_fn(k,d.x,d.y)*coeff[2+k];
du.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k];
du.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k];
s.y += poly_basis_fn(k,d.x,d.y)*coeff[2+k+nterms];
dv.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k+nterms];
dv.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k+nterms];
}
ScaleFilter( resample_filter[id], du.x,du.y,dv.x,dv.y );
break;
}
case ArcDistortion:
{
/* what is the angle and radius in the destination image */
s.x = (double) ((atan2(d.y,d.x) - coeff[0])/Magick2PI);
s.x -= MagickRound(s.x); /* angle */
s.y = hypot(d.x,d.y); /* radius */
/* Arc Distortion Partial Scaling Vectors
Are derived by mapping the perpendicular unit vectors
dR and dA*R*2PI rather than trying to map dx and dy
The results is a very simple orthogonal aligned ellipse.
*/
if ( s.y > MagickEpsilon )
ScaleFilter( resample_filter[id],
(double) (coeff[1]/(Magick2PI*s.y)), 0, 0, coeff[3] );
else
ScaleFilter( resample_filter[id],
distort_image->columns*2, 0, 0, coeff[3] );
/* now scale the angle and radius for source image lookup point */
s.x = s.x*coeff[1] + coeff[4] + image->page.x +0.5;
s.y = (coeff[2] - s.y) * coeff[3] + image->page.y;
break;
}
case PolarDistortion:
{ /* 2D Cartesain to Polar View */
d.x -= coeff[2];
d.y -= coeff[3];
s.x = atan2(d.x,d.y) - (coeff[4]+coeff[5])/2;
s.x /= Magick2PI;
s.x -= MagickRound(s.x);
s.x *= Magick2PI; /* angle - relative to centerline */
s.y = hypot(d.x,d.y); /* radius */
/* Polar Scaling vectors are based on mapping dR and dA vectors
This results in very simple orthogonal scaling vectors
*/
if ( s.y > MagickEpsilon )
ScaleFilter( resample_filter[id],
(double) (coeff[6]/(Magick2PI*s.y)), 0, 0, coeff[7] );
else
ScaleFilter( resample_filter[id],
distort_image->columns*2, 0, 0, coeff[7] );
/* now finish mapping radius/angle to source x,y coords */
s.x = s.x*coeff[6] + (double)image->columns/2.0 + image->page.x;
s.y = (s.y-coeff[1])*coeff[7] + image->page.y;
break;
}
case DePolarDistortion:
{ /* @D Polar to Carteasain */
/* ignore all destination virtual offsets */
d.x = ((double)i+0.5)*output_scaling*coeff[6]+coeff[4];
d.y = ((double)j+0.5)*output_scaling*coeff[7]+coeff[1];
s.x = d.y*sin(d.x) + coeff[2];
s.y = d.y*cos(d.x) + coeff[3];
/* derivatives are usless - better to use SuperSampling */
break;
}
case Cylinder2PlaneDistortion:
{ /* 3D Cylinder to Tangential Plane */
double ax, cx;
/* relative to center of distortion */
d.x -= coeff[4]; d.y -= coeff[5];
d.x /= coeff[1]; /* x' = x/r */
ax=atan(d.x); /* aa = atan(x/r) = u/r */
cx=cos(ax); /* cx = cos(atan(x/r)) = 1/sqrt(x^2+u^2) */
s.x = coeff[1]*ax; /* u = r*atan(x/r) */
s.y = d.y*cx; /* v = y*cos(u/r) */
/* derivatives... (see personnal notes) */
ScaleFilter( resample_filter[id],
1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y );
#if 0
if ( i == 0 && j == 0 ) {
fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y);
fprintf(stderr, "phi = %lf\n", (double)(ax * 180.0/MagickPI) );
fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n",
1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y );
fflush(stderr); }
#endif
/* add center of distortion in source */
s.x += coeff[2]; s.y += coeff[3];
break;
}
case Plane2CylinderDistortion:
{ /* 3D Cylinder to Tangential Plane */
/* relative to center of distortion */
d.x -= coeff[4]; d.y -= coeff[5];
/* is pixel valid - horizon of a infinite Virtual-Pixel Plane
* (see Anthony Thyssen's personal note) */
validity = (double) (coeff[1]*MagickPI2 - fabs(d.x))/output_scaling + 0.5;
if ( validity > 0.0 ) {
double cx,tx;
d.x /= coeff[1]; /* x'= x/r */
cx = 1/cos(d.x); /* cx = 1/cos(x/r) */
tx = tan(d.x); /* tx = tan(x/r) */
s.x = coeff[1]*tx; /* u = r * tan(x/r) */
s.y = d.y*cx; /* v = y / cos(x/r) */
/* derivatives... (see Anthony Thyssen's personal notes) */
ScaleFilter( resample_filter[id],
cx*cx, 0.0, s.y*cx/coeff[1], cx );
#if 0
/*if ( i == 0 && j == 0 )*/
if ( d.x == 0.5 && d.y == 0.5 ) {
fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y);
fprintf(stderr, "radius = %lf phi = %lf validity = %lf\n",
coeff[1], (double)(d.x * 180.0/MagickPI), validity );
fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n",
cx*cx, 0.0, s.y*cx/coeff[1], cx);
fflush(stderr); }
#endif
}
/* add center of distortion in source */
s.x += coeff[2]; s.y += coeff[3];
break;
}
case BarrelDistortion:
case BarrelInverseDistortion:
{ /* Lens Barrel Distionion Correction */
double r,fx,fy,gx,gy;
/* Radial Polynomial Distortion (de-normalized) */
d.x -= coeff[8];
d.y -= coeff[9];
r = sqrt(d.x*d.x+d.y*d.y);
if ( r > MagickEpsilon ) {
fx = ((coeff[0]*r + coeff[1])*r + coeff[2])*r + coeff[3];
fy = ((coeff[4]*r + coeff[5])*r + coeff[6])*r + coeff[7];
gx = ((3*coeff[0]*r + 2*coeff[1])*r + coeff[2])/r;
gy = ((3*coeff[4]*r + 2*coeff[5])*r + coeff[6])/r;
/* adjust functions and scaling for 'inverse' form */
if ( method == BarrelInverseDistortion ) {
fx = 1/fx; fy = 1/fy;
gx *= -fx*fx; gy *= -fy*fy;
}
/* Set the source pixel to lookup and EWA derivative vectors */
s.x = d.x*fx + coeff[8];
s.y = d.y*fy + coeff[9];
ScaleFilter( resample_filter[id],
gx*d.x*d.x + fx, gx*d.x*d.y,
gy*d.x*d.y, gy*d.y*d.y + fy );
}
else {
/* Special handling to avoid divide by zero when r==0
**
** The source and destination pixels match in this case
** which was set at the top of the loop using s = d;
** otherwise... s.x=coeff[8]; s.y=coeff[9];
*/
if ( method == BarrelDistortion )
ScaleFilter( resample_filter[id],
coeff[3], 0, 0, coeff[7] );
else /* method == BarrelInverseDistortion */
/* FUTURE, trap for D==0 causing division by zero */
ScaleFilter( resample_filter[id],
1.0/coeff[3], 0, 0, 1.0/coeff[7] );
}
break;
}
case ShepardsDistortion:
{ /* Shepards Method, or Inverse Weighted Distance for
displacement around the destination image control points
The input arguments are the coefficents to the function.
This is more of a 'displacement' function rather than an
absolute distortion function.
Note: We can not determine derivatives using shepards method
so only a point sample interpolatation can be used.
*/
double
denominator;
size_t
k;
denominator = s.x = s.y = 0;
for(k=0; k<number_arguments; k+=4) {
double weight =
((double)d.x-arguments[k+2])*((double)d.x-arguments[k+2])
+ ((double)d.y-arguments[k+3])*((double)d.y-arguments[k+3]);
weight = pow(weight,coeff[0]); /* shepards power factor */
weight = ( weight < 1.0 ) ? 1.0 : 1.0/weight;
s.x += (arguments[ k ]-arguments[k+2])*weight;
s.y += (arguments[k+1]-arguments[k+3])*weight;
denominator += weight;
}
s.x /= denominator;
s.y /= denominator;
s.x += d.x; /* make it as relative displacement */
s.y += d.y;
break;
}
default:
break; /* use the default no-op given above */
}
/* map virtual canvas location back to real image coordinate */
if ( bestfit && method != ArcDistortion ) {
s.x -= image->page.x;
s.y -= image->page.y;
}
s.x -= 0.5;
s.y -= 0.5;
if ( validity <= 0.0 ) {
/* result of distortion is an invalid pixel - don't resample */
SetPixelViaPixelInfo(distort_image,&invalid,q);
}
else {
/* resample the source image to find its correct color */
(void) ResamplePixelColor(resample_filter[id],s.x,s.y,&pixel,
exception);
/* if validity between 0.0 and 1.0 mix result with invalid pixel */
if ( validity < 1.0 ) {
/* Do a blend of sample color and invalid pixel */
/* should this be a 'Blend', or an 'Over' compose */
CompositePixelInfoBlend(&pixel,validity,&invalid,(1.0-validity),
&pixel);
}
SetPixelViaPixelInfo(distort_image,&pixel,q);
}
q+=GetPixelChannels(distort_image);
}
sync=SyncCacheViewAuthenticPixels(distort_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,DistortImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
distort_view=DestroyCacheView(distort_view);
resample_filter=DestroyResampleFilterTLS(resample_filter);
if (status == MagickFalse)
distort_image=DestroyImage(distort_image);
}
/* Arc does not return an offset unless 'bestfit' is in effect
And the user has not provided an overriding 'viewport'.
*/
if ( method == ArcDistortion && !bestfit && !viewport_given ) {
distort_image->page.x = 0;
distort_image->page.y = 0;
}
coeff=(double *) RelinquishMagickMemory(coeff);
return(distort_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o t a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotateImage() creates a new image that is a rotated copy of an existing
% one. Positive angles rotate counter-clockwise (right-hand rule), while
% negative angles rotate clockwise. Rotated images are usually larger than
% the originals and have 'empty' triangular corners. Empty
% triangles left over from shearing the image are filled with the background
% color defined by member 'background_color' of the image. RotateImage
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the RotateImage method is:
%
% Image *RotateImage(const Image *image,const double degrees,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: Specifies the number of degrees to rotate the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RotateImage(const Image *image,const double degrees,
  ExceptionInfo *exception)
{
  double
    residual_angle;

  Image
    *rotate_image,
    *source_image;

  PointInfo
    shear;

  size_t
    quarter_turns;

  /*
    Validate the input image and exception structures.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Decompose the rotation into a number of lossless 90-degree turns plus
    a residual angle confined to the range (-45,45].
  */
  residual_angle=fmod(degrees,360.0);
  while (residual_angle < -45.0)
    residual_angle+=360.0;
  for (quarter_turns=0; residual_angle > 45.0; quarter_turns++)
    residual_angle-=90.0;
  quarter_turns%=4;
  /*
    If the residual shear components are negligible, the fast integral
    rotation alone reproduces the requested rotation exactly.
  */
  shear.x=(-tan((double) DegreesToRadians(residual_angle)/2.0));
  shear.y=sin((double) DegreesToRadians(residual_angle));
  if ((fabs(shear.x) < MagickEpsilon) && (fabs(shear.y) < MagickEpsilon))
    return(IntegralRotateImage(image,quarter_turns,exception));
  /*
    Otherwise perform the rotation through the general distortion engine,
    filling exposed corners via the background virtual pixel method.
  */
  source_image=CloneImage(image,0,0,MagickTrue,exception);
  if (source_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageVirtualPixelMethod(source_image,BackgroundVirtualPixelMethod,
    exception);
  rotate_image=DistortImage(source_image,ScaleRotateTranslateDistortion,1,
    &degrees,MagickTrue,exception);
  source_image=DestroyImage(source_image);
  return(rotate_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p a r s e C o l o r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SparseColorImage(), given a set of coordinates, interpolates the colors
% found at those coordinates, across the whole image, using various methods.
%
% The format of the SparseColorImage() method is:
%
% Image *SparseColorImage(const Image *image,
% const SparseColorMethod method,const size_t number_arguments,
% const double *arguments,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be filled in.
%
% o method: the method to fill in the gradient between the control points.
%
%      The methods used for SparseColor() are often similar to methods
%      used for DistortImage(), and even share the same code for determination
%      of the function coefficients, though with more dimensions (or resulting
%      values).
%
% o number_arguments: the number of arguments given.
%
% o arguments: array of floating point arguments for this method--
% x,y,color_values-- with color_values given as normalized values.
%
% o exception: return any errors or warnings in this structure
%
*/
MagickExport Image *SparseColorImage(const Image *image,
const SparseColorMethod method,const size_t number_arguments,
const double *arguments,ExceptionInfo *exception)
{
#define SparseColorTag "Distort/SparseColor"
SparseColorMethod
sparse_method;
double
*coeff;          /* per-channel interpolation coefficients from GenerateCoefficients() */
Image
*sparse_image;   /* clone of image that receives the interpolated gradient */
size_t
number_colors;   /* color values stored per control point in 'arguments' */
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/* Determine number of color values needed per control point.
   Only channels flagged for update are counted: R, G, B, plus K when
   the image is CMYK and alpha when the image has an alpha channel. */
number_colors=0;
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
number_colors++;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
number_colors++;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
number_colors++;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
number_colors++;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
number_colors++;
/*
Convert input arguments into mapping coefficients; in this case
we are mapping (distorting) colors, rather than coordinates.
*/
{ DistortMethod
distort_method;
distort_method=(DistortMethod) method;
if ( distort_method >= SentinelDistortion )
distort_method = ShepardsDistortion; /* Pretend to be Shepards */
coeff = GenerateCoefficients(image, &distort_method, number_arguments,
arguments, number_colors, exception);
if ( coeff == (double *) NULL )
return((Image *) NULL);
/*
Note some Distort Methods may fall back to other simpler methods,
Currently the only fallback of concern is Bilinear to Affine
(Barycentric), which is also a sparse_color method. This also ensures
correct two and one color Barycentric handling.
*/
sparse_method = (SparseColorMethod) distort_method;
if ( distort_method == ShepardsDistortion )
sparse_method = method; /* return non-distort methods to normal */
if ( sparse_method == InverseColorInterpolate )
coeff[0]=0.5; /* sqrt() the squared distance for inverse */
}
/* Verbose output: print the equivalent -fx expression per channel for
   the simple (Barycentric/Bilinear) methods. */
if (IsStringTrue(GetImageArtifact(image,"verbose")) != MagickFalse) {
switch (sparse_method) {
case BarycentricColorInterpolate:
{
ssize_t x=0;
(void) FormatLocaleFile(stderr, "Barycentric Sparse Color:\n");
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
(void) FormatLocaleFile(stderr, "  -channel R -fx '%+lf*i %+lf*j %+lf' \\\n",
coeff[x], coeff[x+1], coeff[x+2]),x+=3;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
(void) FormatLocaleFile(stderr, "  -channel G -fx '%+lf*i %+lf*j %+lf' \\\n",
coeff[x], coeff[x+1], coeff[x+2]),x+=3;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
(void) FormatLocaleFile(stderr, "  -channel B -fx '%+lf*i %+lf*j %+lf' \\\n",
coeff[x], coeff[x+1], coeff[x+2]),x+=3;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
(void) FormatLocaleFile(stderr, "  -channel K -fx '%+lf*i %+lf*j %+lf' \\\n",
coeff[x], coeff[x+1], coeff[x+2]),x+=3;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
(void) FormatLocaleFile(stderr, "  -channel A -fx '%+lf*i %+lf*j %+lf' \\\n",
coeff[x], coeff[x+1], coeff[x+2]),x+=3;
break;
}
case BilinearColorInterpolate:
{
ssize_t x=0;
(void) FormatLocaleFile(stderr, "Bilinear Sparse Color\n");
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
(void) FormatLocaleFile(stderr, "   -channel R -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
coeff[ x ], coeff[x+1],
coeff[x+2], coeff[x+3]),x+=4;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
(void) FormatLocaleFile(stderr, "   -channel G -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
coeff[ x ], coeff[x+1],
coeff[x+2], coeff[x+3]),x+=4;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
(void) FormatLocaleFile(stderr, "   -channel B -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
coeff[ x ], coeff[x+1],
coeff[x+2], coeff[x+3]),x+=4;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
(void) FormatLocaleFile(stderr, "   -channel K -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
coeff[ x ], coeff[x+1],
coeff[x+2], coeff[x+3]),x+=4;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
(void) FormatLocaleFile(stderr, "   -channel A -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
coeff[ x ], coeff[x+1],
coeff[x+2], coeff[x+3]),x+=4;
break;
}
default:
/* sparse color method is too complex for FX emulation */
break;
}
}
/* Generate new image for generated interpolated gradient.
 * ASIDE: Actually we could have just replaced the colors of the original
 * image, but IM Core policy, is if storage class could change then clone
 * the image.
 */
sparse_image=CloneImage(image,0,0,MagickTrue,exception);
if (sparse_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(sparse_image,DirectClass,exception) == MagickFalse)
{ /* if image is ColorMapped - change it to DirectClass */
sparse_image=DestroyImage(sparse_image);
return((Image *) NULL);
}
{ /* ----- MAIN CODE ----- */
CacheView
*sparse_view;
MagickBooleanType
status;
MagickOffsetType
progress;    /* rows completed; shared across OpenMP threads (atomic update) */
ssize_t
j;
status=MagickTrue;
progress=0;
sparse_view=AcquireAuthenticCacheView(sparse_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,sparse_image,sparse_image->rows,1)
#endif
for (j=0; j < (ssize_t) sparse_image->rows; j++)
{
MagickBooleanType
sync;
PixelInfo
pixel;    /* pixel to assign to distorted image */
ssize_t
i;
Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(sparse_view,0,j,sparse_image->columns,
1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
GetPixelInfo(sparse_image,&pixel);
/* NOTE(review): q walks sparse_image's row while the loop bound and
   channel traits come from image; sparse_image is a clone of image so
   the geometries match -- confirm if that invariant ever changes. */
for (i=0; i < (ssize_t) image->columns; i++)
{
GetPixelInfoPixel(image,q,&pixel);
switch (sparse_method)
{
case BarycentricColorInterpolate:
{
/* Affine plane per channel: c0*i + c1*j + c2. */
ssize_t x=0;
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
pixel.red     = coeff[x]*i +coeff[x+1]*j
+coeff[x+2], x+=3;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
pixel.green   = coeff[x]*i +coeff[x+1]*j
+coeff[x+2], x+=3;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
pixel.blue    = coeff[x]*i +coeff[x+1]*j
+coeff[x+2], x+=3;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.black   = coeff[x]*i +coeff[x+1]*j
+coeff[x+2], x+=3;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
pixel.alpha = coeff[x]*i +coeff[x+1]*j
+coeff[x+2], x+=3;
break;
}
case BilinearColorInterpolate:
{
/* Bilinear surface per channel: c0*i + c1*j + c2*i*j + c3. */
ssize_t x=0;
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
pixel.red     = coeff[x]*i     + coeff[x+1]*j +
coeff[x+2]*i*j + coeff[x+3], x+=4;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
pixel.green   = coeff[x]*i     + coeff[x+1]*j +
coeff[x+2]*i*j + coeff[x+3], x+=4;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
pixel.blue    = coeff[x]*i     + coeff[x+1]*j +
coeff[x+2]*i*j + coeff[x+3], x+=4;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.black   = coeff[x]*i     + coeff[x+1]*j +
coeff[x+2]*i*j + coeff[x+3], x+=4;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
pixel.alpha = coeff[x]*i     + coeff[x+1]*j +
coeff[x+2]*i*j + coeff[x+3], x+=4;
break;
}
case InverseColorInterpolate:
case ShepardsColorInterpolate:
{ /* Inverse (Squared) Distance weights average (IDW) */
size_t
k;
double
denominator;
/* Zero the channels being updated, then accumulate a weighted sum
   over all control points and normalize by the total weight. */
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
pixel.red=0.0;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
pixel.green=0.0;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
pixel.blue=0.0;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.black=0.0;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
pixel.alpha=0.0;
denominator = 0.0;
for(k=0; k<number_arguments; k+=2+number_colors) {
ssize_t x=(ssize_t) k+2;
double weight =
((double)i-arguments[ k ])*((double)i-arguments[ k ])
+ ((double)j-arguments[k+1])*((double)j-arguments[k+1]);
/* coeff[0] is 1.0 for Shepards (squared distance) or 0.5 for
   Inverse (plain distance, set earlier). */
weight = pow(weight,coeff[0]); /* inverse of power factor */
weight = ( weight < 1.0 ) ? 1.0 : 1.0/weight;
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
pixel.red     += arguments[x++]*weight;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
pixel.green   += arguments[x++]*weight;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
pixel.blue    += arguments[x++]*weight;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.black   += arguments[x++]*weight;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
pixel.alpha += arguments[x++]*weight;
denominator += weight;
}
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
pixel.red/=denominator;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
pixel.green/=denominator;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
pixel.blue/=denominator;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.black/=denominator;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
pixel.alpha/=denominator;
break;
}
case ManhattanColorInterpolate:
{
size_t
k;
double
minimum = MagickMaximumValue;
/*
Just use the closest control point you can find!
(Manhattan/taxicab distance: |di| + |dj|.)
*/
for(k=0; k<number_arguments; k+=2+number_colors) {
double distance =
fabs((double)i-arguments[ k ])
+ fabs((double)j-arguments[k+1]);
if ( distance < minimum ) {
ssize_t x=(ssize_t) k+2;
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
pixel.red=arguments[x++];
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
pixel.green=arguments[x++];
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
pixel.blue=arguments[x++];
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.black=arguments[x++];
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
pixel.alpha=arguments[x++];
minimum = distance;
}
}
break;
}
case VoronoiColorInterpolate:
default:
{
size_t
k;
double
minimum = MagickMaximumValue;
/*
Just use the closest control point you can find!
(Squared Euclidean distance; sqrt unneeded for comparison.)
*/
for (k=0; k<number_arguments; k+=2+number_colors) {
double distance =
((double)i-arguments[ k ])*((double)i-arguments[ k ])
+ ((double)j-arguments[k+1])*((double)j-arguments[k+1]);
if ( distance < minimum ) {
ssize_t x=(ssize_t) k+2;
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
pixel.red=arguments[x++];
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
pixel.green=arguments[x++];
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
pixel.blue=arguments[x++];
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.black=arguments[x++];
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
pixel.alpha=arguments[x++];
minimum = distance;
}
}
break;
}
}
/* Scale normalized color values to quantum range, clamp, and store
   the interpolated color into sparse_image. */
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
pixel.red=(MagickRealType) ClampPixel(QuantumRange*pixel.red);
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
pixel.green=(MagickRealType) ClampPixel(QuantumRange*pixel.green);
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
pixel.blue=(MagickRealType) ClampPixel(QuantumRange*pixel.blue);
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.black=(MagickRealType) ClampPixel(QuantumRange*pixel.black);
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
pixel.alpha=(MagickRealType) ClampPixel(QuantumRange*pixel.alpha);
SetPixelViaPixelInfo(sparse_image,&pixel,q);
q+=GetPixelChannels(sparse_image);
}
sync=SyncCacheViewAuthenticPixels(sparse_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SparseColorTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
sparse_view=DestroyCacheView(sparse_view);
if (status == MagickFalse)
sparse_image=DestroyImage(sparse_image);
}
coeff = (double *) RelinquishMagickMemory(coeff);
return(sparse_image);
}
|
resource_strings.h | #pragma once
#include <torch/csrc/jit/fuser/config.h>
#if USE_CPU_FUSER
#include <torch/csrc/jit/code_template.h>
namespace torch { namespace jit { namespace fuser { namespace cpu {
/*with type_as not checking type of its input, a fusion group can have non-fp32 tensor as input.
Correct code for this case is generated, however, nvrtc does not know how to handle int*_t integer types,
so typedefs help it handle those cases*/
/* Type scaffolding emitted at the top of every generated CPU fusion
   kernel.  ${IndexType} is substituted by the code generator.
   TensorInfo mirrors a tensor: data pointer plus per-dimension
   sizes/strides; the 0-dim specialization carries only the pointer.
   NOTE: the template text itself is runtime data -- any change here
   alters the generated kernels. */
static auto type_declarations_template = CodeTemplate(R"(
#define POS_INFINITY INFINITY
#define NEG_INFINITY -INFINITY
typedef ${IndexType} IndexType;
template<typename T, size_t N>
struct TensorInfo {
T* data;
IndexType sizes[N];
IndexType strides[N];
};
template<typename T>
struct TensorInfo<T, 0> {
T * data;
};
)");
/* Skeleton of the generated translation unit for a fused CPU kernel.
   ${kernelName}, ${formals}, ${tensorOffsets}, ${kernelBody} and
   ${,argument_loads} are filled in by the fuser.  The extern "C"
   wrapper gives the compiled kernel an unmangled entry point, and the
   OpenMP pragma parallelizes the element loop only above
   OMP_THRESHOLD elements to avoid threading overhead on small inputs. */
static auto cpu_compilation_unit_template = CodeTemplate(R"(
#include <cstddef>
#include <cstdint>
#include <math.h>
template <typename scalar_t>
scalar_t rsqrtf(scalar_t x) {
return 1.0/sqrtf(x);
}
${type_declarations}
#define OMP_THRESHOLD 100000
static void ${kernelName}_kernel(IndexType totalElements, ${formals}) {
#pragma omp parallel for if(totalElements > OMP_THRESHOLD)
for (IndexType linearIndex = 0;
linearIndex < totalElements;
linearIndex += 1) {
// Convert `linearIndex` into an offset of tensor:
${tensorOffsets}
// calculate the results
${kernelBody}
}
}
extern "C"
void ${kernelName}(IndexType totalElements, void ** args) {
${kernelName}_kernel(totalElements ${,argument_loads});
}
)");
} // namespace cpu
} // namespace fuser
} // namespace jit
} // namespace torch
#endif // USE_CPU_FUSER
|
c-parser.c | /* Parser for C and Objective-C.
Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
Parser actions based on the old Bison parser; structure somewhat
influenced by and fragments based on the C++ parser.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA. */
/* TODO:
Make sure all relevant comments, and all relevant code from all
actions, brought over from old parser. Verify exact correspondence
of syntax accepted.
Add testcases covering every input symbol in every state in old and
new parsers.
Include full syntax for GNU C, including erroneous cases accepted
with error messages, in syntax productions in comments.
Make more diagnostics in the front end generally take an explicit
location rather than implicitly using input_location. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "langhooks.h"
#include "input.h"
#include "cpplib.h"
#include "timevar.h"
#include "c-pragma.h"
#include "c-tree.h"
#include "flags.h"
#include "output.h"
#include "toplev.h"
#include "ggc.h"
#include "c-common.h"
#include "vec.h"
#include "target.h"
#include "cgraph.h"
/* Miscellaneous data and functions needed for the parser.  */
int yydebug;
/* Objective-C specific parser/lexer information.  */
/* Nonzero while lexing in a context where the Objective-C
   protocol-qualifier keywords (bycopy, byref, in, ...) are live.  */
static int objc_pq_context = 0;
/* The following flag is needed to contextualize Objective-C lexical
   analysis.  In some cases (e.g., 'int NSObject;'), it is undesirable
   to bind an identifier to an Objective-C class, even if a class with
   that name exists.  */
static int objc_need_raw_identifier = 0;
/* Set objc_need_raw_identifier to VAL, but only when compiling
   Objective-C; a no-op for plain C.  */
#define OBJC_NEED_RAW_IDENTIFIER(VAL)		\
  do {						\
    if (c_dialect_objc ())			\
      objc_need_raw_identifier = VAL;		\
  } while (0)
/* The reserved keyword table.  */
struct resword
{
  /* The keyword's spelling.  */
  const char *word;
  /* The RID_* code the keyword maps to.  */
  ENUM_BITFIELD(rid) rid : 16;
  /* Bitmask of D_* flags under which the keyword is disabled.  */
  unsigned int disable   : 16;
};
/* Disable mask.  Keywords are disabled if (reswords[i].disable &
   mask) is _true_.  */
#define D_C89	0x01	/* not in C89 */
#define D_EXT	0x02	/* GCC extension */
#define D_EXT89	0x04	/* GCC extension incorporated in C99 */
#define D_OBJC	0x08	/* Objective C only */
/* All reserved words known to the C/Objective-C front end.  The table
   is scanned linearly by c_parse_init (order is not significant);
   entries whose disable flags match the active dialect mask are
   skipped there.  */
static const struct resword reswords[] =
{
  { "_Bool",		RID_BOOL,	0 },
  { "_Complex",		RID_COMPLEX,	0 },
  { "_Decimal32",	RID_DFLOAT32,	D_EXT },
  { "_Decimal64",	RID_DFLOAT64,	D_EXT },
  { "_Decimal128",	RID_DFLOAT128,	D_EXT },
  { "__FUNCTION__",	RID_FUNCTION_NAME, 0 },
  { "__PRETTY_FUNCTION__", RID_PRETTY_FUNCTION_NAME, 0 },
  { "__alignof",	RID_ALIGNOF,	0 },
  { "__alignof__",	RID_ALIGNOF,	0 },
  { "__asm",		RID_ASM,	0 },
  { "__asm__",		RID_ASM,	0 },
  { "__attribute",	RID_ATTRIBUTE,	0 },
  { "__attribute__",	RID_ATTRIBUTE,	0 },
  { "__builtin_choose_expr", RID_CHOOSE_EXPR, 0 },
  { "__builtin_offsetof", RID_OFFSETOF, 0 },
  { "__builtin_types_compatible_p", RID_TYPES_COMPATIBLE_P, 0 },
  { "__builtin_va_arg",	RID_VA_ARG,	0 },
  { "__complex",	RID_COMPLEX,	0 },
  { "__complex__",	RID_COMPLEX,	0 },
  { "__const",		RID_CONST,	0 },
  { "__const__",	RID_CONST,	0 },
  { "__extension__",	RID_EXTENSION,	0 },
  { "__func__",		RID_C99_FUNCTION_NAME, 0 },
  { "__imag",		RID_IMAGPART,	0 },
  { "__imag__",		RID_IMAGPART,	0 },
  { "__inline",		RID_INLINE,	0 },
  { "__inline__",	RID_INLINE,	0 },
  { "__label__",	RID_LABEL,	0 },
  { "__real",		RID_REALPART,	0 },
  { "__real__",		RID_REALPART,	0 },
  { "__restrict",	RID_RESTRICT,	0 },
  { "__restrict__",	RID_RESTRICT,	0 },
  { "__signed",		RID_SIGNED,	0 },
  { "__signed__",	RID_SIGNED,	0 },
  { "__thread",		RID_THREAD,	0 },
  { "__typeof",		RID_TYPEOF,	0 },
  { "__typeof__",	RID_TYPEOF,	0 },
  { "__volatile",	RID_VOLATILE,	0 },
  { "__volatile__",	RID_VOLATILE,	0 },
  { "asm",		RID_ASM,	D_EXT },
  { "auto",		RID_AUTO,	0 },
  { "break",		RID_BREAK,	0 },
  { "case",		RID_CASE,	0 },
  { "char",		RID_CHAR,	0 },
  { "const",		RID_CONST,	0 },
  { "continue",		RID_CONTINUE,	0 },
  { "default",		RID_DEFAULT,	0 },
  { "do",		RID_DO,		0 },
  { "double",		RID_DOUBLE,	0 },
  { "else",		RID_ELSE,	0 },
  { "enum",		RID_ENUM,	0 },
  { "extern",		RID_EXTERN,	0 },
  { "float",		RID_FLOAT,	0 },
  { "for",		RID_FOR,	0 },
  { "goto",		RID_GOTO,	0 },
  { "if",		RID_IF,		0 },
  { "inline",		RID_INLINE,	D_EXT89 },
  { "int",		RID_INT,	0 },
  { "long",		RID_LONG,	0 },
  { "register",		RID_REGISTER,	0 },
  { "restrict",		RID_RESTRICT,	D_C89 },
  { "return",		RID_RETURN,	0 },
  { "short",		RID_SHORT,	0 },
  { "signed",		RID_SIGNED,	0 },
  { "sizeof",		RID_SIZEOF,	0 },
  { "static",		RID_STATIC,	0 },
  { "struct",		RID_STRUCT,	0 },
  { "switch",		RID_SWITCH,	0 },
  { "typedef",		RID_TYPEDEF,	0 },
  { "typeof",		RID_TYPEOF,	D_EXT },
  { "union",		RID_UNION,	0 },
  { "unsigned",		RID_UNSIGNED,	0 },
  { "void",		RID_VOID,	0 },
  { "volatile",		RID_VOLATILE,	0 },
  { "while",		RID_WHILE,	0 },
  /* These Objective-C keywords are recognized only immediately after
     an '@'.  */
  { "class",		RID_AT_CLASS,		D_OBJC },
  { "compatibility_alias", RID_AT_ALIAS,	D_OBJC },
  { "defs",		RID_AT_DEFS,		D_OBJC },
  { "encode",		RID_AT_ENCODE,		D_OBJC },
  { "end",		RID_AT_END,		D_OBJC },
  { "implementation",	RID_AT_IMPLEMENTATION,	D_OBJC },
  { "interface",	RID_AT_INTERFACE,	D_OBJC },
  { "private",		RID_AT_PRIVATE,		D_OBJC },
  { "protected",	RID_AT_PROTECTED,	D_OBJC },
  { "protocol",		RID_AT_PROTOCOL,	D_OBJC },
  { "public",		RID_AT_PUBLIC,		D_OBJC },
  { "selector",		RID_AT_SELECTOR,	D_OBJC },
  { "throw",		RID_AT_THROW,		D_OBJC },
  { "try",		RID_AT_TRY,		D_OBJC },
  { "catch",		RID_AT_CATCH,		D_OBJC },
  { "finally",		RID_AT_FINALLY,		D_OBJC },
  { "synchronized",	RID_AT_SYNCHRONIZED,	D_OBJC },
  /* These are recognized only in protocol-qualifier context
     (see above) */
  { "bycopy",		RID_BYCOPY,		D_OBJC },
  { "byref",		RID_BYREF,		D_OBJC },
  { "in",		RID_IN,			D_OBJC },
  { "inout",		RID_INOUT,		D_OBJC },
  { "oneway",		RID_ONEWAY,		D_OBJC },
  { "out",		RID_OUT,		D_OBJC },
};
/* Number of entries in the reserved-word table above.  */
#define N_reswords (sizeof reswords / sizeof (struct resword))
/* All OpenMP clauses.  OpenMP 2.5.  Used when parsing the clause
   list of an '#pragma omp' directive.  */
typedef enum pragma_omp_clause {
  PRAGMA_OMP_CLAUSE_NONE = 0,	/* no clause / unrecognized */
  PRAGMA_OMP_CLAUSE_COPYIN,
  PRAGMA_OMP_CLAUSE_COPYPRIVATE,
  PRAGMA_OMP_CLAUSE_DEFAULT,
  PRAGMA_OMP_CLAUSE_FIRSTPRIVATE,
  PRAGMA_OMP_CLAUSE_IF,
  PRAGMA_OMP_CLAUSE_LASTPRIVATE,
  PRAGMA_OMP_CLAUSE_NOWAIT,
  PRAGMA_OMP_CLAUSE_NUM_THREADS,
  PRAGMA_OMP_CLAUSE_ORDERED,
  PRAGMA_OMP_CLAUSE_PRIVATE,
  PRAGMA_OMP_CLAUSE_REDUCTION,
  PRAGMA_OMP_CLAUSE_SCHEDULE,
  PRAGMA_OMP_CLAUSE_SHARED
} pragma_omp_clause;
/* Initialization routine for this file. */
void
c_parse_init (void)
{
/* The only initialization required is of the reserved word
identifiers. */
unsigned int i;
tree id;
int mask = (flag_isoc99 ? 0 : D_C89)
| (flag_no_asm ? (flag_isoc99 ? D_EXT : D_EXT|D_EXT89) : 0);
if (!c_dialect_objc ())
mask |= D_OBJC;
ridpointers = GGC_CNEWVEC (tree, (int) RID_MAX);
for (i = 0; i < N_reswords; i++)
{
/* If a keyword is disabled, do not enter it into the table
and so create a canonical spelling that isn't a keyword. */
if (reswords[i].disable & mask)
continue;
id = get_identifier (reswords[i].word);
C_RID_CODE (id) = reswords[i].rid;
C_IS_RESERVED_WORD (id) = 1;
ridpointers [(int) reswords[i].rid] = id;
}
}
/* The C lexer intermediates between the lexer in cpplib and c-lex.c
and the C parser. Unlike the C++ lexer, the parser structure
stores the lexer information instead of using a separate structure.
Identifiers are separated into ordinary identifiers, type names,
keywords and some other Objective-C types of identifiers, and some
look-ahead is maintained.
??? It might be a good idea to lex the whole file up front (as for
C++). It would then be possible to share more of the C and C++
lexer code, if desired. */
/* The following local token type is used.  */
/* A keyword: an extra token type beyond cpplib's own, so the parser
   can distinguish keywords from ordinary CPP_NAME identifiers.  */
#define CPP_KEYWORD ((enum cpp_ttype) (N_TTYPES + 1))
/* More information about the type of a CPP_NAME token. */
/* Classification recorded alongside a CPP_NAME token by
   c_lex_one_token; lets the parser ask "is this a typename?"
   without a second symbol-table lookup.  */
typedef enum c_id_kind {
  /* An ordinary identifier.  */
  C_ID_ID,
  /* An identifier declared as a typedef name.  */
  C_ID_TYPENAME,
  /* An identifier declared as an Objective-C class name.  */
  C_ID_CLASSNAME,
  /* Not an identifier.  */
  C_ID_NONE
} c_id_kind;
/* A single C token after string literal concatenation and conversion
of preprocessing tokens to tokens. */
typedef struct c_token GTY (())
{
  /* The kind of token.  */
  ENUM_BITFIELD (cpp_ttype) type : 8;
  /* If this token is a CPP_NAME, this value indicates whether also
     declared as some kind of type.  Otherwise, it is C_ID_NONE.  */
  ENUM_BITFIELD (c_id_kind) id_kind : 8;
  /* If this token is a keyword, this value indicates which keyword.
     Otherwise, this value is RID_MAX.  */
  ENUM_BITFIELD (rid) keyword : 8;
  /* If this token is a CPP_PRAGMA, this indicates the pragma that
     was seen.  Otherwise it is PRAGMA_NONE.  */
  ENUM_BITFIELD (pragma_kind) pragma_kind : 7;
  /* True if this token is from a system header.  */
  BOOL_BITFIELD in_system_header : 1;
  /* The value associated with this token, if any.  */
  tree value;
  /* The location at which this token was found.  */
  location_t location;
  /* The bitfields above pack the classification into one word; two
     of these make up the parser's look-ahead buffer (see c_parser).  */
} c_token;
/* A parser structure recording information about the state and
context of parsing. Includes lexer information with up to two
tokens of look-ahead; more are not needed for C. */
typedef struct c_parser GTY(())
{
  /* The look-ahead tokens.  */
  c_token tokens[2];
  /* How many look-ahead tokens are available (0, 1 or 2).  */
  short tokens_avail;
  /* True if a syntax error is being recovered from; false otherwise.
     c_parser_error sets this flag.  It should clear this flag when
     enough tokens have been consumed to recover from the error.  */
  BOOL_BITFIELD error : 1;
  /* True if we're processing a pragma, and shouldn't automatically
     consume CPP_PRAGMA_EOL.  */
  BOOL_BITFIELD in_pragma : 1;
} c_parser;
/* The actual parser and external interface.  ??? Does this need to be
   garbage-collected?  */
/* The single file-wide parser instance; GTY(()) registers it as a GC
   root so the tokens' tree values stay alive across collections.  */
static GTY (()) c_parser *the_parser;
/* Read in and lex a single token, storing it in *TOKEN. */
static void
c_lex_one_token (c_token *token)
{
  timevar_push (TV_LEX);
  /* Get the raw token from cpplib, then fill in the default
     classification fields before refining them below.  */
  token->type = c_lex_with_flags (&token->value, &token->location, NULL);
  token->id_kind = C_ID_NONE;
  token->keyword = RID_MAX;
  token->pragma_kind = PRAGMA_NONE;
  token->in_system_header = in_system_header;
  switch (token->type)
    {
    case CPP_NAME:
      {
	tree decl;
	/* Remember whether the preceding context suppressed class-name
	   binding, then reset the flag for the next token.  */
	int objc_force_identifier = objc_need_raw_identifier;
	OBJC_NEED_RAW_IDENTIFIER (0);
	if (C_IS_RESERVED_WORD (token->value))
	  {
	    enum rid rid_code = C_RID_CODE (token->value);
	    if (c_dialect_objc ())
	      {
		/* @-keywords and (outside protocol-qualifier context)
		   protocol qualifiers stay ordinary identifiers here.  */
		if (!OBJC_IS_AT_KEYWORD (rid_code)
		    && (!OBJC_IS_PQ_KEYWORD (rid_code) || objc_pq_context))
		  {
		    /* Return the canonical spelling for this keyword.  */
		    token->value = ridpointers[(int) rid_code];
		    token->type = CPP_KEYWORD;
		    token->keyword = rid_code;
		    break;
		  }
	      }
	    else
	      {
		/* Return the canonical spelling for this keyword.  */
		token->value = ridpointers[(int) rid_code];
		token->type = CPP_KEYWORD;
		token->keyword = rid_code;
		break;
	      }
	  }
	/* Not a keyword: classify the identifier as a typename,
	   Objective-C class name, or plain identifier.  */
	decl = lookup_name (token->value);
	if (decl)
	  {
	    if (TREE_CODE (decl) == TYPE_DECL)
	      {
		token->id_kind = C_ID_TYPENAME;
		break;
	      }
	  }
	else if (c_dialect_objc ())
	  {
	    tree objc_interface_decl = objc_is_class_name (token->value);
	    /* Objective-C class names are in the same namespace as
	       variables and typedefs, and hence are shadowed by local
	       declarations.  */
	    if (objc_interface_decl
                && (global_bindings_p ()
                    || (!objc_force_identifier && !decl)))
	      {
		token->value = objc_interface_decl;
		token->id_kind = C_ID_CLASSNAME;
		break;
	      }
	  }
        token->id_kind = C_ID_ID;
      }
      break;
    case CPP_AT_NAME:
      /* This only happens in Objective-C; it must be a keyword.  */
      token->type = CPP_KEYWORD;
      token->keyword = C_RID_CODE (token->value);
      break;
    case CPP_COLON:
    case CPP_COMMA:
    case CPP_CLOSE_PAREN:
    case CPP_SEMICOLON:
      /* These tokens may affect the interpretation of any identifiers
	 following, if doing Objective-C.  */
      OBJC_NEED_RAW_IDENTIFIER (0);
      break;
    case CPP_PRAGMA:
      /* We smuggled the cpp_token->u.pragma value in an INTEGER_CST.  */
      token->pragma_kind = TREE_INT_CST_LOW (token->value);
      token->value = NULL;
      break;
    default:
      break;
    }
  timevar_pop (TV_LEX);
}
/* Return a pointer to the next token from PARSER, reading it in if
necessary. */
/* Give the caller a pointer to the next token, lexing it first when
   the look-ahead buffer is empty.  The token is NOT consumed.  */
static inline c_token *
c_parser_peek_token (c_parser *parser)
{
  if (parser->tokens_avail < 1)
    {
      c_lex_one_token (&parser->tokens[0]);
      parser->tokens_avail = 1;
    }
  return parser->tokens;
}
/* Return true if the next token from PARSER has the indicated
TYPE. */
/* Does the next token from PARSER have the given TYPE?  */
static inline bool
c_parser_next_token_is (c_parser *parser, enum cpp_ttype type)
{
  c_token *next = c_parser_peek_token (parser);
  return next->type == type;
}
/* Return true if the next token from PARSER does not have the
indicated TYPE. */
/* Does the next token from PARSER differ from the given TYPE?  */
static inline bool
c_parser_next_token_is_not (c_parser *parser, enum cpp_ttype type)
{
  c_token *next = c_parser_peek_token (parser);
  return next->type != type;
}
/* Return true if the next token from PARSER is the indicated
KEYWORD. */
/* Is the next token from PARSER the given KEYWORD?  Non-keyword
   tokens carry RID_MAX and so never match.  */
static inline bool
c_parser_next_token_is_keyword (c_parser *parser, enum rid keyword)
{
  return c_parser_peek_token (parser)->keyword == keyword;
}
/* Return true if TOKEN can start a type name,
false otherwise. */
static bool
c_token_starts_typename (c_token *token)
{
  switch (token->type)
    {
    case CPP_NAME:
      /* An identifier starts a type name only if it was classified as
	 a typedef or Objective-C class name by c_lex_one_token.  */
      switch (token->id_kind)
	{
	case C_ID_ID:
	  return false;
	case C_ID_TYPENAME:
	  return true;
	case C_ID_CLASSNAME:
	  gcc_assert (c_dialect_objc ());
	  return true;
	default:
	  gcc_unreachable ();
	}
    case CPP_KEYWORD:
      /* Type-specifier and type-qualifier keywords.  */
      switch (token->keyword)
	{
	case RID_UNSIGNED:
	case RID_LONG:
	case RID_SHORT:
	case RID_SIGNED:
	case RID_COMPLEX:
	case RID_INT:
	case RID_CHAR:
	case RID_FLOAT:
	case RID_DOUBLE:
	case RID_VOID:
	case RID_DFLOAT32:
	case RID_DFLOAT64:
	case RID_DFLOAT128:
	case RID_BOOL:
	case RID_ENUM:
	case RID_STRUCT:
	case RID_UNION:
	case RID_TYPEOF:
	case RID_CONST:
	case RID_VOLATILE:
	case RID_RESTRICT:
	case RID_ATTRIBUTE:
	  return true;
	default:
	  return false;
	}
    case CPP_LESS:
      /* '<' begins an Objective-C protocol qualifier list.  */
      if (c_dialect_objc ())
	return true;
      return false;
    default:
      return false;
    }
}
/* Return true if the next token from PARSER can start a type name,
false otherwise. */
/* Can the next token from PARSER start a type name?  */
static inline bool
c_parser_next_token_starts_typename (c_parser *parser)
{
  return c_token_starts_typename (c_parser_peek_token (parser));
}
/* Return true if TOKEN can start declaration specifiers, false
otherwise. */
static bool
c_token_starts_declspecs (c_token *token)
{
  switch (token->type)
    {
    case CPP_NAME:
      /* Same identifier classification as c_token_starts_typename.  */
      switch (token->id_kind)
	{
	case C_ID_ID:
	  return false;
	case C_ID_TYPENAME:
	  return true;
	case C_ID_CLASSNAME:
	  gcc_assert (c_dialect_objc ());
	  return true;
	default:
	  gcc_unreachable ();
	}
    case CPP_KEYWORD:
      /* Superset of the typename keywords: also storage-class
	 specifiers (static, extern, ...) and function specifiers.  */
      switch (token->keyword)
	{
	case RID_STATIC:
	case RID_EXTERN:
	case RID_REGISTER:
	case RID_TYPEDEF:
	case RID_INLINE:
	case RID_AUTO:
	case RID_THREAD:
	case RID_UNSIGNED:
	case RID_LONG:
	case RID_SHORT:
	case RID_SIGNED:
	case RID_COMPLEX:
	case RID_INT:
	case RID_CHAR:
	case RID_FLOAT:
	case RID_DOUBLE:
	case RID_VOID:
	case RID_DFLOAT32:
	case RID_DFLOAT64:
	case RID_DFLOAT128:
	case RID_BOOL:
	case RID_ENUM:
	case RID_STRUCT:
	case RID_UNION:
	case RID_TYPEOF:
	case RID_CONST:
	case RID_VOLATILE:
	case RID_RESTRICT:
	case RID_ATTRIBUTE:
	  return true;
	default:
	  return false;
	}
    case CPP_LESS:
      /* '<' begins an Objective-C protocol qualifier list.  */
      if (c_dialect_objc ())
	return true;
      return false;
    default:
      return false;
    }
}
/* Return true if the next token from PARSER can start declaration
specifiers, false otherwise. */
/* Can the next token from PARSER start declaration specifiers?  */
static inline bool
c_parser_next_token_starts_declspecs (c_parser *parser)
{
  return c_token_starts_declspecs (c_parser_peek_token (parser));
}
/* Return a pointer to the next-but-one token from PARSER, reading it
in if necessary. The next token is already read in. */
/* Return a pointer to the next-but-one token from PARSER, lexing it
   in when only one token is buffered.  Never peeks past EOF or a
   pragma terminator.  */
static c_token *
c_parser_peek_2nd_token (c_parser *parser)
{
  c_token *second = &parser->tokens[1];
  if (parser->tokens_avail < 2)
    {
      gcc_assert (parser->tokens_avail == 1);
      gcc_assert (parser->tokens[0].type != CPP_EOF);
      gcc_assert (parser->tokens[0].type != CPP_PRAGMA_EOL);
      c_lex_one_token (second);
      parser->tokens_avail = 2;
    }
  return second;
}
/* Consume the next token from PARSER. */
static void
c_parser_consume_token (c_parser *parser)
{
  gcc_assert (parser->tokens_avail >= 1);
  gcc_assert (parser->tokens[0].type != CPP_EOF);
  /* CPP_PRAGMA_EOL may only be consumed here during error recovery;
     normal pragma parsing handles it explicitly.  Likewise a CPP_PRAGMA
     must go through c_parser_consume_pragma unless recovering.  */
  gcc_assert (!parser->in_pragma || parser->tokens[0].type != CPP_PRAGMA_EOL);
  gcc_assert (parser->error || parser->tokens[0].type != CPP_PRAGMA);
  /* Shift the second look-ahead token (if any) into the first slot.  */
  if (parser->tokens_avail == 2)
    parser->tokens[0] = parser->tokens[1];
  parser->tokens_avail--;
}
/* Expect the current token to be a #pragma. Consume it and remember
that we've begun parsing a pragma. */
static void
c_parser_consume_pragma (c_parser *parser)
{
  /* Pragmas do not nest.  */
  gcc_assert (!parser->in_pragma);
  gcc_assert (parser->tokens_avail >= 1);
  gcc_assert (parser->tokens[0].type == CPP_PRAGMA);
  /* Shift the second look-ahead token (if any) into the first slot.  */
  if (parser->tokens_avail == 2)
    parser->tokens[0] = parser->tokens[1];
  parser->tokens_avail--;
  /* From here until CPP_PRAGMA_EOL, consume_token refuses the EOL.  */
  parser->in_pragma = true;
}
/* Update the globals input_location and in_system_header from
TOKEN. */
/* Update the globals input_location and in_system_header from TOKEN;
   an EOF token leaves both untouched.  */
static inline void
c_parser_set_source_position_from_token (c_token *token)
{
  if (token->type == CPP_EOF)
    return;
  input_location = token->location;
  in_system_header = token->in_system_header;
}
/* Issue a diagnostic of the form
      FILE:LINE: MESSAGE before TOKEN
   where TOKEN is the next token in the input stream of PARSER.
   MESSAGE (specified by the caller) is usually of the form "expected
   OTHER-TOKEN".

   Do not issue a diagnostic if still recovering from an error.

   ??? This is taken from the C++ parser, but building up messages in
   this way is not i18n-friendly and some other approach should be
   used.  */

static void
c_parser_error (c_parser *parser, const char *gmsgid)
{
  c_token *next_tok = c_parser_peek_token (parser);
  enum cpp_ttype ttype;

  if (parser->error)
    return;
  parser->error = true;
  if (gmsgid == NULL)
    return;

  /* This diagnostic makes more sense if it is tagged to the line of
     the token we just peeked at.  */
  c_parser_set_source_position_from_token (next_tok);

  /* Because c_parse_error does not understand CPP_KEYWORD, keywords
     are treated like identifiers.  */
  ttype = (next_tok->type == CPP_KEYWORD ? CPP_NAME : next_tok->type);
  c_parse_error (gmsgid, ttype, next_tok->value);
}
/* If the next token is of the indicated TYPE, consume it.  Otherwise,
   issue the error MSGID.  If MSGID is NULL then a message has already
   been produced and no message will be produced this time.  Returns
   true if found, false otherwise.  */

static bool
c_parser_require (c_parser *parser,
		  enum cpp_ttype type,
		  const char *msgid)
{
  if (!c_parser_next_token_is (parser, type))
    {
      c_parser_error (parser, msgid);
      return false;
    }
  c_parser_consume_token (parser);
  return true;
}
/* If the next token is the indicated keyword, consume it.  Otherwise,
   issue the error MSGID.  Returns true if found, false otherwise.  */

static bool
c_parser_require_keyword (c_parser *parser,
			  enum rid keyword,
			  const char *msgid)
{
  if (!c_parser_next_token_is_keyword (parser, keyword))
    {
      c_parser_error (parser, msgid);
      return false;
    }
  c_parser_consume_token (parser);
  return true;
}
/* Like c_parser_require, except that tokens will be skipped until the
   desired token is found.  An error message is still produced if the
   next token is not as expected.  If MSGID is NULL then a message has
   already been produced and no message will be produced this
   time.  */

static void
c_parser_skip_until_found (c_parser *parser,
			   enum cpp_ttype type,
			   const char *msgid)
{
  unsigned depth = 0;

  if (c_parser_require (parser, type, msgid))
    return;

  /* Skip tokens until the desired token is found.  */
  for (;;)
    {
      c_token *tok = c_parser_peek_token (parser);

      /* A non-nested occurrence of the wanted token: consume and stop.  */
      if (tok->type == type && depth == 0)
	{
	  c_parser_consume_token (parser);
	  break;
	}
      /* Stop (without clearing the error flag) at end of input or at
	 the end of the pragma line being parsed.  */
      if (tok->type == CPP_EOF)
	return;
      if (tok->type == CPP_PRAGMA_EOL && parser->in_pragma)
	return;

      switch (tok->type)
	{
	case CPP_OPEN_BRACE:
	case CPP_OPEN_PAREN:
	case CPP_OPEN_SQUARE:
	  depth++;
	  break;
	case CPP_CLOSE_BRACE:
	case CPP_CLOSE_PAREN:
	case CPP_CLOSE_SQUARE:
	  /* A closer at depth zero ends the skip without being consumed.  */
	  if (depth == 0)
	    goto done;
	  depth--;
	  break;
	default:
	  break;
	}
      /* Consume this token.  */
      c_parser_consume_token (parser);
    }
 done:
  parser->error = false;
}
/* Skip tokens until the end of a parameter is found, but do not
   consume the comma, semicolon or closing delimiter.  */

static void
c_parser_skip_to_end_of_parameter (c_parser *parser)
{
  unsigned depth = 0;

  for (;;)
    {
      c_token *tok = c_parser_peek_token (parser);

      /* A non-nested ',' or ';' ends the parameter; leave it unconsumed.  */
      if (depth == 0
	  && (tok->type == CPP_COMMA || tok->type == CPP_SEMICOLON))
	break;
      /* Stop (without clearing the error flag) at end of input or at
	 the end of the pragma line being parsed.  */
      if (tok->type == CPP_EOF)
	return;
      if (tok->type == CPP_PRAGMA_EOL && parser->in_pragma)
	return;

      switch (tok->type)
	{
	case CPP_OPEN_BRACE:
	case CPP_OPEN_PAREN:
	case CPP_OPEN_SQUARE:
	  depth++;
	  break;
	case CPP_CLOSE_BRACE:
	case CPP_CLOSE_PAREN:
	case CPP_CLOSE_SQUARE:
	  /* A closer at depth zero ends the skip without being consumed.  */
	  if (depth == 0)
	    goto done;
	  depth--;
	  break;
	default:
	  break;
	}
      /* Consume this token.  */
      c_parser_consume_token (parser);
    }
 done:
  parser->error = false;
}
/* Expect to be at the end of the pragma directive and consume an
   end of line marker.  */

static void
c_parser_skip_to_pragma_eol (c_parser *parser)
{
  gcc_assert (parser->in_pragma);
  parser->in_pragma = false;

  /* If the expected end-of-line marker is not next, discard tokens
     through the end of the pragma line (or end of input).  */
  if (!c_parser_require (parser, CPP_PRAGMA_EOL, "expected end of line"))
    for (;;)
      {
	enum cpp_ttype ttype = c_parser_peek_token (parser)->type;

	if (ttype == CPP_EOF)
	  break;
	c_parser_consume_token (parser);
	if (ttype == CPP_PRAGMA_EOL)
	  break;
      }

  parser->error = false;
}
/* Skip tokens until we have consumed an entire block, or until we
   have consumed a non-nested ';'.  The error flag is cleared on the
   two "finished" exits but deliberately left alone on the EOF and
   pragma-EOL exits.  */

static void
c_parser_skip_to_end_of_block_or_statement (c_parser *parser)
{
  unsigned nesting_depth = 0;
  /* Remember the error state so that skipping an embedded pragma does
     not permanently clear it (c_parser_skip_to_pragma_eol resets
     parser->error).  */
  bool save_error = parser->error;
  while (true)
    {
      c_token *token;

      /* Peek at the next token.  */
      token = c_parser_peek_token (parser);

      switch (token->type)
	{
	case CPP_EOF:
	  return;

	case CPP_PRAGMA_EOL:
	  if (parser->in_pragma)
	    return;
	  break;

	case CPP_SEMICOLON:
	  /* If the next token is a ';', we have reached the
	     end of the statement.  */
	  if (!nesting_depth)
	    {
	      /* Consume the ';'.  */
	      c_parser_consume_token (parser);
	      goto finished;
	    }
	  break;

	case CPP_CLOSE_BRACE:
	  /* If the next token is a non-nested '}', then we have
	     reached the end of the current block.  Note an unmatched
	     '}' (depth already zero) is consumed too.  */
	  if (nesting_depth == 0 || --nesting_depth == 0)
	    {
	      c_parser_consume_token (parser);
	      goto finished;
	    }
	  break;

	case CPP_OPEN_BRACE:
	  /* If the next token is a '{', then we are entering a new
	     block.  Consume the entire block.  */
	  ++nesting_depth;
	  break;

	case CPP_PRAGMA:
	  /* If we see a pragma, consume the whole thing at once.  We
	     have some safeguards against consuming pragmas willy-nilly.
	     Normally, we'd expect to be here with parser->error set,
	     which disables these safeguards.  But it's possible to get
	     here for secondary error recovery, after parser->error has
	     been cleared.  */
	  c_parser_consume_pragma (parser);
	  c_parser_skip_to_pragma_eol (parser);
	  parser->error = save_error;
	  continue;

	default:
	  break;
	}

      c_parser_consume_token (parser);
    }
 finished:
  parser->error = false;
}
/* Save the warning flags which are controlled by __extension__,
   packed into the low four bits of the return value, and turn them
   all off.  */

static inline int
disable_extension_diagnostics (void)
{
  int saved = pedantic;
  saved |= warn_pointer_arith << 1;
  saved |= warn_traditional << 2;
  saved |= flag_iso << 3;

  pedantic = 0;
  warn_pointer_arith = 0;
  warn_traditional = 0;
  flag_iso = 0;
  return saved;
}
/* Restore the warning flags which are controlled by __extension__.
   FLAGS is the return value from disable_extension_diagnostics; each
   flag occupies one bit, in the same order it was saved.  */

static inline void
restore_extension_diagnostics (int flags)
{
  pedantic = (flags >> 0) & 1;
  warn_pointer_arith = (flags >> 1) & 1;
  warn_traditional = (flags >> 2) & 1;
  flag_iso = (flags >> 3) & 1;
}
/* Possible kinds of declarator to parse.  */
typedef enum c_dtr_syn {
  /* A normal declarator with an identifier.  */
  C_DTR_NORMAL,
  /* An abstract declarator (maybe empty).  */
  C_DTR_ABSTRACT,
  /* A parameter declarator: may be either, but after a type name does
     not redeclare a typedef name as an identifier if it can
     alternatively be interpreted as a typedef name; see DR#009,
     applied in C90 TC1, omitted from C99 and reapplied in C99 TC2
     following DR#249.  For example, given a typedef T, "int T" and
     "int *T" are valid parameter declarations redeclaring T, while
     "int (T)" and "int * (T)" and "int (T[])" and "int (T (int))" are
     abstract declarators rather than involving redundant parentheses;
     the same applies with attributes inside the parentheses before
     "T".  */
  C_DTR_PARM
} c_dtr_syn;
static void c_parser_external_declaration (c_parser *);
static void c_parser_asm_definition (c_parser *);
static void c_parser_declaration_or_fndef (c_parser *, bool, bool, bool, bool);
static void c_parser_declspecs (c_parser *, struct c_declspecs *, bool, bool,
bool);
static struct c_typespec c_parser_enum_specifier (c_parser *);
static struct c_typespec c_parser_struct_or_union_specifier (c_parser *);
static tree c_parser_struct_declaration (c_parser *);
static struct c_typespec c_parser_typeof_specifier (c_parser *);
static struct c_declarator *c_parser_declarator (c_parser *, bool, c_dtr_syn,
bool *);
static struct c_declarator *c_parser_direct_declarator (c_parser *, bool,
c_dtr_syn, bool *);
static struct c_declarator *c_parser_direct_declarator_inner (c_parser *,
bool,
struct c_declarator *);
static struct c_arg_info *c_parser_parms_declarator (c_parser *, bool, tree);
static struct c_arg_info *c_parser_parms_list_declarator (c_parser *, tree);
static struct c_parm *c_parser_parameter_declaration (c_parser *, tree);
static tree c_parser_simple_asm_expr (c_parser *);
static tree c_parser_attributes (c_parser *);
static struct c_type_name *c_parser_type_name (c_parser *);
static struct c_expr c_parser_initializer (c_parser *);
static struct c_expr c_parser_braced_init (c_parser *, tree, bool);
static void c_parser_initelt (c_parser *);
static void c_parser_initval (c_parser *, struct c_expr *);
static tree c_parser_compound_statement (c_parser *);
static void c_parser_compound_statement_nostart (c_parser *);
static void c_parser_label (c_parser *);
static void c_parser_statement (c_parser *);
static void c_parser_statement_after_labels (c_parser *);
static void c_parser_if_statement (c_parser *);
static void c_parser_switch_statement (c_parser *);
static void c_parser_while_statement (c_parser *);
static void c_parser_do_statement (c_parser *);
static void c_parser_for_statement (c_parser *);
static tree c_parser_asm_statement (c_parser *);
static tree c_parser_asm_operands (c_parser *, bool);
static tree c_parser_asm_clobbers (c_parser *);
static struct c_expr c_parser_expr_no_commas (c_parser *, struct c_expr *);
static struct c_expr c_parser_conditional_expression (c_parser *,
struct c_expr *);
static struct c_expr c_parser_binary_expression (c_parser *, struct c_expr *);
static struct c_expr c_parser_cast_expression (c_parser *, struct c_expr *);
static struct c_expr c_parser_unary_expression (c_parser *);
static struct c_expr c_parser_sizeof_expression (c_parser *);
static struct c_expr c_parser_alignof_expression (c_parser *);
static struct c_expr c_parser_postfix_expression (c_parser *);
static struct c_expr c_parser_postfix_expression_after_paren_type (c_parser *,
struct c_type_name *);
static struct c_expr c_parser_postfix_expression_after_primary (c_parser *,
struct c_expr);
static struct c_expr c_parser_expression (c_parser *);
static struct c_expr c_parser_expression_conv (c_parser *);
static tree c_parser_expr_list (c_parser *, bool);
static void c_parser_omp_construct (c_parser *);
static void c_parser_omp_threadprivate (c_parser *);
static void c_parser_omp_barrier (c_parser *);
static void c_parser_omp_flush (c_parser *);
enum pragma_context { pragma_external, pragma_stmt, pragma_compound };
static bool c_parser_pragma (c_parser *, enum pragma_context);
/* These Objective-C parser functions are only ever called when
compiling Objective-C. */
static void c_parser_objc_class_definition (c_parser *);
static void c_parser_objc_class_instance_variables (c_parser *);
static void c_parser_objc_class_declaration (c_parser *);
static void c_parser_objc_alias_declaration (c_parser *);
static void c_parser_objc_protocol_definition (c_parser *);
static enum tree_code c_parser_objc_method_type (c_parser *);
static void c_parser_objc_method_definition (c_parser *);
static void c_parser_objc_methodprotolist (c_parser *);
static void c_parser_objc_methodproto (c_parser *);
static tree c_parser_objc_method_decl (c_parser *);
static tree c_parser_objc_type_name (c_parser *);
static tree c_parser_objc_protocol_refs (c_parser *);
static void c_parser_objc_try_catch_statement (c_parser *);
static void c_parser_objc_synchronized_statement (c_parser *);
static tree c_parser_objc_selector (c_parser *);
static tree c_parser_objc_selector_arg (c_parser *);
static tree c_parser_objc_receiver (c_parser *);
static tree c_parser_objc_message_args (c_parser *);
static tree c_parser_objc_keywordexpr (c_parser *);
/* Parse a translation unit (C90 6.7, C99 6.9).

   translation-unit:
     external-declarations

   external-declarations:
     external-declaration
     external-declarations external-declaration

   GNU extensions:

   translation-unit:
     empty
*/

static void
c_parser_translation_unit (c_parser *parser)
{
  void *obstack_position;

  /* An empty source file is a GNU extension; diagnose it if pedantic.  */
  if (c_parser_next_token_is (parser, CPP_EOF))
    {
      if (pedantic)
	pedwarn ("ISO C forbids an empty source file");
      return;
    }

  obstack_position = obstack_alloc (&parser_obstack, 0);
  do
    {
      ggc_collect ();
      c_parser_external_declaration (parser);
      /* Release per-declaration obstack storage after each one.  */
      obstack_free (&parser_obstack, obstack_position);
    }
  while (c_parser_next_token_is_not (parser, CPP_EOF));
}
/* Parse an external declaration (C90 6.7, C99 6.9).

   external-declaration:
     function-definition
     declaration

   GNU extensions:

   external-declaration:
     asm-definition
     ;
     __extension__ external-declaration

   Objective-C:

   external-declaration:
     objc-class-definition
     objc-class-declaration
     objc-alias-declaration
     objc-protocol-definition
     objc-method-definition
     @end
*/

static void
c_parser_external_declaration (c_parser *parser)
{
  int ext;
  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_KEYWORD:
      switch (c_parser_peek_token (parser)->keyword)
	{
	case RID_EXTENSION:
	  /* __extension__: suppress extension warnings for the nested
	     declaration, then restore them.  */
	  ext = disable_extension_diagnostics ();
	  c_parser_consume_token (parser);
	  c_parser_external_declaration (parser);
	  restore_extension_diagnostics (ext);
	  break;
	case RID_ASM:
	  c_parser_asm_definition (parser);
	  break;
	case RID_AT_INTERFACE:
	case RID_AT_IMPLEMENTATION:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_class_definition (parser);
	  break;
	case RID_AT_CLASS:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_class_declaration (parser);
	  break;
	case RID_AT_ALIAS:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_alias_declaration (parser);
	  break;
	case RID_AT_PROTOCOL:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_protocol_definition (parser);
	  break;
	case RID_AT_END:
	  gcc_assert (c_dialect_objc ());
	  c_parser_consume_token (parser);
	  objc_finish_implementation ();
	  break;
	default:
	  /* Any other keyword starts declaration specifiers.  */
	  goto decl_or_fndef;
	}
      break;
    case CPP_SEMICOLON:
      /* A stray top-level ';' is a GNU extension.  */
      if (pedantic)
	pedwarn ("ISO C does not allow extra %<;%> outside of a function");
      c_parser_consume_token (parser);
      break;
    case CPP_PRAGMA:
      c_parser_pragma (parser, pragma_external);
      break;
    case CPP_PLUS:
    case CPP_MINUS:
      /* '+' / '-' introduce method definitions in Objective-C only.  */
      if (c_dialect_objc ())
	{
	  c_parser_objc_method_definition (parser);
	  break;
	}
      /* Else fall through, and yield a syntax error trying to parse
	 as a declaration or function definition.  */
    default:
    decl_or_fndef:
      /* A declaration or a function definition.  We can only tell
	 which after parsing the declaration specifiers, if any, and
	 the first declarator.  */
      c_parser_declaration_or_fndef (parser, true, true, false, true);
      break;
    }
}
/* Parse a declaration or function definition (C90 6.5, 6.7.1, C99
   6.7, 6.9.1).  If FNDEF_OK is true, a function definition is
   accepted; otherwise (old-style parameter declarations) only other
   declarations are accepted.  If NESTED is true, we are inside a
   function or parsing old-style parameter declarations; any functions
   encountered are nested functions and declaration specifiers are
   required; otherwise we are at top level and functions are normal
   functions and declaration specifiers may be optional.  If EMPTY_OK
   is true, empty declarations are OK (subject to all other
   constraints); otherwise (old-style parameter declarations) they are
   diagnosed.  If START_ATTR_OK is true, the declaration specifiers
   may start with attributes; otherwise they may not.

   declaration:
     declaration-specifiers init-declarator-list[opt] ;

   function-definition:
     declaration-specifiers[opt] declarator declaration-list[opt]
       compound-statement

   declaration-list:
     declaration
     declaration-list declaration

   init-declarator-list:
     init-declarator
     init-declarator-list , init-declarator

   init-declarator:
     declarator simple-asm-expr[opt] attributes[opt]
     declarator simple-asm-expr[opt] attributes[opt] = initializer

   GNU extensions:

   nested-function-definition:
     declaration-specifiers declarator declaration-list[opt]
       compound-statement

   The simple-asm-expr and attributes are GNU extensions.

   This function does not handle __extension__; that is handled in its
   callers.  ??? Following the old parser, __extension__ may start
   external declarations, declarations in functions and declarations
   at the start of "for" loops, but not old-style parameter
   declarations.

   C99 requires declaration specifiers in a function definition; the
   absence is diagnosed through the diagnosis of implicit int.  In GNU
   C we also allow but diagnose declarations without declaration
   specifiers, but only at top level (elsewhere they conflict with
   other syntax).

   OpenMP:

   declaration:
     threadprivate-directive */

static void
c_parser_declaration_or_fndef (c_parser *parser, bool fndef_ok, bool empty_ok,
			       bool nested, bool start_attr_ok)
{
  struct c_declspecs *specs;
  tree prefix_attrs;
  tree all_prefix_attrs;
  bool diagnosed_no_specs = false;

  specs = build_null_declspecs ();
  c_parser_declspecs (parser, specs, true, true, start_attr_ok);
  if (parser->error)
    {
      c_parser_skip_to_end_of_block_or_statement (parser);
      return;
    }
  if (nested && !specs->declspecs_seen_p)
    {
      c_parser_error (parser, "expected declaration specifiers");
      c_parser_skip_to_end_of_block_or_statement (parser);
      return;
    }
  finish_declspecs (specs);
  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
    {
      /* Declaration with no declarators, e.g. "struct s;".  */
      if (empty_ok)
	shadow_tag (specs);
      else
	{
	  shadow_tag_warned (specs, 1);
	  pedwarn ("empty declaration");
	}
      c_parser_consume_token (parser);
      return;
    }
  pending_xref_error ();
  /* Prefix attributes apply to every declarator in the list; postfix
     attributes (parsed below) apply only to the current one.  */
  prefix_attrs = specs->attrs;
  all_prefix_attrs = prefix_attrs;
  specs->attrs = NULL_TREE;
  while (true)
    {
      struct c_declarator *declarator;
      bool dummy = false;
      tree fnbody;
      /* Declaring either one or more declarators (in which case we
	 should diagnose if there were no declaration specifiers) or a
	 function definition (in which case the diagnostic for
	 implicit int suffices).  */
      declarator = c_parser_declarator (parser, specs->type_seen_p,
					C_DTR_NORMAL, &dummy);
      if (declarator == NULL)
	{
	  c_parser_skip_to_end_of_block_or_statement (parser);
	  return;
	}
      if (c_parser_next_token_is (parser, CPP_EQ)
	  || c_parser_next_token_is (parser, CPP_COMMA)
	  || c_parser_next_token_is (parser, CPP_SEMICOLON)
	  || c_parser_next_token_is_keyword (parser, RID_ASM)
	  || c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
	{
	  /* Any of these tokens after the declarator means this is a
	     data declaration, not a function definition.  */
	  tree asm_name = NULL_TREE;
	  tree postfix_attrs = NULL_TREE;
	  if (!diagnosed_no_specs && !specs->declspecs_seen_p)
	    {
	      diagnosed_no_specs = true;
	      pedwarn ("data definition has no type or storage class");
	    }
	  /* Having seen a data definition, there cannot now be a
	     function definition.  */
	  fndef_ok = false;
	  if (c_parser_next_token_is_keyword (parser, RID_ASM))
	    asm_name = c_parser_simple_asm_expr (parser);
	  if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
	    postfix_attrs = c_parser_attributes (parser);
	  if (c_parser_next_token_is (parser, CPP_EQ))
	    {
	      tree d;
	      struct c_expr init;
	      c_parser_consume_token (parser);
	      /* The declaration of the variable is in effect while
		 its initializer is parsed.  */
	      d = start_decl (declarator, specs, true,
			      chainon (postfix_attrs, all_prefix_attrs));
	      if (!d)
		d = error_mark_node;
	      start_init (d, asm_name, global_bindings_p ());
	      init = c_parser_initializer (parser);
	      finish_init ();
	      if (d != error_mark_node)
		{
		  maybe_warn_string_init (TREE_TYPE (d), init);
		  finish_decl (d, init.value, asm_name);
		}
	    }
	  else
	    {
	      tree d = start_decl (declarator, specs, false,
				   chainon (postfix_attrs,
					    all_prefix_attrs));
	      if (d)
		finish_decl (d, NULL_TREE, asm_name);
	    }
	  if (c_parser_next_token_is (parser, CPP_COMMA))
	    {
	      /* More declarators follow; attributes after the comma
		 become prefix attributes for the next one.  */
	      c_parser_consume_token (parser);
	      if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
		all_prefix_attrs = chainon (c_parser_attributes (parser),
					    prefix_attrs);
	      else
		all_prefix_attrs = prefix_attrs;
	      continue;
	    }
	  else if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	    {
	      c_parser_consume_token (parser);
	      return;
	    }
	  else
	    {
	      c_parser_error (parser, "expected %<,%> or %<;%>");
	      c_parser_skip_to_end_of_block_or_statement (parser);
	      return;
	    }
	}
      else if (!fndef_ok)
	{
	  c_parser_error (parser, "expected %<=%>, %<,%>, %<;%>, "
			  "%<asm%> or %<__attribute__%>");
	  c_parser_skip_to_end_of_block_or_statement (parser);
	  return;
	}
      /* Function definition (nested or otherwise).  */
      if (nested)
	{
	  if (pedantic)
	    pedwarn ("ISO C forbids nested functions");
	  push_function_context ();
	}
      if (!start_function (specs, declarator, all_prefix_attrs))
	{
	  /* This can appear in many cases looking nothing like a
	     function definition, so we don't give a more specific
	     error suggesting there was one.  */
	  c_parser_error (parser, "expected %<=%>, %<,%>, %<;%>, %<asm%> "
			  "or %<__attribute__%>");
	  if (nested)
	    pop_function_context ();
	  break;
	}
      /* Parse old-style parameter declarations.  ??? Attributes are
	 not allowed to start declaration specifiers here because of a
	 syntax conflict between a function declaration with attribute
	 suffix and a function definition with an attribute prefix on
	 first old-style parameter declaration.  Following the old
	 parser, they are not accepted on subsequent old-style
	 parameter declarations either.  However, there is no
	 ambiguity after the first declaration, nor indeed on the
	 first as long as we don't allow postfix attributes after a
	 declarator with a nonempty identifier list in a definition;
	 and postfix attributes have never been accepted here in
	 function definitions either.  */
      while (c_parser_next_token_is_not (parser, CPP_EOF)
	     && c_parser_next_token_is_not (parser, CPP_OPEN_BRACE))
	c_parser_declaration_or_fndef (parser, false, false, true, false);
      DECL_SOURCE_LOCATION (current_function_decl)
	= c_parser_peek_token (parser)->location;
      store_parm_decls ();
      fnbody = c_parser_compound_statement (parser);
      if (nested)
	{
	  /* A nested function is emitted as a DECL_EXPR statement in
	     the enclosing function.  */
	  tree decl = current_function_decl;
	  add_stmt (fnbody);
	  finish_function ();
	  pop_function_context ();
	  add_stmt (build_stmt (DECL_EXPR, decl));
	}
      else
	{
	  add_stmt (fnbody);
	  finish_function ();
	}
      break;
    }
}
/* Parse an asm-definition (asm() outside a function body).  This is a
   GNU extension.

   asm-definition:
     simple-asm-expr ;
*/

static void
c_parser_asm_definition (c_parser *parser)
{
  tree asm_str = c_parser_simple_asm_expr (parser);

  if (asm_str != NULL_TREE)
    cgraph_add_asm_node (asm_str);
  /* Require the terminating ';', recovering if it is missing.  */
  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
}
/* Parse some declaration specifiers (possibly none) (C90 6.5, C99
   6.7), adding them to SPECS (which may already include some).
   Storage class specifiers are accepted iff SCSPEC_OK; type
   specifiers are accepted iff TYPESPEC_OK; attributes are accepted at
   the start iff START_ATTR_OK.

   declaration-specifiers:
     storage-class-specifier declaration-specifiers[opt]
     type-specifier declaration-specifiers[opt]
     type-qualifier declaration-specifiers[opt]
     function-specifier declaration-specifiers[opt]

   Function specifiers (inline) are from C99, and are currently
   handled as storage class specifiers, as is __thread.

   C90 6.5.1, C99 6.7.1:
   storage-class-specifier:
     typedef
     extern
     static
     auto
     register

   C99 6.7.4:
   function-specifier:
     inline

   C90 6.5.2, C99 6.7.2:
   type-specifier:
     void
     char
     short
     int
     long
     float
     double
     signed
     unsigned
     _Bool
     _Complex
     [_Imaginary removed in C99 TC2]
     struct-or-union-specifier
     enum-specifier
     typedef-name

   (_Bool and _Complex are new in C99.)

   C90 6.5.3, C99 6.7.3:
   type-qualifier:
     const
     restrict
     volatile

   (restrict is new in C99.)

   GNU extensions:

   declaration-specifiers:
     attributes declaration-specifiers[opt]

   storage-class-specifier:
     __thread

   type-specifier:
     typeof-specifier
     _Decimal32
     _Decimal64
     _Decimal128

   Objective-C:

   type-specifier:
     class-name objc-protocol-refs[opt]
     typedef-name objc-protocol-refs
     objc-protocol-refs
*/

static void
c_parser_declspecs (c_parser *parser, struct c_declspecs *specs,
		    bool scspec_ok, bool typespec_ok, bool start_attr_ok)
{
  bool attrs_ok = start_attr_ok;
  bool seen_type = specs->type_seen_p;
  while (c_parser_next_token_is (parser, CPP_NAME)
	 || c_parser_next_token_is (parser, CPP_KEYWORD)
	 || (c_dialect_objc () && c_parser_next_token_is (parser, CPP_LESS)))
    {
      struct c_typespec t;
      tree attrs;
      if (c_parser_next_token_is (parser, CPP_NAME))
	{
	  tree value = c_parser_peek_token (parser)->value;
	  c_id_kind kind = c_parser_peek_token (parser)->id_kind;
	  /* This finishes the specifiers unless a type name is OK, it
	     is declared as a type name and a type name hasn't yet
	     been seen.  */
	  if (!typespec_ok || seen_type
	      || (kind != C_ID_TYPENAME && kind != C_ID_CLASSNAME))
	    break;
	  c_parser_consume_token (parser);
	  seen_type = true;
	  attrs_ok = true;
	  if (kind == C_ID_TYPENAME
	      && (!c_dialect_objc ()
		  || c_parser_next_token_is_not (parser, CPP_LESS)))
	    {
	      t.kind = ctsk_typedef;
	      /* For a typedef name, record the meaning, not the name.
		 In case of 'foo foo, bar;'.  */
	      t.spec = lookup_name (value);
	    }
	  else
	    {
	      /* Objective-C class name, possibly protocol-qualified.  */
	      tree proto = NULL_TREE;
	      gcc_assert (c_dialect_objc ());
	      t.kind = ctsk_objc;
	      if (c_parser_next_token_is (parser, CPP_LESS))
		proto = c_parser_objc_protocol_refs (parser);
	      t.spec = objc_get_protocol_qualified_type (value, proto);
	    }
	  declspecs_add_type (specs, t);
	  continue;
	}
      if (c_parser_next_token_is (parser, CPP_LESS))
	{
	  /* Make "<SomeProtocol>" equivalent to "id <SomeProtocol>" -
	     nisse@lysator.liu.se.  */
	  tree proto;
	  gcc_assert (c_dialect_objc ());
	  if (!typespec_ok || seen_type)
	    break;
	  proto = c_parser_objc_protocol_refs (parser);
	  t.kind = ctsk_objc;
	  t.spec = objc_get_protocol_qualified_type (NULL_TREE, proto);
	  declspecs_add_type (specs, t);
	  continue;
	}
      gcc_assert (c_parser_next_token_is (parser, CPP_KEYWORD));
      switch (c_parser_peek_token (parser)->keyword)
	{
	case RID_STATIC:
	case RID_EXTERN:
	case RID_REGISTER:
	case RID_TYPEDEF:
	case RID_INLINE:
	case RID_AUTO:
	case RID_THREAD:
	  if (!scspec_ok)
	    goto out;
	  attrs_ok = true;
	  /* TODO: Distinguish between function specifiers (inline)
	     and storage class specifiers, either here or in
	     declspecs_add_scspec.  */
	  declspecs_add_scspec (specs, c_parser_peek_token (parser)->value);
	  c_parser_consume_token (parser);
	  break;
	case RID_UNSIGNED:
	case RID_LONG:
	case RID_SHORT:
	case RID_SIGNED:
	case RID_COMPLEX:
	case RID_INT:
	case RID_CHAR:
	case RID_FLOAT:
	case RID_DOUBLE:
	case RID_VOID:
	case RID_DFLOAT32:
	case RID_DFLOAT64:
	case RID_DFLOAT128:
	case RID_BOOL:
	  if (!typespec_ok)
	    goto out;
	  attrs_ok = true;
	  seen_type = true;
	  OBJC_NEED_RAW_IDENTIFIER (1);
	  t.kind = ctsk_resword;
	  t.spec = c_parser_peek_token (parser)->value;
	  declspecs_add_type (specs, t);
	  c_parser_consume_token (parser);
	  break;
	case RID_ENUM:
	  if (!typespec_ok)
	    goto out;
	  attrs_ok = true;
	  seen_type = true;
	  t = c_parser_enum_specifier (parser);
	  declspecs_add_type (specs, t);
	  break;
	case RID_STRUCT:
	case RID_UNION:
	  if (!typespec_ok)
	    goto out;
	  attrs_ok = true;
	  seen_type = true;
	  t = c_parser_struct_or_union_specifier (parser);
	  declspecs_add_type (specs, t);
	  break;
	case RID_TYPEOF:
	  /* ??? The old parser rejected typeof after other type
	     specifiers, but is a syntax error the best way of
	     handling this?  */
	  if (!typespec_ok || seen_type)
	    goto out;
	  attrs_ok = true;
	  seen_type = true;
	  t = c_parser_typeof_specifier (parser);
	  declspecs_add_type (specs, t);
	  break;
	case RID_CONST:
	case RID_VOLATILE:
	case RID_RESTRICT:
	  attrs_ok = true;
	  declspecs_add_qual (specs, c_parser_peek_token (parser)->value);
	  c_parser_consume_token (parser);
	  break;
	case RID_ATTRIBUTE:
	  if (!attrs_ok)
	    goto out;
	  attrs = c_parser_attributes (parser);
	  declspecs_add_attrs (specs, attrs);
	  break;
	default:
	  goto out;
	}
    }
 out: ;
}
/* Parse an enum specifier (C90 6.5.2.2, C99 6.7.2.2).

   enum-specifier:
     enum attributes[opt] identifier[opt] { enumerator-list } attributes[opt]
     enum attributes[opt] identifier[opt] { enumerator-list , } attributes[opt]
     enum attributes[opt] identifier

   The form with trailing comma is new in C99.  The forms with
   attributes are GNU extensions.  In GNU C, we accept any expression
   without commas in the syntax (assignment expressions, not just
   conditional expressions); assignment expressions will be diagnosed
   as non-constant.

   enumerator-list:
     enumerator
     enumerator-list , enumerator

   enumerator:
     enumeration-constant
     enumeration-constant = constant-expression
*/

static struct c_typespec
c_parser_enum_specifier (c_parser *parser)
{
  struct c_typespec ret;
  tree attrs;
  tree ident = NULL_TREE;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_ENUM));
  c_parser_consume_token (parser);
  attrs = c_parser_attributes (parser);
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      ident = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
    }
  if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    {
      /* Parse an enum definition.  */
      tree type = start_enum (ident);
      tree postfix_attrs;
      /* We chain the enumerators in reverse order, then put them in
	 forward order at the end.  */
      tree values = NULL_TREE;
      c_parser_consume_token (parser);
      while (true)
	{
	  tree enum_id;
	  tree enum_value;
	  tree enum_decl;
	  bool seen_comma;
	  if (c_parser_next_token_is_not (parser, CPP_NAME))
	    {
	      c_parser_error (parser, "expected identifier");
	      c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, NULL);
	      values = error_mark_node;
	      break;
	    }
	  enum_id = c_parser_peek_token (parser)->value;
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_EQ))
	    {
	      /* Explicit enumerator value.  */
	      c_parser_consume_token (parser);
	      enum_value = c_parser_expr_no_commas (parser, NULL).value;
	    }
	  else
	    enum_value = NULL_TREE;
	  enum_decl = build_enumerator (enum_id, enum_value);
	  TREE_CHAIN (enum_decl) = values;
	  values = enum_decl;
	  seen_comma = false;
	  if (c_parser_next_token_is (parser, CPP_COMMA))
	    {
	      seen_comma = true;
	      c_parser_consume_token (parser);
	    }
	  if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
	    {
	      /* Trailing comma before '}' is C99; pedwarn for C90.  */
	      if (seen_comma && pedantic && !flag_isoc99)
		pedwarn ("comma at end of enumerator list");
	      c_parser_consume_token (parser);
	      break;
	    }
	  if (!seen_comma)
	    {
	      c_parser_error (parser, "expected %<,%> or %<}%>");
	      c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, NULL);
	      values = error_mark_node;
	      break;
	    }
	}
      postfix_attrs = c_parser_attributes (parser);
      ret.spec = finish_enum (type, nreverse (values),
			      chainon (attrs, postfix_attrs));
      ret.kind = ctsk_tagdef;
      return ret;
    }
  else if (!ident)
    {
      c_parser_error (parser, "expected %<{%>");
      ret.spec = error_mark_node;
      ret.kind = ctsk_tagref;
      return ret;
    }
  /* "enum ident" with no braces: a reference to an existing tag.  */
  ret = parser_xref_tag (ENUMERAL_TYPE, ident);
  /* In ISO C, enumerated types can be referred to only if already
     defined.  */
  if (pedantic && !COMPLETE_TYPE_P (ret.spec))
    pedwarn ("ISO C forbids forward references to %<enum%> types");
  return ret;
}
/* Parse a struct or union specifier (C90 6.5.2.1, C99 6.7.2.1).
struct-or-union-specifier:
struct-or-union attributes[opt] identifier[opt]
{ struct-contents } attributes[opt]
struct-or-union attributes[opt] identifier
struct-contents:
struct-declaration-list
struct-declaration-list:
struct-declaration ;
struct-declaration-list struct-declaration ;
GNU extensions:
struct-contents:
empty
struct-declaration
struct-declaration-list struct-declaration
struct-declaration-list:
struct-declaration-list ;
;
(Note that in the syntax here, unlike that in ISO C, the semicolons
are included here rather than in struct-declaration, in order to
describe the syntax with extra semicolons and missing semicolon at
end.)
Objective-C:
struct-declaration-list:
@defs ( class-name )
(Note this does not include a trailing semicolon, but can be
followed by further declarations, and gets a pedwarn-if-pedantic
when followed by a semicolon.) */
static struct c_typespec
c_parser_struct_or_union_specifier (c_parser *parser)
{
  struct c_typespec ret;
  tree attrs;
  tree ident = NULL_TREE;
  enum tree_code code;
  /* Select RECORD_TYPE or UNION_TYPE from the keyword that brought us
     here; anything else is a caller bug.  */
  switch (c_parser_peek_token (parser)->keyword)
    {
    case RID_STRUCT:
      code = RECORD_TYPE;
      break;
    case RID_UNION:
      code = UNION_TYPE;
      break;
    default:
      gcc_unreachable ();
    }
  c_parser_consume_token (parser);
  attrs = c_parser_attributes (parser);
  /* An optional tag name may follow the attributes.  */
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      ident = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
    }
  if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    {
      /* Parse a struct or union definition.  Start the scope of the
	 tag before parsing components.  */
      tree type = start_struct (code, ident);
      tree postfix_attrs;
      /* We chain the components in reverse order, then put them in
	 forward order at the end.  Each struct-declaration may
	 declare multiple components (comma-separated), so we must use
	 chainon to join them, although when parsing each
	 struct-declaration we can use TREE_CHAIN directly.
	 The theory behind all this is that there will be more
	 semicolon separated fields than comma separated fields, and
	 so we'll be minimizing the number of node traversals required
	 by chainon.  */
      tree contents = NULL_TREE;
      c_parser_consume_token (parser);
      /* Handle the Objective-C @defs construct,
	 e.g. foo(sizeof(struct{ @defs(ClassName) }));.  */
      if (c_parser_next_token_is_keyword (parser, RID_AT_DEFS))
	{
	  tree name;
	  gcc_assert (c_dialect_objc ());
	  c_parser_consume_token (parser);
	  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	    goto end_at_defs;
	  if (c_parser_next_token_is (parser, CPP_NAME)
	      && c_parser_peek_token (parser)->id_kind == C_ID_CLASSNAME)
	    {
	      name = c_parser_peek_token (parser)->value;
	      c_parser_consume_token (parser);
	    }
	  else
	    {
	      c_parser_error (parser, "expected class name");
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      goto end_at_defs;
	    }
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  /* The class's instance variables become the initial contents
	     of the struct (reversed here to match the reverse-chaining
	     convention used below).  */
	  contents = nreverse (objc_get_class_ivars (name));
	}
    end_at_defs:
      /* Parse the struct-declarations and semicolons.  Problems with
	 semicolons are diagnosed here; empty structures are diagnosed
	 elsewhere.  */
      while (true)
	{
	  tree decls;
	  /* Parse any stray semicolon.  */
	  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	    {
	      if (pedantic)
		pedwarn ("extra semicolon in struct or union specified");
	      c_parser_consume_token (parser);
	      continue;
	    }
	  /* Stop if at the end of the struct or union contents.  */
	  if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
	    {
	      c_parser_consume_token (parser);
	      break;
	    }
	  /* Accept #pragmas at struct scope.  */
	  if (c_parser_next_token_is (parser, CPP_PRAGMA))
	    {
	      c_parser_pragma (parser, pragma_external);
	      continue;
	    }
	  /* Parse some comma-separated declarations, but not the
	     trailing semicolon if any.  */
	  decls = c_parser_struct_declaration (parser);
	  contents = chainon (decls, contents);
	  /* If no semicolon follows, either we have a parse error or
	     are at the end of the struct or union and should
	     pedwarn.  */
	  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	    c_parser_consume_token (parser);
	  else
	    {
	      if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
		pedwarn ("no semicolon at end of struct or union");
	      else
		{
		  c_parser_error (parser, "expected %<;%>");
		  c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, NULL);
		  break;
		}
	    }
	}
      postfix_attrs = c_parser_attributes (parser);
      /* nreverse restores the member order reversed above.  */
      ret.spec = finish_struct (type, nreverse (contents),
				chainon (attrs, postfix_attrs));
      ret.kind = ctsk_tagdef;
      return ret;
    }
  else if (!ident)
    {
      /* Neither a definition nor a tag to refer to: syntax error.  */
      c_parser_error (parser, "expected %<{%>");
      ret.spec = error_mark_node;
      ret.kind = ctsk_tagref;
      return ret;
    }
  /* A reference to a (possibly forward-declared) tagged type.  */
  ret = parser_xref_tag (code, ident);
  return ret;
}
/* Parse a struct-declaration (C90 6.5.2.1, C99 6.7.2.1), *without*
the trailing semicolon.
struct-declaration:
specifier-qualifier-list struct-declarator-list
specifier-qualifier-list:
type-specifier specifier-qualifier-list[opt]
type-qualifier specifier-qualifier-list[opt]
attributes specifier-qualifier-list[opt]
struct-declarator-list:
struct-declarator
struct-declarator-list , attributes[opt] struct-declarator
struct-declarator:
declarator attributes[opt]
declarator[opt] : constant-expression attributes[opt]
GNU extensions:
struct-declaration:
__extension__ struct-declaration
specifier-qualifier-list
Unlike the ISO C syntax, semicolons are handled elsewhere. The use
of attributes where shown is a GNU extension. In GNU C, we accept
any expression without commas in the syntax (assignment
expressions, not just conditional expressions); assignment
expressions will be diagnosed as non-constant. */
static tree
c_parser_struct_declaration (c_parser *parser)
{
  struct c_declspecs *specs;
  tree prefix_attrs;
  tree all_prefix_attrs;
  tree decls;
  /* Handle the GNU __extension__ prefix: recurse with extension
     diagnostics suppressed, then restore them.  */
  if (c_parser_next_token_is_keyword (parser, RID_EXTENSION))
    {
      int ext;
      tree decl;
      ext = disable_extension_diagnostics ();
      c_parser_consume_token (parser);
      decl = c_parser_struct_declaration (parser);
      restore_extension_diagnostics (ext);
      return decl;
    }
  specs = build_null_declspecs ();
  c_parser_declspecs (parser, specs, false, true, true);
  if (parser->error)
    return NULL_TREE;
  if (!specs->declspecs_seen_p)
    {
      c_parser_error (parser, "expected specifier-qualifier-list");
      return NULL_TREE;
    }
  finish_declspecs (specs);
  /* A semicolon straight after the specifiers means there are no
     declarators: either a bare tag declaration or an unnamed
     struct/union member.  */
  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
    {
      tree ret;
      if (!specs->type_seen_p)
	{
	  if (pedantic)
	    pedwarn ("ISO C forbids member declarations with no members");
	  shadow_tag_warned (specs, pedantic);
	  ret = NULL_TREE;
	}
      else
	{
	  /* Support for unnamed structs or unions as members of
	     structs or unions (which is [a] useful and [b] supports
	     MS P-SDK).  */
	  ret = grokfield (build_id_declarator (NULL_TREE), specs, NULL_TREE);
	}
      return ret;
    }
  pending_xref_error ();
  /* Attributes from the specifiers apply to every declarator in the
     comma-separated list (ALL_PREFIX_ATTRS accumulates any attributes
     seen after commas as well).  */
  prefix_attrs = specs->attrs;
  all_prefix_attrs = prefix_attrs;
  specs->attrs = NULL_TREE;
  decls = NULL_TREE;
  while (true)
    {
      /* Declaring one or more declarators or un-named bit-fields.  */
      struct c_declarator *declarator;
      bool dummy = false;
      if (c_parser_next_token_is (parser, CPP_COLON))
	declarator = build_id_declarator (NULL_TREE);
      else
	declarator = c_parser_declarator (parser, specs->type_seen_p,
					  C_DTR_NORMAL, &dummy);
      if (declarator == NULL)
	{
	  c_parser_skip_to_end_of_block_or_statement (parser);
	  break;
	}
      if (c_parser_next_token_is (parser, CPP_COLON)
	  || c_parser_next_token_is (parser, CPP_COMMA)
	  || c_parser_next_token_is (parser, CPP_SEMICOLON)
	  || c_parser_next_token_is (parser, CPP_CLOSE_BRACE)
	  || c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
	{
	  tree postfix_attrs = NULL_TREE;
	  tree width = NULL_TREE;
	  tree d;
	  /* A colon introduces a bit-field width.  */
	  if (c_parser_next_token_is (parser, CPP_COLON))
	    {
	      c_parser_consume_token (parser);
	      width = c_parser_expr_no_commas (parser, NULL).value;
	    }
	  if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
	    postfix_attrs = c_parser_attributes (parser);
	  d = grokfield (declarator, specs, width);
	  decl_attributes (&d, chainon (postfix_attrs,
					all_prefix_attrs), 0);
	  /* Chain the new member onto the front of DECLS; the caller
	     puts the list in forward order.  */
	  TREE_CHAIN (d) = decls;
	  decls = d;
	  if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
	    all_prefix_attrs = chainon (c_parser_attributes (parser),
					prefix_attrs);
	  else
	    all_prefix_attrs = prefix_attrs;
	  if (c_parser_next_token_is (parser, CPP_COMMA))
	    c_parser_consume_token (parser);
	  else if (c_parser_next_token_is (parser, CPP_SEMICOLON)
		   || c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
	    {
	      /* Semicolon consumed in caller.  */
	      break;
	    }
	  else
	    {
	      c_parser_error (parser, "expected %<,%>, %<;%> or %<}%>");
	      break;
	    }
	}
      else
	{
	  c_parser_error (parser,
			  "expected %<:%>, %<,%>, %<;%>, %<}%> or "
			  "%<__attribute__%>");
	  break;
	}
    }
  return decls;
}
/* Parse a typeof specifier (a GNU extension).
typeof-specifier:
typeof ( expression )
typeof ( type-name )
*/
static struct c_typespec
c_parser_typeof_specifier (c_parser *parser)
{
  struct c_typespec ret;
  ret.kind = ctsk_typeof;
  ret.spec = error_mark_node;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_TYPEOF));
  c_parser_consume_token (parser);
  /* The operand of typeof is not evaluated; both counters must be
     decremented on every exit path below.  */
  skip_evaluation++;
  in_typeof++;
  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      skip_evaluation--;
      in_typeof--;
      return ret;
    }
  if (c_parser_next_token_starts_typename (parser))
    {
      /* typeof ( type-name ) form.  */
      struct c_type_name *type = c_parser_type_name (parser);
      skip_evaluation--;
      in_typeof--;
      if (type != NULL)
	{
	  ret.spec = groktypename (type);
	  pop_maybe_used (variably_modified_type_p (ret.spec, NULL_TREE));
	}
    }
  else
    {
      /* typeof ( expression ) form: the result type is the type of
	 the expression.  */
      bool was_vm;
      struct c_expr expr = c_parser_expression (parser);
      skip_evaluation--;
      in_typeof--;
      if (TREE_CODE (expr.value) == COMPONENT_REF
	  && DECL_C_BIT_FIELD (TREE_OPERAND (expr.value, 1)))
	error ("%<typeof%> applied to a bit-field");
      ret.spec = TREE_TYPE (expr.value);
      was_vm = variably_modified_type_p (ret.spec, NULL_TREE);
      /* This should be returned with the type so that when the type
	 is evaluated, this can be evaluated.  For now, we avoid
	 evaluation when the context might.  */
      if (!skip_evaluation && was_vm)
	{
	  tree e = expr.value;
	  /* Decls and constants cannot carry a source location of
	     their own, so wrap them in a no-op NOP_EXPR that can.  */
	  if (DECL_P (e) || CONSTANT_CLASS_P (e))
	    e = build1 (NOP_EXPR, void_type_node, e);
	  if (EXPR_P (e))
	    SET_EXPR_LOCATION (e, input_location);
	  add_stmt (e);
	}
      pop_maybe_used (was_vm);
    }
  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
  return ret;
}
/* Parse a declarator, possibly an abstract declarator (C90 6.5.4,
6.5.5, C99 6.7.5, 6.7.6). If TYPE_SEEN_P then a typedef name may
be redeclared; otherwise it may not. KIND indicates which kind of
declarator is wanted. Returns a valid declarator except in the
case of a syntax error in which case NULL is returned. *SEEN_ID is
set to true if an identifier being declared is seen; this is used
to diagnose bad forms of abstract array declarators and to
determine whether an identifier list is syntactically permitted.
declarator:
pointer[opt] direct-declarator
direct-declarator:
identifier
( attributes[opt] declarator )
direct-declarator array-declarator
direct-declarator ( parameter-type-list )
direct-declarator ( identifier-list[opt] )
pointer:
* type-qualifier-list[opt]
* type-qualifier-list[opt] pointer
type-qualifier-list:
type-qualifier
attributes
type-qualifier-list type-qualifier
type-qualifier-list attributes
parameter-type-list:
parameter-list
parameter-list , ...
parameter-list:
parameter-declaration
parameter-list , parameter-declaration
parameter-declaration:
declaration-specifiers declarator attributes[opt]
declaration-specifiers abstract-declarator[opt] attributes[opt]
identifier-list:
identifier
identifier-list , identifier
abstract-declarator:
pointer
pointer[opt] direct-abstract-declarator
direct-abstract-declarator:
( attributes[opt] abstract-declarator )
direct-abstract-declarator[opt] array-declarator
direct-abstract-declarator[opt] ( parameter-type-list[opt] )
GNU extensions:
direct-declarator:
direct-declarator ( parameter-forward-declarations
parameter-type-list[opt] )
direct-abstract-declarator:
direct-abstract-declarator[opt] ( parameter-forward-declarations
parameter-type-list[opt] )
parameter-forward-declarations:
parameter-list ;
parameter-forward-declarations parameter-list ;
The uses of attributes shown above are GNU extensions.
Some forms of array declarator are not included in C99 in the
syntax for abstract declarators; these are disallowed elsewhere.
This may be a defect (DR#289).
This function also accepts an omitted abstract declarator as being
an abstract declarator, although not part of the formal syntax. */
static struct c_declarator *
c_parser_declarator (c_parser *parser, bool type_seen_p, c_dtr_syn kind,
		     bool *seen_id)
{
  struct c_declspecs *ptr_quals;
  struct c_declarator *target;
  /* With no leading "*", this is just a direct (or direct abstract)
     declarator.  */
  if (!c_parser_next_token_is (parser, CPP_MULT))
    return c_parser_direct_declarator (parser, type_seen_p, kind, seen_id);
  /* Consume the "*" and any following qualifiers or attributes, then
     recurse for the declarator the pointer applies to.  */
  c_parser_consume_token (parser);
  ptr_quals = build_null_declspecs ();
  c_parser_declspecs (parser, ptr_quals, false, false, true);
  target = c_parser_declarator (parser, type_seen_p, kind, seen_id);
  if (target == NULL)
    return NULL;
  return make_pointer_declarator (ptr_quals, target);
}
/* Parse a direct declarator or direct abstract declarator; arguments
as c_parser_declarator. */
static struct c_declarator *
c_parser_direct_declarator (c_parser *parser, bool type_seen_p, c_dtr_syn kind,
			    bool *seen_id)
{
  /* The direct declarator must start with an identifier (possibly
     omitted) or a parenthesized declarator (possibly abstract).  In
     an ordinary declarator, initial parentheses must start a
     parenthesized declarator.  In an abstract declarator or parameter
     declarator, they could start a parenthesized declarator or a
     parameter list.  To tell which, the open parenthesis and any
     following attributes must be read.  If a declaration specifier
     follows, then it is a parameter list; if the specifier is a
     typedef name, there might be an ambiguity about redeclaring it,
     which is resolved in the direction of treating it as a typedef
     name.  If a close parenthesis follows, it is also an empty
     parameter list, as the syntax does not permit empty abstract
     declarators.  Otherwise, it is a parenthesized declarator (in
     which case the analysis may be repeated inside it, recursively).
     ??? There is an ambiguity in a parameter declaration "int
     (__attribute__((foo)) x)", where x is not a typedef name: it
     could be an abstract declarator for a function, or declare x with
     parentheses.  The proper resolution of this ambiguity needs
     documenting.  At present we follow an accident of the old
     parser's implementation, whereby the first parameter must have
     some declaration specifiers other than just attributes.  Thus as
     a parameter declaration it is treated as a parenthesized
     parameter named x, and as an abstract declarator it is
     rejected.
     ??? Also following the old parser, attributes inside an empty
     parameter list are ignored, making it a list not yielding a
     prototype, rather than giving an error or making it have one
     parameter with implicit type int.
     ??? Also following the old parser, typedef names may be
     redeclared in declarators, but not Objective-C class names.  */
  /* Case 1: the declarator starts with an identifier (allowed
     everywhere except in a purely abstract declarator).  */
  if (kind != C_DTR_ABSTRACT
      && c_parser_next_token_is (parser, CPP_NAME)
      && ((type_seen_p
	   && c_parser_peek_token (parser)->id_kind == C_ID_TYPENAME)
	  || c_parser_peek_token (parser)->id_kind == C_ID_ID))
    {
      struct c_declarator *inner
	= build_id_declarator (c_parser_peek_token (parser)->value);
      *seen_id = true;
      inner->id_loc = c_parser_peek_token (parser)->location;
      c_parser_consume_token (parser);
      return c_parser_direct_declarator_inner (parser, *seen_id, inner);
    }
  /* Case 2: an abstract array declarator with the identifier
     omitted.  */
  if (kind != C_DTR_NORMAL
      && c_parser_next_token_is (parser, CPP_OPEN_SQUARE))
    {
      struct c_declarator *inner = build_id_declarator (NULL_TREE);
      return c_parser_direct_declarator_inner (parser, *seen_id, inner);
    }
  /* Either we are at the end of an abstract declarator, or we have
     parentheses.  */
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      tree attrs;
      struct c_declarator *inner;
      c_parser_consume_token (parser);
      attrs = c_parser_attributes (parser);
      /* Disambiguate per the comment above: declaration specifiers or
	 ")" right after "(" mean a parameter list, not a
	 parenthesized declarator.  */
      if (kind != C_DTR_NORMAL
	  && (c_parser_next_token_starts_declspecs (parser)
	      || c_parser_next_token_is (parser, CPP_CLOSE_PAREN)))
	{
	  struct c_arg_info *args
	    = c_parser_parms_declarator (parser, kind == C_DTR_NORMAL,
					 attrs);
	  if (args == NULL)
	    return NULL;
	  else
	    {
	      inner
		= build_function_declarator (args,
					     build_id_declarator (NULL_TREE));
	      return c_parser_direct_declarator_inner (parser, *seen_id,
						       inner);
	    }
	}
      /* A parenthesized declarator.  */
      inner = c_parser_declarator (parser, type_seen_p, kind, seen_id);
      if (inner != NULL && attrs != NULL)
	inner = build_attrs_declarator (attrs, inner);
      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	{
	  c_parser_consume_token (parser);
	  if (inner == NULL)
	    return NULL;
	  else
	    return c_parser_direct_declarator_inner (parser, *seen_id, inner);
	}
      else
	{
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  return NULL;
	}
    }
  else
    {
      /* An omitted identifier is valid only for abstract and
	 parameter declarators.  */
      if (kind == C_DTR_NORMAL)
	{
	  c_parser_error (parser, "expected identifier or %<(%>");
	  return NULL;
	}
      else
	return build_id_declarator (NULL_TREE);
    }
}
/* Parse part of a direct declarator or direct abstract declarator,
given that some (in INNER) has already been parsed; ID_PRESENT is
true if an identifier is present, false for an abstract
declarator. */
static struct c_declarator *
c_parser_direct_declarator_inner (c_parser *parser, bool id_present,
				  struct c_declarator *inner)
{
  /* Parse a sequence of array declarators and parameter lists.  */
  if (c_parser_next_token_is (parser, CPP_OPEN_SQUARE))
    {
      struct c_declarator *declarator;
      struct c_declspecs *quals_attrs = build_null_declspecs ();
      bool static_seen;
      bool star_seen;
      tree dimen;
      c_parser_consume_token (parser);
      /* Qualifiers may appear before "static", after it, or both are
	 optional (C99 array parameter syntax).  */
      c_parser_declspecs (parser, quals_attrs, false, false, true);
      static_seen = c_parser_next_token_is_keyword (parser, RID_STATIC);
      if (static_seen)
	c_parser_consume_token (parser);
      if (static_seen && !quals_attrs->declspecs_seen_p)
	c_parser_declspecs (parser, quals_attrs, false, false, true);
      if (!quals_attrs->declspecs_seen_p)
	quals_attrs = NULL;
      /* If "static" is present, there must be an array dimension.
	 Otherwise, there may be a dimension, "*", or no
	 dimension.  */
      if (static_seen)
	{
	  star_seen = false;
	  dimen = c_parser_expr_no_commas (parser, NULL).value;
	}
      else
	{
	  if (c_parser_next_token_is (parser, CPP_CLOSE_SQUARE))
	    {
	      /* "[]": no dimension at all.  */
	      dimen = NULL_TREE;
	      star_seen = false;
	    }
	  else if (c_parser_next_token_is (parser, CPP_MULT))
	    {
	      /* "[*]" only if the "*" is immediately followed by "]";
		 otherwise the "*" starts a dimension expression such
		 as "[*p]".  */
	      if (c_parser_peek_2nd_token (parser)->type == CPP_CLOSE_SQUARE)
		{
		  dimen = NULL_TREE;
		  star_seen = true;
		  c_parser_consume_token (parser);
		}
	      else
		{
		  star_seen = false;
		  dimen = c_parser_expr_no_commas (parser, NULL).value;
		}
	    }
	  else
	    {
	      star_seen = false;
	      dimen = c_parser_expr_no_commas (parser, NULL).value;
	    }
	}
      if (c_parser_next_token_is (parser, CPP_CLOSE_SQUARE))
	c_parser_consume_token (parser);
      else
	{
	  c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
				     "expected %<]%>");
	  return NULL;
	}
      declarator = build_array_declarator (dimen, quals_attrs, static_seen,
					   star_seen);
      if (declarator == NULL)
	return NULL;
      inner = set_array_declarator_inner (declarator, inner, !id_present);
      /* Recurse to pick up any further array or function suffixes.  */
      return c_parser_direct_declarator_inner (parser, id_present, inner);
    }
  else if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      /* A function declarator suffix: "( parameters )".  */
      tree attrs;
      struct c_arg_info *args;
      c_parser_consume_token (parser);
      attrs = c_parser_attributes (parser);
      args = c_parser_parms_declarator (parser, id_present, attrs);
      if (args == NULL)
	return NULL;
      else
	{
	  inner = build_function_declarator (args, inner);
	  return c_parser_direct_declarator_inner (parser, id_present, inner);
	}
    }
  return inner;
}
/* Parse a parameter list or identifier list, including the closing
parenthesis but not the opening one. ATTRS are the attributes at
the start of the list. ID_LIST_OK is true if an identifier list is
acceptable; such a list must not have attributes at the start. */
static struct c_arg_info *
c_parser_parms_declarator (c_parser *parser, bool id_list_ok, tree attrs)
{
  /* Parameters get their own scope, popped on every return path.  */
  push_scope ();
  declare_parm_level ();
  /* If the list starts with an identifier, it is an identifier list.
     Otherwise, it is either a prototype list or an empty list.  */
  if (id_list_ok
      && !attrs
      && c_parser_next_token_is (parser, CPP_NAME)
      && c_parser_peek_token (parser)->id_kind == C_ID_ID)
    {
      /* Old-style (K&R) identifier list.  NEXTP keeps the TREE_LIST
	 in source order without retraversal.  */
      tree list = NULL_TREE, *nextp = &list;
      while (c_parser_next_token_is (parser, CPP_NAME)
	     && c_parser_peek_token (parser)->id_kind == C_ID_ID)
	{
	  *nextp = build_tree_list (NULL_TREE,
				    c_parser_peek_token (parser)->value);
	  nextp = & TREE_CHAIN (*nextp);
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is_not (parser, CPP_COMMA))
	    break;
	  c_parser_consume_token (parser);
	  /* A ")" right after a comma means a trailing comma, which
	     the identifier-list syntax does not allow.  */
	  if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	    {
	      c_parser_error (parser, "expected identifier");
	      break;
	    }
	}
      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	{
	  struct c_arg_info *ret = XOBNEW (&parser_obstack, struct c_arg_info);
	  ret->parms = 0;
	  ret->tags = 0;
	  ret->types = list;
	  ret->others = 0;
	  ret->pending_sizes = 0;
	  ret->had_vla_unspec = 0;
	  c_parser_consume_token (parser);
	  pop_scope ();
	  return ret;
	}
      else
	{
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  pop_scope ();
	  return NULL;
	}
    }
  else
    {
      /* Prototype-style (possibly empty) parameter list.  */
      struct c_arg_info *ret = c_parser_parms_list_declarator (parser, attrs);
      pop_scope ();
      return ret;
    }
}
/* Parse a parameter list (possibly empty), including the closing
parenthesis but not the opening one. ATTRS are the attributes at
the start of the list. */
static struct c_arg_info *
c_parser_parms_list_declarator (c_parser *parser, tree attrs)
{
  bool good_parm = false;
  /* ??? Following the old parser, forward parameter declarations may
     use abstract declarators, and if no real parameter declarations
     follow the forward declarations then this is not diagnosed.  Also
     note as above that attributes are ignored as the only contents of
     the parentheses, or as the only contents after forward
     declarations.  */
  if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
    {
      /* "()": an empty, unprototyped parameter list.  */
      struct c_arg_info *ret = XOBNEW (&parser_obstack, struct c_arg_info);
      ret->parms = 0;
      ret->tags = 0;
      ret->types = 0;
      ret->others = 0;
      ret->pending_sizes = 0;
      ret->had_vla_unspec = 0;
      c_parser_consume_token (parser);
      return ret;
    }
  if (c_parser_next_token_is (parser, CPP_ELLIPSIS))
    {
      /* "(...)" with no named argument before it: invalid in ISO C,
	 but parsed here for error recovery.  */
      struct c_arg_info *ret = XOBNEW (&parser_obstack, struct c_arg_info);
      ret->parms = 0;
      ret->tags = 0;
      ret->others = 0;
      ret->pending_sizes = 0;
      ret->had_vla_unspec = 0;
      /* Suppress -Wold-style-definition for this case.  */
      ret->types = error_mark_node;
      error ("ISO C requires a named argument before %<...%>");
      c_parser_consume_token (parser);
      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	{
	  c_parser_consume_token (parser);
	  return ret;
	}
      else
	{
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  return NULL;
	}
    }
  /* Nonempty list of parameters, either terminated with semicolon
     (forward declarations; recurse) or with close parenthesis (normal
     function) or with ", ... )" (variadic function).  */
  while (true)
    {
      /* Parse a parameter.  ATTRS only apply to the first one.  */
      struct c_parm *parm = c_parser_parameter_declaration (parser, attrs);
      attrs = NULL_TREE;
      if (parm != NULL)
	{
	  good_parm = true;
	  push_parm_decl (parm);
	}
      if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	{
	  /* GNU forward parameter declarations; recurse for the real
	     parameter list.  */
	  tree new_attrs;
	  c_parser_consume_token (parser);
	  mark_forward_parm_decls ();
	  new_attrs = c_parser_attributes (parser);
	  return c_parser_parms_list_declarator (parser, new_attrs);
	}
      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	{
	  c_parser_consume_token (parser);
	  if (good_parm)
	    return get_parm_info (false);
	  else
	    {
	      /* No valid parameter was seen; return an empty,
		 unprototyped list for recovery.  */
	      struct c_arg_info *ret
		= XOBNEW (&parser_obstack, struct c_arg_info);
	      ret->parms = 0;
	      ret->tags = 0;
	      ret->types = 0;
	      ret->others = 0;
	      ret->pending_sizes = 0;
	      ret->had_vla_unspec = 0;
	      return ret;
	    }
	}
      if (!c_parser_require (parser, CPP_COMMA,
			     "expected %<;%>, %<,%> or %<)%>"))
	{
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	  return NULL;
	}
      if (c_parser_next_token_is (parser, CPP_ELLIPSIS))
	{
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	    {
	      /* ", ...)" ends a variadic function's parameter list.  */
	      c_parser_consume_token (parser);
	      if (good_parm)
		return get_parm_info (true);
	      else
		{
		  struct c_arg_info *ret
		    = XOBNEW (&parser_obstack, struct c_arg_info);
		  ret->parms = 0;
		  ret->tags = 0;
		  ret->types = 0;
		  ret->others = 0;
		  ret->pending_sizes = 0;
		  ret->had_vla_unspec = 0;
		  return ret;
		}
	    }
	  else
	    {
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
					 "expected %<)%>");
	      return NULL;
	    }
	}
    }
}
/* Parse a parameter declaration. ATTRS are the attributes at the
start of the declaration if it is the first parameter. */
static struct c_parm *
c_parser_parameter_declaration (c_parser *parser, tree attrs)
{
  struct c_declspecs *specs;
  struct c_declarator *declarator;
  tree prefix_attrs;
  tree postfix_attrs = NULL_TREE;
  bool dummy = false;
  if (!c_parser_next_token_starts_declspecs (parser))
    {
      /* ??? In some Objective-C cases '...' isn't applicable so there
	 should be a different message.  */
      c_parser_error (parser,
		      "expected declaration specifiers or %<...%>");
      c_parser_skip_to_end_of_parameter (parser);
      return NULL;
    }
  specs = build_null_declspecs ();
  /* ATTRS, if any, preceded this (first) parameter; fold them into
     its declaration specifiers.  */
  if (attrs)
    {
      declspecs_add_attrs (specs, attrs);
      attrs = NULL_TREE;
    }
  c_parser_declspecs (parser, specs, true, true, true);
  finish_declspecs (specs);
  pending_xref_error ();
  /* Detach the specifier attributes so they can be rejoined with any
     postfix attributes below.  */
  prefix_attrs = specs->attrs;
  specs->attrs = NULL_TREE;
  declarator = c_parser_declarator (parser, specs->type_seen_p,
				    C_DTR_PARM, &dummy);
  if (declarator == NULL)
    {
      /* Skip to the next parameter for recovery.  */
      c_parser_skip_until_found (parser, CPP_COMMA, NULL);
      return NULL;
    }
  if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
    postfix_attrs = c_parser_attributes (parser);
  return build_c_parm (specs, chainon (postfix_attrs, prefix_attrs),
		       declarator);
}
/* Parse a string literal in an asm expression. It should not be
translated, and wide string literals are an error although
permitted by the syntax. This is a GNU extension.
asm-string-literal:
string-literal
??? At present, following the old parser, the caller needs to have
set c_lex_string_translate to 0. It would be better to follow the
C++ parser rather than using the c_lex_string_translate kludge. */
static tree
c_parser_asm_string_literal (c_parser *parser)
{
  tree result = NULL_TREE;
  /* Only a narrow string literal is acceptable; a wide one is
     diagnosed but consumed, and anything else yields NULL_TREE.  */
  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_STRING:
      result = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
      break;
    case CPP_WSTRING:
      error ("wide string literal in %<asm%>");
      /* Substitute an empty string so callers see a valid node.  */
      result = build_string (1, "");
      c_parser_consume_token (parser);
      break;
    default:
      c_parser_error (parser, "expected string literal");
      break;
    }
  return result;
}
/* Parse a simple asm expression. This is used in restricted
contexts, where a full expression with inputs and outputs does not
make sense. This is a GNU extension.
simple-asm-expr:
asm ( asm-string-literal )
*/
static tree
c_parser_simple_asm_expr (c_parser *parser)
{
  tree asm_str;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_ASM));
  /* ??? Follow the C++ parser rather than using the
     c_lex_string_translate kludge.  */
  c_lex_string_translate = 0;
  c_parser_consume_token (parser);
  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      /* Restore translation before bailing out.  */
      c_lex_string_translate = 1;
      return NULL_TREE;
    }
  asm_str = c_parser_asm_string_literal (parser);
  c_lex_string_translate = 1;
  if (c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>"))
    return asm_str;
  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
  return NULL_TREE;
}
/* Parse (possibly empty) attributes. This is a GNU extension.
attributes:
empty
attributes attribute
attribute:
__attribute__ ( ( attribute-list ) )
attribute-list:
attrib
attribute_list , attrib
attrib:
empty
any-word
any-word ( identifier )
any-word ( identifier , nonempty-expr-list )
any-word ( expr-list )
where the "identifier" must not be declared as a type, and
"any-word" may be any identifier (including one declared as a
type), a reserved word storage class specifier, type specifier or
type qualifier. ??? This still leaves out most reserved keywords
(following the old parser), shouldn't we include them, and why not
allow identifiers declared as types to start the arguments? */
static tree
c_parser_attributes (c_parser *parser)
{
  tree attrs = NULL_TREE;
  /* Each iteration parses one "__attribute__ ((...))" clause; they
     may be repeated.  */
  while (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
    {
      /* ??? Follow the C++ parser rather than using the
	 c_lex_string_translate kludge.  */
      c_lex_string_translate = 0;
      c_parser_consume_token (parser);
      /* "__attribute__" must be followed by two open parentheses;
	 every early return below restores c_lex_string_translate.  */
      if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	{
	  c_lex_string_translate = 1;
	  return attrs;
	}
      if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	{
	  c_lex_string_translate = 1;
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	  return attrs;
	}
      /* Parse the attribute list.  */
      while (c_parser_next_token_is (parser, CPP_COMMA)
	     || c_parser_next_token_is (parser, CPP_NAME)
	     || c_parser_next_token_is (parser, CPP_KEYWORD))
	{
	  tree attr, attr_name, attr_args;
	  /* Empty attribs (stray commas) are skipped.  */
	  if (c_parser_next_token_is (parser, CPP_COMMA))
	    {
	      c_parser_consume_token (parser);
	      continue;
	    }
	  if (c_parser_next_token_is (parser, CPP_KEYWORD))
	    {
	      /* ??? See comment above about what keywords are
		 accepted here.  */
	      bool ok;
	      switch (c_parser_peek_token (parser)->keyword)
		{
		case RID_STATIC:
		case RID_UNSIGNED:
		case RID_LONG:
		case RID_CONST:
		case RID_EXTERN:
		case RID_REGISTER:
		case RID_TYPEDEF:
		case RID_SHORT:
		case RID_INLINE:
		case RID_VOLATILE:
		case RID_SIGNED:
		case RID_AUTO:
		case RID_RESTRICT:
		case RID_COMPLEX:
		case RID_THREAD:
		case RID_INT:
		case RID_CHAR:
		case RID_FLOAT:
		case RID_DOUBLE:
		case RID_VOID:
		case RID_DFLOAT32:
		case RID_DFLOAT64:
		case RID_DFLOAT128:
		case RID_BOOL:
		  ok = true;
		  break;
		default:
		  ok = false;
		  break;
		}
	      if (!ok)
		break;
	    }
	  attr_name = c_parser_peek_token (parser)->value;
	  c_parser_consume_token (parser);
	  /* An attribute with no argument list.  */
	  if (c_parser_next_token_is_not (parser, CPP_OPEN_PAREN))
	    {
	      attr = build_tree_list (attr_name, NULL_TREE);
	      attrs = chainon (attrs, attr);
	      continue;
	    }
	  c_parser_consume_token (parser);
	  /* Parse the attribute contents.  If they start with an
	     identifier which is followed by a comma or close
	     parenthesis, then the arguments start with that
	     identifier; otherwise they are an expression list.  */
	  if (c_parser_next_token_is (parser, CPP_NAME)
	      && c_parser_peek_token (parser)->id_kind == C_ID_ID
	      && ((c_parser_peek_2nd_token (parser)->type == CPP_COMMA)
		  || (c_parser_peek_2nd_token (parser)->type
		      == CPP_CLOSE_PAREN)))
	    {
	      tree arg1 = c_parser_peek_token (parser)->value;
	      c_parser_consume_token (parser);
	      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
		attr_args = build_tree_list (NULL_TREE, arg1);
	      else
		{
		  c_parser_consume_token (parser);
		  attr_args = tree_cons (NULL_TREE, arg1,
					 c_parser_expr_list (parser, false));
		}
	    }
	  else
	    {
	      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
		attr_args = NULL_TREE;
	      else
		attr_args = c_parser_expr_list (parser, false);
	    }
	  attr = build_tree_list (attr_name, attr_args);
	  if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	    c_parser_consume_token (parser);
	  else
	    {
	      c_lex_string_translate = 1;
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
					 "expected %<)%>");
	      return attrs;
	    }
	  attrs = chainon (attrs, attr);
	}
      /* Require the two closing parentheses.  */
      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	c_parser_consume_token (parser);
      else
	{
	  c_lex_string_translate = 1;
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  return attrs;
	}
      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	c_parser_consume_token (parser);
      else
	{
	  c_lex_string_translate = 1;
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  return attrs;
	}
      c_lex_string_translate = 1;
    }
  return attrs;
}
/* Parse a type name (C90 6.5.5, C99 6.7.6).
type-name:
specifier-qualifier-list abstract-declarator[opt]
*/
static struct c_type_name *
c_parser_type_name (c_parser *parser)
{
  bool saw_id = false;
  struct c_declspecs *quallist = build_null_declspecs ();
  struct c_declarator *absdecl;
  struct c_type_name *name;
  /* A type name must begin with at least one specifier or
     qualifier.  */
  c_parser_declspecs (parser, quallist, false, true, true);
  if (!quallist->declspecs_seen_p)
    {
      c_parser_error (parser, "expected specifier-qualifier-list");
      return NULL;
    }
  pending_xref_error ();
  finish_declspecs (quallist);
  /* Any declarator here is abstract; SAW_ID is required by the
     interface but not used afterwards.  */
  absdecl = c_parser_declarator (parser, quallist->type_seen_p,
				 C_DTR_ABSTRACT, &saw_id);
  if (absdecl == NULL)
    return NULL;
  name = XOBNEW (&parser_obstack, struct c_type_name);
  name->specs = quallist;
  name->declarator = absdecl;
  return name;
}
/* Parse an initializer (C90 6.5.7, C99 6.7.8).
initializer:
assignment-expression
{ initializer-list }
{ initializer-list , }
initializer-list:
designation[opt] initializer
initializer-list , designation[opt] initializer
designation:
designator-list =
designator-list:
designator
designator-list designator
designator:
array-designator
. identifier
array-designator:
[ constant-expression ]
GNU extensions:
initializer:
{ }
designation:
array-designator
identifier :
array-designator:
[ constant-expression ... constant-expression ]
Any expression without commas is accepted in the syntax for the
constant-expressions, with non-constant expressions rejected later.
This function is only used for top-level initializers; for nested
ones, see c_parser_initval. */
static struct c_expr
c_parser_initializer (c_parser *parser)
{
  struct c_expr init;
  enum tree_code code;
  /* A brace starts an initializer list; anything else is an
     assignment-expression.  */
  if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    return c_parser_braced_init (parser, NULL_TREE, false);
  init = c_parser_expr_no_commas (parser, NULL);
  /* String literals and compound literals keep their type; other
     expressions undergo function/array-to-pointer conversion.  */
  code = TREE_CODE (init.value);
  if (code != STRING_CST && code != COMPOUND_LITERAL_EXPR)
    init = default_function_array_conversion (init);
  return init;
}
/* Parse a braced initializer list. TYPE is the type specified for a
compound literal, and NULL_TREE for other initializers and for
nested braced lists. NESTED_P is true for nested braced lists,
false for the list of a compound literal or the list that is the
top-level initializer in a declaration. */
static struct c_expr
c_parser_braced_init (c_parser *parser, tree type, bool nested_p)
{
  gcc_assert (c_parser_next_token_is (parser, CPP_OPEN_BRACE));
  c_parser_consume_token (parser);
  /* Nested lists push a level onto the current initializer; the
     outermost list starts a fresh incremental initializer.  */
  if (nested_p)
    push_init_level (0);
  else
    really_start_incremental_init (type);
  if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
    {
      /* "{ }" is a GNU extension.  */
      if (pedantic)
        pedwarn ("ISO C forbids empty initializer braces");
    }
  else
    {
      /* One or more initializer elements separated by commas, with an
         optional trailing comma before the closing brace.  */
      for (;;)
        {
          c_parser_initelt (parser);
          if (parser->error)
            break;
          if (!c_parser_next_token_is (parser, CPP_COMMA))
            break;
          c_parser_consume_token (parser);
          if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
            break;
        }
    }
  if (c_parser_next_token_is_not (parser, CPP_CLOSE_BRACE))
    {
      /* Recovery: skip to the closing brace and return an error
         expression.  */
      struct c_expr err;
      err.value = error_mark_node;
      err.original_code = ERROR_MARK;
      c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, "expected %<}%>");
      return err;
    }
  c_parser_consume_token (parser);
  return pop_init_level (0);
}
/* Parse a nested initializer, including designators. */
static void
c_parser_initelt (c_parser *parser)
{
  /* Parse any designator or designator list.  A single array
     designator may have the subsequent "=" omitted in GNU C, but a
     longer list or a structure member designator may not.  */
  if (c_parser_next_token_is (parser, CPP_NAME)
      && c_parser_peek_2nd_token (parser)->type == CPP_COLON)
    {
      /* Old-style structure member designator.  */
      set_init_label (c_parser_peek_token (parser)->value);
      if (pedantic)
        pedwarn ("obsolete use of designated initializer with %<:%>");
      /* Consume the member name and the following colon.  */
      c_parser_consume_token (parser);
      c_parser_consume_token (parser);
    }
  else
    {
      /* des_seen is 0 if there have been no designators, 1 if there
         has been a single array designator and 2 otherwise.  */
      int des_seen = 0;
      while (c_parser_next_token_is (parser, CPP_OPEN_SQUARE)
             || c_parser_next_token_is (parser, CPP_DOT))
        {
          int des_prev = des_seen;
          if (des_seen < 2)
            des_seen++;
          if (c_parser_next_token_is (parser, CPP_DOT))
            {
              /* Member designator: . identifier.  */
              des_seen = 2;
              c_parser_consume_token (parser);
              if (c_parser_next_token_is (parser, CPP_NAME))
                {
                  set_init_label (c_parser_peek_token (parser)->value);
                  c_parser_consume_token (parser);
                }
              else
                {
                  /* Missing member name: record an erroneous element
                     and resynchronize at the next comma.  */
                  struct c_expr init;
                  init.value = error_mark_node;
                  init.original_code = ERROR_MARK;
                  c_parser_error (parser, "expected identifier");
                  c_parser_skip_until_found (parser, CPP_COMMA, NULL);
                  process_init_element (init);
                  return;
                }
            }
          else
            {
              tree first, second;
              /* ??? Following the old parser, [ objc-receiver
                 objc-message-args ] is accepted as an initializer,
                 being distinguished from a designator by what follows
                 the first assignment expression inside the square
                 brackets, but after a first array designator a
                 subsequent square bracket is for Objective-C taken to
                 start an expression, using the obsolete form of
                 designated initializer without '=', rather than
                 possibly being a second level of designation: in LALR
                 terms, the '[' is shifted rather than reducing
                 designator to designator-list.  */
              if (des_prev == 1 && c_dialect_objc ())
                {
                  des_seen = des_prev;
                  break;
                }
              if (des_prev == 0 && c_dialect_objc ())
                {
                  /* This might be an array designator or an
                     Objective-C message expression.  If the former,
                     continue parsing here; if the latter, parse the
                     remainder of the initializer given the starting
                     primary-expression.  ??? It might make sense to
                     distinguish when des_prev == 1 as well; see
                     previous comment.  */
                  tree rec, args;
                  struct c_expr mexpr;
                  c_parser_consume_token (parser);
                  if (c_parser_peek_token (parser)->type == CPP_NAME
                      && ((c_parser_peek_token (parser)->id_kind
                           == C_ID_TYPENAME)
                          || (c_parser_peek_token (parser)->id_kind
                              == C_ID_CLASSNAME)))
                    {
                      /* Type name receiver.  */
                      tree id = c_parser_peek_token (parser)->value;
                      c_parser_consume_token (parser);
                      rec = objc_get_class_reference (id);
                      goto parse_message_args;
                    }
                  first = c_parser_expr_no_commas (parser, NULL).value;
                  /* An ellipsis or closing bracket after the first
                     expression means this was an array designator
                     after all.  */
                  if (c_parser_next_token_is (parser, CPP_ELLIPSIS)
                      || c_parser_next_token_is (parser, CPP_CLOSE_SQUARE))
                    goto array_desig_after_first;
                  /* Expression receiver.  So far only one part
                     without commas has been parsed; there might be
                     more of the expression.  */
                  rec = first;
                  while (c_parser_next_token_is (parser, CPP_COMMA))
                    {
                      struct c_expr next;
                      c_parser_consume_token (parser);
                      next = c_parser_expr_no_commas (parser, NULL);
                      next = default_function_array_conversion (next);
                      rec = build_compound_expr (rec, next.value);
                    }
                parse_message_args:
                  /* Now parse the objc-message-args.  */
                  args = c_parser_objc_message_args (parser);
                  c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
                                             "expected %<]%>");
                  mexpr.value
                    = objc_build_message_expr (build_tree_list (rec, args));
                  mexpr.original_code = ERROR_MARK;
                  /* Now parse and process the remainder of the
                     initializer, starting with this message
                     expression as a primary-expression.  */
                  c_parser_initval (parser, &mexpr);
                  return;
                }
              /* Ordinary array designator: consume the '[' and parse
                 the index, or the GNU "first ... second" range.  */
              c_parser_consume_token (parser);
              first = c_parser_expr_no_commas (parser, NULL).value;
            array_desig_after_first:
              if (c_parser_next_token_is (parser, CPP_ELLIPSIS))
                {
                  c_parser_consume_token (parser);
                  second = c_parser_expr_no_commas (parser, NULL).value;
                }
              else
                second = NULL_TREE;
              if (c_parser_next_token_is (parser, CPP_CLOSE_SQUARE))
                {
                  c_parser_consume_token (parser);
                  set_init_index (first, second);
                  /* Range designators are a GNU extension.  */
                  if (pedantic && second)
                    pedwarn ("ISO C forbids specifying range of "
                             "elements to initialize");
                }
              else
                c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
                                           "expected %<]%>");
            }
        }
      if (des_seen >= 1)
        {
          /* After designators an '=' is expected; GNU C allows it to
             be omitted only after a single array designator.  */
          if (c_parser_next_token_is (parser, CPP_EQ))
            {
              if (pedantic && !flag_isoc99)
                pedwarn ("ISO C90 forbids specifying subobject to initialize");
              c_parser_consume_token (parser);
            }
          else
            {
              if (des_seen == 1)
                {
                  if (pedantic)
                    pedwarn ("obsolete use of designated initializer "
                             "without %<=%>");
                }
              else
                {
                  /* Missing '=' after a designator list: record an
                     erroneous element and resynchronize.  */
                  struct c_expr init;
                  init.value = error_mark_node;
                  init.original_code = ERROR_MARK;
                  c_parser_error (parser, "expected %<=%>");
                  c_parser_skip_until_found (parser, CPP_COMMA, NULL);
                  process_init_element (init);
                  return;
                }
            }
        }
    }
  /* Finally parse the initializer value itself.  */
  c_parser_initval (parser, NULL);
}
/* Parse a nested initializer; as c_parser_initializer but parses
initializers within braced lists, after any designators have been
applied. If AFTER is not NULL then it is an Objective-C message
expression which is the primary-expression starting the
initializer. */
static void
c_parser_initval (c_parser *parser, struct c_expr *after)
{
  struct c_expr init;

  /* AFTER is only meaningful for Objective-C message expressions.  */
  gcc_assert (!after || c_dialect_objc ());
  if (!after && c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    init = c_parser_braced_init (parser, NULL_TREE, true);
  else
    {
      init = c_parser_expr_no_commas (parser, after);
      /* String literals and compound literals initialize directly;
         other values get the function/array decay conversion.  */
      if (init.value != NULL_TREE
          && TREE_CODE (init.value) != STRING_CST
          && TREE_CODE (init.value) != COMPOUND_LITERAL_EXPR)
        init = default_function_array_conversion (init);
    }
  process_init_element (init);
}
/* Parse a compound statement (possibly a function body) (C90 6.6.2,
C99 6.8.2).
compound-statement:
{ block-item-list[opt] }
{ label-declarations block-item-list }
block-item-list:
block-item
block-item-list block-item
block-item:
nested-declaration
statement
nested-declaration:
declaration
GNU extensions:
compound-statement:
{ label-declarations block-item-list }
nested-declaration:
__extension__ nested-declaration
nested-function-definition
label-declarations:
label-declaration
label-declarations label-declaration
label-declaration:
__label__ identifier-list ;
Allowing the mixing of declarations and code is new in C99. The
GNU syntax also permits (not shown above) labels at the end of
compound statements, which yield an error. We don't allow labels
on declarations; this might seem like a natural extension, but
there would be a conflict between attributes on the label and
prefix attributes on the declaration. ??? The syntax follows the
old parser in requiring something after label declarations.
Although they are erroneous if the labels declared aren't defined,
is it useful for the syntax to be this way?
OpenMP:
block-item:
openmp-directive
openmp-directive:
barrier-directive
flush-directive */
static tree
c_parser_compound_statement (c_parser *parser)
{
  tree scope;

  /* c_parser_require consumes the opening brace on success; the rest
     of the statement, including the closing brace, is handled by the
     nostart worker shared with statement-expression parsing.  */
  if (!c_parser_require (parser, CPP_OPEN_BRACE, "expected %<{%>"))
    return error_mark_node;
  scope = c_begin_compound_stmt (true);
  c_parser_compound_statement_nostart (parser);
  return c_end_compound_stmt (scope, true);
}
/* Parse a compound statement except for the opening brace. This is
used for parsing both compound statements and statement expressions
(which follow different paths to handling the opening). */
static void
c_parser_compound_statement_nostart (c_parser *parser)
{
  /* LAST_STMT is true when the most recent block item was a
     statement; it drives the C90 mixed-declarations-and-code
     diagnostic.  LAST_LABEL is true when the most recent item was a
     label, after which a declaration may not directly follow.  */
  bool last_stmt = false;
  bool last_label = false;
  if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
    {
      /* Empty compound statement: consume the '}' and finish.  */
      c_parser_consume_token (parser);
      return;
    }
  if (c_parser_next_token_is_keyword (parser, RID_LABEL))
    {
      /* Read zero or more forward-declarations for labels that nested
         functions can jump to.  */
      while (c_parser_next_token_is_keyword (parser, RID_LABEL))
        {
          c_parser_consume_token (parser);
          /* Any identifiers, including those declared as type names,
             are OK here.  */
          while (true)
            {
              tree label;
              if (c_parser_next_token_is_not (parser, CPP_NAME))
                {
                  c_parser_error (parser, "expected identifier");
                  break;
                }
              label
                = declare_label (c_parser_peek_token (parser)->value);
              C_DECLARED_LABEL_FLAG (label) = 1;
              add_stmt (build_stmt (DECL_EXPR, label));
              c_parser_consume_token (parser);
              if (c_parser_next_token_is (parser, CPP_COMMA))
                c_parser_consume_token (parser);
              else
                break;
            }
          c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
        }
      /* ??? Locating this diagnostic on the token after the
         declarations end follows the old parser, but it might be
         better to locate it where the declarations start instead.  */
      if (pedantic)
        pedwarn ("ISO C forbids label declarations");
    }
  /* We must now have at least one statement, label or declaration.  */
  if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
    {
      c_parser_error (parser, "expected declaration or statement");
      c_parser_consume_token (parser);
      return;
    }
  while (c_parser_next_token_is_not (parser, CPP_CLOSE_BRACE))
    {
      location_t loc = c_parser_peek_token (parser)->location;
      if (c_parser_next_token_is_keyword (parser, RID_CASE)
          || c_parser_next_token_is_keyword (parser, RID_DEFAULT)
          || (c_parser_next_token_is (parser, CPP_NAME)
              && c_parser_peek_2nd_token (parser)->type == CPP_COLON))
        {
          /* A label: 'case', 'default' or identifier-colon.  */
          last_label = true;
          last_stmt = false;
          c_parser_label (parser);
        }
      else if (!last_label
               && c_parser_next_token_starts_declspecs (parser))
        {
          /* A declaration or nested function definition.  */
          last_label = false;
          c_parser_declaration_or_fndef (parser, true, true, true, true);
          if (last_stmt
              && ((pedantic && !flag_isoc99)
                  || warn_declaration_after_statement))
            pedwarn_c90 ("%HISO C90 forbids mixed declarations and code",
                         &loc);
          last_stmt = false;
        }
      else if (!last_label
               && c_parser_next_token_is_keyword (parser, RID_EXTENSION))
        {
          /* __extension__ can start a declaration, but is also an
             unary operator that can start an expression.  Consume all
             but the last of a possible series of __extension__ to
             determine which.  */
          while (c_parser_peek_2nd_token (parser)->type == CPP_KEYWORD
                 && (c_parser_peek_2nd_token (parser)->keyword
                     == RID_EXTENSION))
            c_parser_consume_token (parser);
          if (c_token_starts_declspecs (c_parser_peek_2nd_token (parser)))
            {
              int ext;
              ext = disable_extension_diagnostics ();
              c_parser_consume_token (parser);
              last_label = false;
              c_parser_declaration_or_fndef (parser, true, true, true, true);
              /* Following the old parser, __extension__ does not
                 disable this diagnostic.  */
              restore_extension_diagnostics (ext);
              if (last_stmt
                  && ((pedantic && !flag_isoc99)
                      || warn_declaration_after_statement))
                pedwarn_c90 ("%HISO C90 forbids mixed declarations and code",
                             &loc);
              last_stmt = false;
            }
          else
            goto statement;
        }
      else if (c_parser_next_token_is (parser, CPP_PRAGMA))
        {
          /* External pragmas, and some omp pragmas, are not associated
             with regular c code, and so are not to be considered statements
             syntactically.  This ensures that the user doesn't put them
             places that would turn into syntax errors if the directive
             were ignored.  */
          if (c_parser_pragma (parser, pragma_compound))
            last_label = false, last_stmt = true;
        }
      else if (c_parser_next_token_is (parser, CPP_EOF))
        {
          /* EOF inside the block: diagnose without consuming it.  */
          c_parser_error (parser, "expected declaration or statement");
          return;
        }
      else
        {
        statement:
          last_label = false;
          last_stmt = true;
          c_parser_statement_after_labels (parser);
        }
      /* Reset any error so parsing of the next item can proceed.  */
      parser->error = false;
    }
  if (last_label)
    error ("label at end of compound statement");
  /* Consume the closing '}'.  */
  c_parser_consume_token (parser);
}
/* Parse a label (C90 6.6.1, C99 6.8.1).
label:
identifier : attributes[opt]
case constant-expression :
default :
GNU extensions:
label:
case constant-expression ... constant-expression :
The use of attributes on labels is a GNU extension. The syntax in
GNU C accepts any expressions without commas, non-constant
expressions being rejected later. */
static void
c_parser_label (c_parser *parser)
{
  location_t start_loc = c_parser_peek_token (parser)->location;
  tree label = NULL_TREE;

  if (c_parser_next_token_is_keyword (parser, RID_CASE))
    {
      tree low, high;
      c_parser_consume_token (parser);
      low = c_parser_expr_no_commas (parser, NULL).value;
      if (c_parser_next_token_is (parser, CPP_ELLIPSIS))
        {
          /* GNU case range: case low ... high:  */
          c_parser_consume_token (parser);
          high = c_parser_expr_no_commas (parser, NULL).value;
          if (c_parser_require (parser, CPP_COLON, "expected %<:%>"))
            label = do_case (low, high);
        }
      else if (c_parser_next_token_is (parser, CPP_COLON))
        {
          c_parser_consume_token (parser);
          label = do_case (low, NULL_TREE);
        }
      else
        c_parser_error (parser, "expected %<:%> or %<...%>");
    }
  else if (c_parser_next_token_is_keyword (parser, RID_DEFAULT))
    {
      c_parser_consume_token (parser);
      if (c_parser_require (parser, CPP_COLON, "expected %<:%>"))
        label = do_case (NULL_TREE, NULL_TREE);
    }
  else
    {
      /* An ordinary identifier label, possibly followed by
         attributes; the caller has already verified the
         identifier-colon pattern.  */
      tree name = c_parser_peek_token (parser)->value;
      tree decl;
      location_t colon_loc;
      tree attrs;
      gcc_assert (c_parser_next_token_is (parser, CPP_NAME));
      c_parser_consume_token (parser);
      gcc_assert (c_parser_next_token_is (parser, CPP_COLON));
      colon_loc = c_parser_peek_token (parser)->location;
      c_parser_consume_token (parser);
      attrs = c_parser_attributes (parser);
      decl = define_label (colon_loc, name);
      if (decl)
        {
          decl_attributes (&decl, attrs, 0);
          label = add_stmt (build_stmt (LABEL_EXPR, decl));
        }
    }
  if (label)
    SET_EXPR_LOCATION (label, start_loc);
}
/* Parse a statement (C90 6.6, C99 6.8).
statement:
labeled-statement
compound-statement
expression-statement
selection-statement
iteration-statement
jump-statement
labeled-statement:
label statement
expression-statement:
expression[opt] ;
selection-statement:
if-statement
switch-statement
iteration-statement:
while-statement
do-statement
for-statement
jump-statement:
goto identifier ;
continue ;
break ;
return expression[opt] ;
GNU extensions:
statement:
asm-statement
jump-statement:
goto * expression ;
Objective-C:
statement:
objc-throw-statement
objc-try-catch-statement
objc-synchronized-statement
objc-throw-statement:
@throw expression ;
@throw ;
OpenMP:
statement:
openmp-construct
openmp-construct:
parallel-construct
for-construct
sections-construct
single-construct
parallel-for-construct
parallel-sections-construct
master-construct
critical-construct
atomic-construct
ordered-construct
parallel-construct:
parallel-directive structured-block
for-construct:
for-directive iteration-statement
sections-construct:
sections-directive section-scope
single-construct:
single-directive structured-block
parallel-for-construct:
parallel-for-directive iteration-statement
parallel-sections-construct:
parallel-sections-directive section-scope
master-construct:
master-directive structured-block
critical-construct:
critical-directive structured-block
atomic-construct:
atomic-directive expression-statement
ordered-construct:
ordered-directive structured-block */
static void
c_parser_statement (c_parser *parser)
{
  /* Strip any leading labels: 'case', 'default', or an identifier
     followed by a colon.  */
  for (;;)
    {
      if (!c_parser_next_token_is_keyword (parser, RID_CASE)
          && !c_parser_next_token_is_keyword (parser, RID_DEFAULT)
          && !(c_parser_next_token_is (parser, CPP_NAME)
               && c_parser_peek_2nd_token (parser)->type == CPP_COLON))
        break;
      c_parser_label (parser);
    }
  c_parser_statement_after_labels (parser);
}
/* Parse a statement, other than a labeled statement. */
static void
c_parser_statement_after_labels (c_parser *parser)
{
  /* LOC is the location of the statement's first token; it is
     attached to the resulting statement tree at the end.  */
  location_t loc = c_parser_peek_token (parser)->location;
  tree stmt = NULL_TREE;
  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_OPEN_BRACE:
      /* Compound statement.  */
      add_stmt (c_parser_compound_statement (parser));
      break;
    case CPP_KEYWORD:
      switch (c_parser_peek_token (parser)->keyword)
        {
        case RID_IF:
          c_parser_if_statement (parser);
          break;
        case RID_SWITCH:
          c_parser_switch_statement (parser);
          break;
        case RID_WHILE:
          c_parser_while_statement (parser);
          break;
        case RID_DO:
          c_parser_do_statement (parser);
          break;
        case RID_FOR:
          c_parser_for_statement (parser);
          break;
        case RID_GOTO:
          c_parser_consume_token (parser);
          if (c_parser_next_token_is (parser, CPP_NAME))
            {
              /* goto label;  */
              stmt = c_finish_goto_label (c_parser_peek_token (parser)->value);
              c_parser_consume_token (parser);
            }
          else if (c_parser_next_token_is (parser, CPP_MULT))
            {
              /* GNU computed goto: goto *expr;  */
              c_parser_consume_token (parser);
              stmt = c_finish_goto_ptr (c_parser_expression (parser).value);
            }
          else
            c_parser_error (parser, "expected identifier or %<*%>");
          goto expect_semicolon;
        case RID_CONTINUE:
          c_parser_consume_token (parser);
          stmt = c_finish_bc_stmt (&c_cont_label, false);
          goto expect_semicolon;
        case RID_BREAK:
          c_parser_consume_token (parser);
          stmt = c_finish_bc_stmt (&c_break_label, true);
          goto expect_semicolon;
        case RID_RETURN:
          c_parser_consume_token (parser);
          if (c_parser_next_token_is (parser, CPP_SEMICOLON))
            {
              /* return with no value.  */
              stmt = c_finish_return (NULL_TREE);
              c_parser_consume_token (parser);
            }
          else
            {
              stmt = c_finish_return (c_parser_expression_conv (parser).value);
              goto expect_semicolon;
            }
          break;
        case RID_ASM:
          stmt = c_parser_asm_statement (parser);
          break;
        case RID_AT_THROW:
          gcc_assert (c_dialect_objc ());
          c_parser_consume_token (parser);
          if (c_parser_next_token_is (parser, CPP_SEMICOLON))
            {
              /* "@throw;" with no operand.  */
              stmt = objc_build_throw_stmt (NULL_TREE);
              c_parser_consume_token (parser);
            }
          else
            {
              stmt
                = objc_build_throw_stmt (c_parser_expression (parser).value);
              goto expect_semicolon;
            }
          break;
        case RID_AT_TRY:
          gcc_assert (c_dialect_objc ());
          c_parser_objc_try_catch_statement (parser);
          break;
        case RID_AT_SYNCHRONIZED:
          gcc_assert (c_dialect_objc ());
          c_parser_objc_synchronized_statement (parser);
          break;
        default:
          /* Any other keyword starts an expression statement.  */
          goto expr_stmt;
        }
      break;
    case CPP_SEMICOLON:
      /* Null statement.  */
      c_parser_consume_token (parser);
      break;
    case CPP_CLOSE_PAREN:
    case CPP_CLOSE_SQUARE:
      /* Avoid infinite loop in error recovery:
         c_parser_skip_until_found stops at a closing nesting
         delimiter without consuming it, but here we need to consume
         it to proceed further.  */
      c_parser_error (parser, "expected statement");
      c_parser_consume_token (parser);
      break;
    case CPP_PRAGMA:
      c_parser_pragma (parser, pragma_stmt);
      break;
    default:
    expr_stmt:
      stmt = c_finish_expr_stmt (c_parser_expression_conv (parser).value);
    expect_semicolon:
      /* Shared tail for every form ending in ';'.  */
      c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
      break;
    }
  /* Two cases cannot and do not have line numbers associated: If stmt
     is degenerate, such as "2;", then stmt is an INTEGER_CST, which
     cannot hold line numbers.  But that's OK because the statement
     will either be changed to a MODIFY_EXPR during gimplification of
     the statement expr, or discarded.  If stmt was compound, but
     without new variables, we will have skipped the creation of a
     BIND and will have a bare STATEMENT_LIST.  But that's OK because
     (recursively) all of the component statements should already have
     line numbers assigned.  ??? Can we discard no-op statements
     earlier?  */
  if (stmt && EXPR_P (stmt))
    SET_EXPR_LOCATION (stmt, loc);
}
/* Parse a parenthesized condition from an if, do or while statement.
condition:
( expression )
*/
static tree
c_parser_paren_condition (c_parser *parser)
{
  location_t expr_loc;
  tree value;

  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    return error_mark_node;
  expr_loc = c_parser_peek_token (parser)->location;
  /* Perform the truth-value conversion on the expression before
     handing it back to the caller.  */
  value = c_objc_common_truthvalue_conversion
    (c_parser_expression_conv (parser).value);
  if (EXPR_P (value))
    SET_EXPR_LOCATION (value, expr_loc);
  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
  return value;
}
/* Parse a statement which is a block in C99. */
static tree
c_parser_c99_block_statement (c_parser *parser)
{
  /* In C99 a loop or selection body forms its own scope; FLAG_ISOC99
     keeps C90 behavior when false.  */
  tree scope = c_begin_compound_stmt (flag_isoc99);
  c_parser_statement (parser);
  return c_end_compound_stmt (scope, flag_isoc99);
}
/* Parse the body of an if statement or the else half thereof. This
is just parsing a statement but (a) it is a block in C99, (b) we
track whether the body is an if statement for the sake of
-Wparentheses warnings, (c) we handle an empty body specially for
the sake of -Wextra warnings. */
static tree
c_parser_if_body (c_parser *parser, bool *if_p)
{
  tree body_block = c_begin_compound_stmt (flag_isoc99);

  /* Skip any labels so the checks below see the statement proper.  */
  while (c_parser_next_token_is_keyword (parser, RID_CASE)
         || c_parser_next_token_is_keyword (parser, RID_DEFAULT)
         || (c_parser_next_token_is (parser, CPP_NAME)
             && c_parser_peek_2nd_token (parser)->type == CPP_COLON))
    c_parser_label (parser);
  /* Report to the caller whether the body is itself an 'if', for the
     benefit of -Wparentheses.  */
  *if_p = c_parser_next_token_is_keyword (parser, RID_IF);
  /* Record an explicit empty statement for ';' bodies so -Wextra has
     something to warn about.  */
  if (extra_warnings && c_parser_next_token_is (parser, CPP_SEMICOLON))
    add_stmt (build_empty_stmt ());
  c_parser_statement_after_labels (parser);
  return c_end_compound_stmt (body_block, flag_isoc99);
}
/* Parse an if statement (C90 6.6.4, C99 6.8.4).
if-statement:
if ( expression ) statement
if ( expression ) statement else statement
*/
static void
c_parser_if_statement (c_parser *parser)
{
  tree if_scope;
  location_t if_loc;
  tree cond;
  bool nested_if = false, nested_else_if = false;
  tree then_block, else_block;

  gcc_assert (c_parser_next_token_is_keyword (parser, RID_IF));
  c_parser_consume_token (parser);
  if_scope = c_begin_compound_stmt (flag_isoc99);
  if_loc = c_parser_peek_token (parser)->location;
  cond = c_parser_paren_condition (parser);
  then_block = c_parser_if_body (parser, &nested_if);
  if (c_parser_next_token_is_keyword (parser, RID_ELSE))
    {
      c_parser_consume_token (parser);
      else_block = c_parser_if_body (parser, &nested_else_if);
    }
  else
    else_block = NULL_TREE;
  /* NESTED_IF tells c_finish_if_stmt whether the then-arm was itself
     an 'if', for the -Wparentheses dangling-else warning.  */
  c_finish_if_stmt (if_loc, cond, then_block, else_block, nested_if);
  add_stmt (c_end_compound_stmt (if_scope, flag_isoc99));
}
/* Parse a switch statement (C90 6.6.4, C99 6.8.4).
switch-statement:
switch (expression) statement
*/
static void
c_parser_switch_statement (c_parser *parser)
{
  tree outer_scope, controlling_expr, body, saved_break;

  gcc_assert (c_parser_next_token_is_keyword (parser, RID_SWITCH));
  c_parser_consume_token (parser);
  outer_scope = c_begin_compound_stmt (flag_isoc99);
  if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      controlling_expr = c_parser_expression (parser).value;
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
    }
  else
    controlling_expr = error_mark_node;
  c_start_case (controlling_expr);
  /* A 'break' inside the body must bind to this switch, so give it a
     fresh break label and restore the enclosing one afterwards.  */
  saved_break = c_break_label;
  c_break_label = NULL_TREE;
  body = c_parser_c99_block_statement (parser);
  c_finish_case (body);
  if (c_break_label)
    add_stmt (build1 (LABEL_EXPR, void_type_node, c_break_label));
  c_break_label = saved_break;
  add_stmt (c_end_compound_stmt (outer_scope, flag_isoc99));
}
/* Parse a while statement (C90 6.6.5, C99 6.8.5).
while-statement:
while (expression) statement
*/
static void
c_parser_while_statement (c_parser *parser)
{
  tree scope, cond, body, saved_break, saved_cont;
  location_t while_loc;

  gcc_assert (c_parser_next_token_is_keyword (parser, RID_WHILE));
  c_parser_consume_token (parser);
  scope = c_begin_compound_stmt (flag_isoc99);
  while_loc = c_parser_peek_token (parser)->location;
  cond = c_parser_paren_condition (parser);
  /* Give the loop its own break/continue labels, restoring the
     enclosing ones once the body has been parsed.  */
  saved_break = c_break_label;
  c_break_label = NULL_TREE;
  saved_cont = c_cont_label;
  c_cont_label = NULL_TREE;
  body = c_parser_c99_block_statement (parser);
  c_finish_loop (while_loc, cond, NULL, body, c_break_label,
                 c_cont_label, true);
  add_stmt (c_end_compound_stmt (scope, flag_isoc99));
  c_break_label = saved_break;
  c_cont_label = saved_cont;
}
/* Parse a do statement (C90 6.6.5, C99 6.8.5).
do-statement:
do statement while ( expression ) ;
*/
static void
c_parser_do_statement (c_parser *parser)
{
  tree scope, cond, body, saved_break, saved_cont, loop_break, loop_cont;
  location_t do_loc;

  gcc_assert (c_parser_next_token_is_keyword (parser, RID_DO));
  c_parser_consume_token (parser);
  scope = c_begin_compound_stmt (flag_isoc99);
  do_loc = c_parser_peek_token (parser)->location;
  /* Fresh break/continue labels for the body.  */
  saved_break = c_break_label;
  c_break_label = NULL_TREE;
  saved_cont = c_cont_label;
  c_cont_label = NULL_TREE;
  body = c_parser_c99_block_statement (parser);
  c_parser_require_keyword (parser, RID_WHILE, "expected %<while%>");
  /* Capture the labels the body may have created and restore the
     enclosing ones before the controlling expression is parsed.  */
  loop_break = c_break_label;
  c_break_label = saved_break;
  loop_cont = c_cont_label;
  c_cont_label = saved_cont;
  cond = c_parser_paren_condition (parser);
  if (!c_parser_require (parser, CPP_SEMICOLON, "expected %<;%>"))
    c_parser_skip_to_end_of_block_or_statement (parser);
  c_finish_loop (do_loc, cond, NULL, body, loop_break, loop_cont, false);
  add_stmt (c_end_compound_stmt (scope, flag_isoc99));
}
/* Parse a for statement (C90 6.6.5, C99 6.8.5).
for-statement:
for ( expression[opt] ; expression[opt] ; expression[opt] ) statement
for ( nested-declaration expression[opt] ; expression[opt] ) statement
The form with a declaration is new in C99.
??? In accordance with the old parser, the declaration may be a
nested function, which is then rejected in check_for_loop_decls,
but does it make any sense for this to be included in the grammar?
Note in particular that the nested function does not include a
trailing ';', whereas the "declaration" production includes one.
Also, can we reject bad declarations earlier and cheaper than
check_for_loop_decls? */
static void
c_parser_for_statement (c_parser *parser)
{
  tree block, cond, incr, save_break, save_cont, body;
  /* LOC starts at the 'for' keyword and is later moved to the
     condition, whose location is attached to the finished loop.  */
  location_t loc;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_FOR));
  loc = c_parser_peek_token (parser)->location;
  c_parser_consume_token (parser);
  /* Open a block so a C99 declaration in the init clause is scoped to
     the loop.  */
  block = c_begin_compound_stmt (flag_isoc99);
  if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      /* Parse the initialization declaration or expression.  */
      if (c_parser_next_token_is (parser, CPP_SEMICOLON))
        {
          /* Empty init clause.  */
          c_parser_consume_token (parser);
          c_finish_expr_stmt (NULL_TREE);
        }
      else if (c_parser_next_token_starts_declspecs (parser))
        {
          /* C99 declaration; the declaration parser consumes the
             terminating ';' itself.  */
          c_parser_declaration_or_fndef (parser, true, true, true, true);
          check_for_loop_decls ();
        }
      else if (c_parser_next_token_is_keyword (parser, RID_EXTENSION))
        {
          /* __extension__ can start a declaration, but is also an
             unary operator that can start an expression.  Consume all
             but the last of a possible series of __extension__ to
             determine which.  */
          while (c_parser_peek_2nd_token (parser)->type == CPP_KEYWORD
                 && (c_parser_peek_2nd_token (parser)->keyword
                     == RID_EXTENSION))
            c_parser_consume_token (parser);
          if (c_token_starts_declspecs (c_parser_peek_2nd_token (parser)))
            {
              int ext;
              ext = disable_extension_diagnostics ();
              c_parser_consume_token (parser);
              c_parser_declaration_or_fndef (parser, true, true, true, true);
              restore_extension_diagnostics (ext);
              check_for_loop_decls ();
            }
          else
            goto init_expr;
        }
      else
        {
        init_expr:
          /* Plain expression init clause.  */
          c_finish_expr_stmt (c_parser_expression (parser).value);
          c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
        }
      /* Parse the loop condition.  */
      loc = c_parser_peek_token (parser)->location;
      if (c_parser_next_token_is (parser, CPP_SEMICOLON))
        {
          /* An omitted condition is represented as NULL_TREE.  */
          c_parser_consume_token (parser);
          cond = NULL_TREE;
        }
      else
        {
          tree ocond = c_parser_expression_conv (parser).value;
          cond = c_objc_common_truthvalue_conversion (ocond);
          if (EXPR_P (cond))
            SET_EXPR_LOCATION (cond, loc);
          c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
        }
      /* Parse the increment expression.  */
      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
        incr = c_process_expr_stmt (NULL_TREE);
      else
        incr = c_process_expr_stmt (c_parser_expression (parser).value);
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
    }
  else
    {
      cond = error_mark_node;
      incr = error_mark_node;
    }
  /* Fresh break/continue labels for this loop's body; the enclosing
     labels are restored afterwards.  */
  save_break = c_break_label;
  c_break_label = NULL_TREE;
  save_cont = c_cont_label;
  c_cont_label = NULL_TREE;
  body = c_parser_c99_block_statement (parser);
  c_finish_loop (loc, cond, incr, body, c_break_label, c_cont_label, true);
  add_stmt (c_end_compound_stmt (block, flag_isoc99));
  c_break_label = save_break;
  c_cont_label = save_cont;
}
/* Parse an asm statement, a GNU extension. This is a full-blown asm
statement with inputs, outputs, clobbers, and volatile tag
allowed.
asm-statement:
asm type-qualifier[opt] ( asm-argument ) ;
asm-argument:
asm-string-literal
asm-string-literal : asm-operands[opt]
asm-string-literal : asm-operands[opt] : asm-operands[opt]
asm-string-literal : asm-operands[opt] : asm-operands[opt] : asm-clobbers
Qualifiers other than volatile are accepted in the syntax but
warned for. */
static tree
c_parser_asm_statement (c_parser *parser)
{
  tree quals, str, outputs, inputs, clobbers, ret;
  /* SIMPLE is true for the one-argument form with no colon-separated
     operand sections.  */
  bool simple;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_ASM));
  c_parser_consume_token (parser);
  if (c_parser_next_token_is_keyword (parser, RID_VOLATILE))
    {
      quals = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
    }
  else if (c_parser_next_token_is_keyword (parser, RID_CONST)
           || c_parser_next_token_is_keyword (parser, RID_RESTRICT))
    {
      /* 'const' and 'restrict' are accepted but ignored.  */
      warning (0, "%E qualifier ignored on asm",
               c_parser_peek_token (parser)->value);
      quals = NULL_TREE;
      c_parser_consume_token (parser);
    }
  else
    quals = NULL_TREE;
  /* ??? Follow the C++ parser rather than using the
     c_lex_string_translate kludge.  */
  /* Disable string translation for the asm template and constraint
     strings.  Every exit path below restores it to 1.  */
  c_lex_string_translate = 0;
  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      c_lex_string_translate = 1;
      return NULL_TREE;
    }
  str = c_parser_asm_string_literal (parser);
  if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
    {
      /* Simple form: asm ("template") with no operand sections.  */
      simple = true;
      outputs = NULL_TREE;
      inputs = NULL_TREE;
      clobbers = NULL_TREE;
      goto done_asm;
    }
  if (!c_parser_require (parser, CPP_COLON, "expected %<:%> or %<)%>"))
    {
      c_lex_string_translate = 1;
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
      return NULL_TREE;
    }
  simple = false;
  /* Parse outputs.  */
  if (c_parser_next_token_is (parser, CPP_COLON)
      || c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
    outputs = NULL_TREE;
  else
    outputs = c_parser_asm_operands (parser, false);
  if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
    {
      inputs = NULL_TREE;
      clobbers = NULL_TREE;
      goto done_asm;
    }
  if (!c_parser_require (parser, CPP_COLON, "expected %<:%> or %<)%>"))
    {
      c_lex_string_translate = 1;
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
      return NULL_TREE;
    }
  /* Parse inputs.  */
  if (c_parser_next_token_is (parser, CPP_COLON)
      || c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
    inputs = NULL_TREE;
  else
    inputs = c_parser_asm_operands (parser, true);
  if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
    {
      clobbers = NULL_TREE;
      goto done_asm;
    }
  if (!c_parser_require (parser, CPP_COLON, "expected %<:%> or %<)%>"))
    {
      c_lex_string_translate = 1;
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
      return NULL_TREE;
    }
  /* Parse clobbers.  */
  clobbers = c_parser_asm_clobbers (parser);
 done_asm:
  c_lex_string_translate = 1;
  if (!c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>"))
    {
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
      return NULL_TREE;
    }
  if (!c_parser_require (parser, CPP_SEMICOLON, "expected %<;%>"))
    c_parser_skip_to_end_of_block_or_statement (parser);
  ret = build_asm_stmt (quals, build_asm_expr (str, outputs, inputs,
                                               clobbers, simple));
  return ret;
}
/* Parse asm operands, a GNU extension. If CONVERT_P (for inputs but
not outputs), apply the default conversion of functions and arrays
to pointers.
asm-operands:
asm-operand
asm-operands , asm-operand
asm-operand:
asm-string-literal ( expression )
[ identifier ] asm-string-literal ( expression )
*/
static tree
c_parser_asm_operands (c_parser *parser, bool convert_p)
{
  tree operands = NULL_TREE;
  for (;;)
    {
      tree sym_name = NULL_TREE;
      tree constraint;
      struct c_expr operand;
      /* An operand may be prefixed by a bracketed symbolic name.  */
      if (c_parser_next_token_is (parser, CPP_OPEN_SQUARE))
	{
	  c_parser_consume_token (parser);
	  if (!c_parser_next_token_is (parser, CPP_NAME))
	    {
	      c_parser_error (parser, "expected identifier");
	      c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, NULL);
	      return NULL_TREE;
	    }
	  {
	    tree id = c_parser_peek_token (parser)->value;
	    c_parser_consume_token (parser);
	    sym_name = build_string (IDENTIFIER_LENGTH (id),
				     IDENTIFIER_POINTER (id));
	  }
	  c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
				     "expected %<]%>");
	}
      /* The constraint string for this operand.  */
      constraint = c_parser_asm_string_literal (parser);
      if (constraint == NULL_TREE)
	return NULL_TREE;
      /* The parenthesized expression is ordinary C, so switch string
	 translation back on while parsing it.  */
      c_lex_string_translate = 1;
      if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	{
	  c_lex_string_translate = 0;
	  return NULL_TREE;
	}
      operand = c_parser_expression (parser);
      if (convert_p)
	operand = default_function_array_conversion (operand);
      c_lex_string_translate = 0;
      if (!c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>"))
	{
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	  return NULL_TREE;
	}
      /* Chain ((name, constraint), expression) onto the result.  */
      operands = chainon (operands,
			  build_tree_list (build_tree_list (sym_name,
							    constraint),
					   operand.value));
      if (c_parser_next_token_is_not (parser, CPP_COMMA))
	break;
      c_parser_consume_token (parser);
    }
  return operands;
}
/* Parse asm clobbers, a GNU extension.
asm-clobbers:
asm-string-literal
asm-clobbers , asm-string-literal
*/
static tree
c_parser_asm_clobbers (c_parser *parser)
{
  tree clobbers = NULL_TREE;
  for (;;)
    {
      /* Each clobber is an asm string literal; NULL_TREE means a
	 parse error was already reported.  */
      tree clobber = c_parser_asm_string_literal (parser);
      if (clobber == NULL_TREE)
	return NULL_TREE;
      /* Cons onto the front; the resulting list is in reverse source
	 order, which the callers accept.  */
      clobbers = tree_cons (NULL_TREE, clobber, clobbers);
      if (c_parser_next_token_is_not (parser, CPP_COMMA))
	break;
      c_parser_consume_token (parser);
    }
  return clobbers;
}
/* Parse an expression other than a compound expression; that is, an
assignment expression (C90 6.3.16, C99 6.5.16). If AFTER is not
NULL then it is an Objective-C message expression which is the
primary-expression starting the expression as an initializer.
assignment-expression:
conditional-expression
unary-expression assignment-operator assignment-expression
assignment-operator: one of
= *= /= %= += -= <<= >>= &= ^= |=
In GNU C we accept any conditional expression on the LHS and
diagnose the invalid lvalue rather than producing a syntax
error. */
static struct c_expr
c_parser_expr_no_commas (c_parser *parser, struct c_expr *after)
{
  struct c_expr left, right, result;
  enum tree_code opcode;
  gcc_assert (!after || c_dialect_objc ());
  left = c_parser_conditional_expression (parser, after);
  /* Map the assignment operator token, if any, to the tree code used
     by build_modify_expr; plain '=' is represented as NOP_EXPR.  */
  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_EQ:
      opcode = NOP_EXPR;
      break;
    case CPP_MULT_EQ:
      opcode = MULT_EXPR;
      break;
    case CPP_DIV_EQ:
      opcode = TRUNC_DIV_EXPR;
      break;
    case CPP_MOD_EQ:
      opcode = TRUNC_MOD_EXPR;
      break;
    case CPP_PLUS_EQ:
      opcode = PLUS_EXPR;
      break;
    case CPP_MINUS_EQ:
      opcode = MINUS_EXPR;
      break;
    case CPP_LSHIFT_EQ:
      opcode = LSHIFT_EXPR;
      break;
    case CPP_RSHIFT_EQ:
      opcode = RSHIFT_EXPR;
      break;
    case CPP_AND_EQ:
      opcode = BIT_AND_EXPR;
      break;
    case CPP_XOR_EQ:
      opcode = BIT_XOR_EXPR;
      break;
    case CPP_OR_EQ:
      opcode = BIT_IOR_EXPR;
      break;
    default:
      /* No assignment operator follows, so the conditional expression
	 is the whole assignment-expression.  */
      return left;
    }
  c_parser_consume_token (parser);
  /* Assignment is right-associative: recurse for the RHS.  */
  right = c_parser_expr_no_commas (parser, NULL);
  right = default_function_array_conversion (right);
  result.value = build_modify_expr (left.value, opcode, right.value);
  if (opcode != NOP_EXPR)
    {
      /* Compound assignments never get the "value computed is not
	 used" style warnings on the resulting tree.  */
      TREE_NO_WARNING (result.value) = 1;
      result.original_code = ERROR_MARK;
    }
  else
    result.original_code = MODIFY_EXPR;
  return result;
}
/* Parse a conditional expression (C90 6.3.15, C99 6.5.15). If AFTER
is not NULL then it is an Objective-C message expression which is
the primary-expression starting the expression as an initializer.
conditional-expression:
logical-OR-expression
logical-OR-expression ? expression : conditional-expression
GNU extensions:
conditional-expression:
logical-OR-expression ? : conditional-expression
*/
static struct c_expr
c_parser_conditional_expression (c_parser *parser, struct c_expr *after)
{
  struct c_expr cond, exp1, exp2, ret;
  gcc_assert (!after || c_dialect_objc ());
  cond = c_parser_binary_expression (parser, after);
  /* No '?' means this is just the binary expression.  */
  if (c_parser_next_token_is_not (parser, CPP_QUERY))
    return cond;
  cond = default_function_array_conversion (cond);
  c_parser_consume_token (parser);
  if (c_parser_next_token_is (parser, CPP_COLON))
    {
      /* GNU extension "cond ? : exp2": the condition doubles as the
	 middle operand.  */
      if (pedantic)
	pedwarn ("ISO C forbids omitting the middle term of a ?: expression");
      /* Make sure first operand is calculated only once.  */
      exp1.value = save_expr (default_conversion (cond.value));
      cond.value = c_objc_common_truthvalue_conversion (exp1.value);
      /* If the condition folded to constant true, EXP2 is known to be
	 unevaluated: bump skip_evaluation while parsing it (undone
	 below).  */
      skip_evaluation += cond.value == truthvalue_true_node;
    }
  else
    {
      cond.value
	= c_objc_common_truthvalue_conversion
	(default_conversion (cond.value));
      /* If the condition folded to constant false, EXP1 is known to be
	 unevaluated while it is parsed...  */
      skip_evaluation += cond.value == truthvalue_false_node;
      exp1 = c_parser_expression_conv (parser);
      /* ...and afterwards switch the accounting over so that
	 skip_evaluation is raised exactly while EXP2 is parsed when the
	 condition is constant true.  */
      skip_evaluation += ((cond.value == truthvalue_true_node)
			  - (cond.value == truthvalue_false_node));
    }
  if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
    {
      /* Undo the constant-true adjustment made above before bailing
	 out.  */
      skip_evaluation -= cond.value == truthvalue_true_node;
      ret.value = error_mark_node;
      ret.original_code = ERROR_MARK;
      return ret;
    }
  /* The third operand is itself a conditional-expression
     (right-associative).  */
  exp2 = c_parser_conditional_expression (parser, NULL);
  exp2 = default_function_array_conversion (exp2);
  skip_evaluation -= cond.value == truthvalue_true_node;
  ret.value = build_conditional_expr (cond.value, exp1.value, exp2.value);
  ret.original_code = ERROR_MARK;
  return ret;
}
/* Parse a binary expression; that is, a logical-OR-expression (C90
6.3.5-6.3.14, C99 6.5.5-6.5.14). If AFTER is not NULL then it is
an Objective-C message expression which is the primary-expression
starting the expression as an initializer.
multiplicative-expression:
cast-expression
multiplicative-expression * cast-expression
multiplicative-expression / cast-expression
multiplicative-expression % cast-expression
additive-expression:
multiplicative-expression
additive-expression + multiplicative-expression
additive-expression - multiplicative-expression
shift-expression:
additive-expression
shift-expression << additive-expression
shift-expression >> additive-expression
relational-expression:
shift-expression
relational-expression < shift-expression
relational-expression > shift-expression
relational-expression <= shift-expression
relational-expression >= shift-expression
equality-expression:
relational-expression
equality-expression == relational-expression
equality-expression != relational-expression
AND-expression:
equality-expression
AND-expression & equality-expression
exclusive-OR-expression:
AND-expression
exclusive-OR-expression ^ AND-expression
inclusive-OR-expression:
exclusive-OR-expression
inclusive-OR-expression | exclusive-OR-expression
logical-AND-expression:
inclusive-OR-expression
logical-AND-expression && inclusive-OR-expression
logical-OR-expression:
logical-AND-expression
logical-OR-expression || logical-AND-expression
*/
static struct c_expr
c_parser_binary_expression (c_parser *parser, struct c_expr *after)
{
  /* A binary expression is parsed using operator-precedence parsing,
     with the operands being cast expressions.  All the binary
     operators are left-associative.  Thus a binary expression is of
     form:
     E0 op1 E1 op2 E2 ...
     which we represent on a stack.  On the stack, the precedence
     levels are strictly increasing.  When a new operator is
     encountered of higher precedence than that at the top of the
     stack, it is pushed; its LHS is the top expression, and its RHS
     is everything parsed until it is popped.  When a new operator is
     encountered with precedence less than or equal to that at the top
     of the stack, triples E[i-1] op[i] E[i] are popped and replaced
     by the result of the operation until the operator at the top of
     the stack has lower precedence than the new operator or there is
     only one element on the stack; then the top expression is the LHS
     of the new operator.  In the case of logical AND and OR
     expressions, we also need to adjust skip_evaluation as
     appropriate when the operators are pushed and popped.  */
  /* The precedence levels, where 0 is a dummy lowest level used for
     the bottom of the stack.  */
  enum prec {
    PREC_NONE,
    PREC_LOGOR,
    PREC_LOGAND,
    PREC_BITOR,
    PREC_BITXOR,
    PREC_BITAND,
    PREC_EQ,
    PREC_REL,
    PREC_SHIFT,
    PREC_ADD,
    PREC_MULT,
    NUM_PRECS
  };
  struct {
    /* The expression at this stack level.  */
    struct c_expr expr;
    /* The precedence of the operator on its left, PREC_NONE at the
       bottom of the stack.  */
    enum prec prec;
    /* The operation on its left.  */
    enum tree_code op;
  } stack[NUM_PRECS];
  int sp;
  /* POP reduces the top two stack entries into one by applying the
     operator recorded on the top entry; for && and || it also undoes
     the skip_evaluation increment made when that operator was
     pushed.  */
#define POP \
  do { \
    switch (stack[sp].op) \
      { \
      case TRUTH_ANDIF_EXPR: \
	skip_evaluation -= stack[sp - 1].expr.value == truthvalue_false_node; \
	break; \
      case TRUTH_ORIF_EXPR: \
	skip_evaluation -= stack[sp - 1].expr.value == truthvalue_true_node; \
	break; \
      default: \
	break; \
      } \
    stack[sp - 1].expr \
      = default_function_array_conversion (stack[sp - 1].expr); \
    stack[sp].expr \
      = default_function_array_conversion (stack[sp].expr); \
    stack[sp - 1].expr = parser_build_binary_op (stack[sp].op, \
						 stack[sp - 1].expr, \
						 stack[sp].expr); \
    sp--; \
  } while (0)
  gcc_assert (!after || c_dialect_objc ());
  /* The leftmost operand.  */
  stack[0].expr = c_parser_cast_expression (parser, after);
  stack[0].prec = PREC_NONE;
  sp = 0;
  while (true)
    {
      enum prec oprec;
      enum tree_code ocode;
      if (parser->error)
	goto out;
      /* Classify the next token as a binary operator, giving its
	 precedence and tree code; any other token ends the
	 expression.  */
      switch (c_parser_peek_token (parser)->type)
	{
	case CPP_MULT:
	  oprec = PREC_MULT;
	  ocode = MULT_EXPR;
	  break;
	case CPP_DIV:
	  oprec = PREC_MULT;
	  ocode = TRUNC_DIV_EXPR;
	  break;
	case CPP_MOD:
	  oprec = PREC_MULT;
	  ocode = TRUNC_MOD_EXPR;
	  break;
	case CPP_PLUS:
	  oprec = PREC_ADD;
	  ocode = PLUS_EXPR;
	  break;
	case CPP_MINUS:
	  oprec = PREC_ADD;
	  ocode = MINUS_EXPR;
	  break;
	case CPP_LSHIFT:
	  oprec = PREC_SHIFT;
	  ocode = LSHIFT_EXPR;
	  break;
	case CPP_RSHIFT:
	  oprec = PREC_SHIFT;
	  ocode = RSHIFT_EXPR;
	  break;
	case CPP_LESS:
	  oprec = PREC_REL;
	  ocode = LT_EXPR;
	  break;
	case CPP_GREATER:
	  oprec = PREC_REL;
	  ocode = GT_EXPR;
	  break;
	case CPP_LESS_EQ:
	  oprec = PREC_REL;
	  ocode = LE_EXPR;
	  break;
	case CPP_GREATER_EQ:
	  oprec = PREC_REL;
	  ocode = GE_EXPR;
	  break;
	case CPP_EQ_EQ:
	  oprec = PREC_EQ;
	  ocode = EQ_EXPR;
	  break;
	case CPP_NOT_EQ:
	  oprec = PREC_EQ;
	  ocode = NE_EXPR;
	  break;
	case CPP_AND:
	  oprec = PREC_BITAND;
	  ocode = BIT_AND_EXPR;
	  break;
	case CPP_XOR:
	  oprec = PREC_BITXOR;
	  ocode = BIT_XOR_EXPR;
	  break;
	case CPP_OR:
	  oprec = PREC_BITOR;
	  ocode = BIT_IOR_EXPR;
	  break;
	case CPP_AND_AND:
	  oprec = PREC_LOGAND;
	  ocode = TRUTH_ANDIF_EXPR;
	  break;
	case CPP_OR_OR:
	  oprec = PREC_LOGOR;
	  ocode = TRUTH_ORIF_EXPR;
	  break;
	default:
	  /* Not a binary operator, so end of the binary
	     expression.  */
	  goto out;
	}
      c_parser_consume_token (parser);
      /* Reduce while the new operator does not bind more tightly than
	 the operator on top of the stack (left associativity).  */
      while (oprec <= stack[sp].prec)
	POP;
      switch (ocode)
	{
	case TRUTH_ANDIF_EXPR:
	  /* The LHS of && is complete here; if it folded to constant
	     false, the RHS is not evaluated, so raise skip_evaluation
	     while the RHS is parsed (undone in POP).  */
	  stack[sp].expr
	    = default_function_array_conversion (stack[sp].expr);
	  stack[sp].expr.value = c_objc_common_truthvalue_conversion
	    (default_conversion (stack[sp].expr.value));
	  skip_evaluation += stack[sp].expr.value == truthvalue_false_node;
	  break;
	case TRUTH_ORIF_EXPR:
	  /* Likewise for ||, whose RHS is not evaluated when the LHS
	     folded to constant true.  */
	  stack[sp].expr
	    = default_function_array_conversion (stack[sp].expr);
	  stack[sp].expr.value = c_objc_common_truthvalue_conversion
	    (default_conversion (stack[sp].expr.value));
	  skip_evaluation += stack[sp].expr.value == truthvalue_true_node;
	  break;
	default:
	  break;
	}
      /* Push the new operator and parse its RHS operand.  */
      sp++;
      stack[sp].expr = c_parser_cast_expression (parser, NULL);
      stack[sp].prec = oprec;
      stack[sp].op = ocode;
    }
 out:
  /* Reduce everything remaining; the bottom entry is the result.  */
  while (sp > 0)
    POP;
  return stack[0].expr;
#undef POP
}
/* Parse a cast expression (C90 6.3.4, C99 6.5.4). If AFTER is not
NULL then it is an Objective-C message expression which is the
primary-expression starting the expression as an initializer.
cast-expression:
unary-expression
( type-name ) unary-expression
*/
static struct c_expr
c_parser_cast_expression (c_parser *parser, struct c_expr *after)
{
  gcc_assert (!after || c_dialect_objc ());
  if (after)
    return c_parser_postfix_expression_after_primary (parser, *after);
  /* Only a parenthesized type name can begin a cast; anything else is
     parsed as an unary expression.  */
  if (!c_parser_next_token_is (parser, CPP_OPEN_PAREN)
      || !c_token_starts_typename (c_parser_peek_2nd_token (parser)))
    return c_parser_unary_expression (parser);
  {
    struct c_type_name *cast_type;
    struct c_expr result;
    struct c_expr operand;
    c_parser_consume_token (parser);
    cast_type = c_parser_type_name (parser);
    c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
    if (cast_type == NULL)
      {
	result.value = error_mark_node;
	result.original_code = ERROR_MARK;
	return result;
      }
    /* Save casted types in the function's used types hash table.  */
    used_types_insert (cast_type->specs->type);
    /* A '{' after the parenthesized type name makes this a compound
       literal rather than a cast.  */
    if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
      return c_parser_postfix_expression_after_paren_type (parser,
							   cast_type);
    operand = c_parser_cast_expression (parser, NULL);
    operand = default_function_array_conversion (operand);
    result.value = c_cast_expr (cast_type, operand.value);
    result.original_code = ERROR_MARK;
    return result;
  }
}
/* Parse an unary expression (C90 6.3.3, C99 6.5.3).
unary-expression:
postfix-expression
++ unary-expression
-- unary-expression
unary-operator cast-expression
sizeof unary-expression
sizeof ( type-name )
unary-operator: one of
& * + - ~ !
GNU extensions:
unary-expression:
__alignof__ unary-expression
__alignof__ ( type-name )
&& identifier
unary-operator: one of
__extension__ __real__ __imag__
In addition, the GNU syntax treats ++ and -- as unary operators, so
they may be applied to cast expressions with errors for non-lvalues
given later. */
/* Subroutine of c_parser_unary_expression: consume the operator token
   at the head of the input, parse the cast-expression operand that
   follows it, and apply the default function and array conversions to
   that operand.  */
static struct c_expr
c_parser_unary_operand (c_parser *parser)
{
  struct c_expr operand;
  c_parser_consume_token (parser);
  operand = c_parser_cast_expression (parser, NULL);
  return default_function_array_conversion (operand);
}

static struct c_expr
c_parser_unary_expression (c_parser *parser)
{
  int ext_state;
  struct c_expr result, operand;
  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_PLUS_PLUS:
      return parser_build_unary_op (PREINCREMENT_EXPR,
				    c_parser_unary_operand (parser));
    case CPP_MINUS_MINUS:
      return parser_build_unary_op (PREDECREMENT_EXPR,
				    c_parser_unary_operand (parser));
    case CPP_AND:
      /* Address-of: the operand does not undergo function/array
	 decay.  */
      c_parser_consume_token (parser);
      return parser_build_unary_op (ADDR_EXPR,
				    c_parser_cast_expression (parser, NULL));
    case CPP_MULT:
      operand = c_parser_unary_operand (parser);
      result.value = build_indirect_ref (operand.value, "unary *");
      result.original_code = ERROR_MARK;
      return result;
    case CPP_PLUS:
      /* Warning must be issued after the token is consumed but before
	 the operand is parsed, as in the original ordering.  */
      c_parser_consume_token (parser);
      if (!c_dialect_objc () && !in_system_header)
	warning (OPT_Wtraditional,
		 "traditional C rejects the unary plus operator");
      operand = c_parser_cast_expression (parser, NULL);
      operand = default_function_array_conversion (operand);
      return parser_build_unary_op (CONVERT_EXPR, operand);
    case CPP_MINUS:
      return parser_build_unary_op (NEGATE_EXPR,
				    c_parser_unary_operand (parser));
    case CPP_COMPL:
      return parser_build_unary_op (BIT_NOT_EXPR,
				    c_parser_unary_operand (parser));
    case CPP_NOT:
      return parser_build_unary_op (TRUTH_NOT_EXPR,
				    c_parser_unary_operand (parser));
    case CPP_AND_AND:
      /* Refer to the address of a label as a pointer.  */
      c_parser_consume_token (parser);
      if (c_parser_next_token_is (parser, CPP_NAME))
	{
	  result.value = finish_label_address_expr
	    (c_parser_peek_token (parser)->value);
	  c_parser_consume_token (parser);
	}
      else
	{
	  c_parser_error (parser, "expected identifier");
	  result.value = error_mark_node;
	}
      result.original_code = ERROR_MARK;
      return result;
    case CPP_KEYWORD:
      switch (c_parser_peek_token (parser)->keyword)
	{
	case RID_SIZEOF:
	  return c_parser_sizeof_expression (parser);
	case RID_ALIGNOF:
	  return c_parser_alignof_expression (parser);
	case RID_EXTENSION:
	  /* __extension__ parses its operand with extension
	     diagnostics disabled.  */
	  c_parser_consume_token (parser);
	  ext_state = disable_extension_diagnostics ();
	  result = c_parser_cast_expression (parser, NULL);
	  restore_extension_diagnostics (ext_state);
	  return result;
	case RID_REALPART:
	  return parser_build_unary_op (REALPART_EXPR,
					c_parser_unary_operand (parser));
	case RID_IMAGPART:
	  return parser_build_unary_op (IMAGPART_EXPR,
					c_parser_unary_operand (parser));
	default:
	  return c_parser_postfix_expression (parser);
	}
    default:
      return c_parser_postfix_expression (parser);
    }
}
/* Parse a sizeof expression. */
static struct c_expr
c_parser_sizeof_expression (c_parser *parser)
{
  struct c_expr expr;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_SIZEOF));
  c_parser_consume_token (parser);
  /* The operand of sizeof is not evaluated; both counters are
     decremented again on every return path below.  */
  skip_evaluation++;
  in_sizeof++;
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)
      && c_token_starts_typename (c_parser_peek_2nd_token (parser)))
    {
      /* Either sizeof ( type-name ) or sizeof unary-expression
	 starting with a compound literal.  */
      struct c_type_name *type_name;
      c_parser_consume_token (parser);
      type_name = c_parser_type_name (parser);
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
      if (type_name == NULL)
	{
	  struct c_expr ret;
	  skip_evaluation--;
	  in_sizeof--;
	  ret.value = error_mark_node;
	  ret.original_code = ERROR_MARK;
	  return ret;
	}
      if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
	{
	  /* It was a compound literal after all; jump into the else
	     branch below to share the bit-field check and cleanup.  */
	  expr = c_parser_postfix_expression_after_paren_type (parser,
							       type_name);
	  goto sizeof_expr;
	}
      /* sizeof ( type-name ).  */
      skip_evaluation--;
      in_sizeof--;
      if (type_name->declarator->kind == cdk_array
	  && type_name->declarator->u.array.vla_unspec_p)
	{
	  /* C99 6.7.5.2p4 */
	  error ("%<[*]%> not allowed in other than a declaration");
	}
      return c_expr_sizeof_type (type_name);
    }
  else
    {
      expr = c_parser_unary_expression (parser);
    sizeof_expr:
      skip_evaluation--;
      in_sizeof--;
      /* sizeof of a bit-field member is a constraint violation.  */
      if (TREE_CODE (expr.value) == COMPONENT_REF
	  && DECL_C_BIT_FIELD (TREE_OPERAND (expr.value, 1)))
	error ("%<sizeof%> applied to a bit-field");
      return c_expr_sizeof_expr (expr);
    }
}
/* Parse an alignof expression. */
/* Parse an __alignof__ expression: either __alignof__ ( type-name ) or
   __alignof__ unary-expression (which may itself start with a compound
   literal).  The operand is never evaluated, so skip_evaluation and
   in_alignof bracket the parse; both are decremented on every return
   path.  */
static struct c_expr
c_parser_alignof_expression (c_parser *parser)
{
  struct c_expr expr;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_ALIGNOF));
  c_parser_consume_token (parser);
  skip_evaluation++;
  in_alignof++;
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)
      && c_token_starts_typename (c_parser_peek_2nd_token (parser)))
    {
      /* Either __alignof__ ( type-name ) or __alignof__
	 unary-expression starting with a compound literal.  */
      struct c_type_name *type_name;
      struct c_expr ret;
      c_parser_consume_token (parser);
      type_name = c_parser_type_name (parser);
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
      if (type_name == NULL)
	{
	  /* Fixed: this branch previously declared a second RET that
	     shadowed the one above; the outer variable serves.  */
	  skip_evaluation--;
	  in_alignof--;
	  ret.value = error_mark_node;
	  ret.original_code = ERROR_MARK;
	  return ret;
	}
      if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
	{
	  /* A compound literal after all; share the tail of the else
	     branch below.  */
	  expr = c_parser_postfix_expression_after_paren_type (parser,
							       type_name);
	  goto alignof_expr;
	}
      /* alignof ( type-name ).  */
      skip_evaluation--;
      in_alignof--;
      ret.value = c_alignof (groktypename (type_name));
      ret.original_code = ERROR_MARK;
      return ret;
    }
  else
    {
      struct c_expr ret;
      expr = c_parser_unary_expression (parser);
    alignof_expr:
      skip_evaluation--;
      in_alignof--;
      ret.value = c_alignof_expr (expr.value);
      ret.original_code = ERROR_MARK;
      return ret;
    }
}
/* Parse a postfix expression (C90 6.3.1-6.3.2, C99 6.5.1-6.5.2).
postfix-expression:
primary-expression
postfix-expression [ expression ]
postfix-expression ( argument-expression-list[opt] )
postfix-expression . identifier
postfix-expression -> identifier
postfix-expression ++
postfix-expression --
( type-name ) { initializer-list }
( type-name ) { initializer-list , }
argument-expression-list:
argument-expression
argument-expression-list , argument-expression
primary-expression:
identifier
constant
string-literal
( expression )
GNU extensions:
primary-expression:
__func__
(treated as a keyword in GNU C)
__FUNCTION__
__PRETTY_FUNCTION__
( compound-statement )
__builtin_va_arg ( assignment-expression , type-name )
__builtin_offsetof ( type-name , offsetof-member-designator )
__builtin_choose_expr ( assignment-expression ,
assignment-expression ,
assignment-expression )
__builtin_types_compatible_p ( type-name , type-name )
offsetof-member-designator:
identifier
offsetof-member-designator . identifier
offsetof-member-designator [ expression ]
Objective-C:
primary-expression:
[ objc-receiver objc-message-args ]
@selector ( objc-selector-arg )
@protocol ( identifier )
@encode ( type-name )
objc-string-literal
*/
static struct c_expr
c_parser_postfix_expression (c_parser *parser)
{
  struct c_expr expr, e1, e2, e3;
  struct c_type_name *t1, *t2;
  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_NUMBER:
    case CPP_CHAR:
    case CPP_WCHAR:
      /* Numeric or character constant: the token carries its tree
	 value directly.  */
      expr.value = c_parser_peek_token (parser)->value;
      expr.original_code = ERROR_MARK;
      c_parser_consume_token (parser);
      break;
    case CPP_STRING:
    case CPP_WSTRING:
      /* String literal; record STRING_CST as the original code.  */
      expr.value = c_parser_peek_token (parser)->value;
      expr.original_code = STRING_CST;
      c_parser_consume_token (parser);
      break;
    case CPP_OBJC_STRING:
      /* Objective-C @"..." string object.  */
      gcc_assert (c_dialect_objc ());
      expr.value
	= objc_build_string_object (c_parser_peek_token (parser)->value);
      expr.original_code = ERROR_MARK;
      c_parser_consume_token (parser);
      break;
    case CPP_NAME:
      /* Only an ordinary identifier can start an expression.  */
      if (c_parser_peek_token (parser)->id_kind != C_ID_ID)
	{
	  c_parser_error (parser, "expected expression");
	  expr.value = error_mark_node;
	  expr.original_code = ERROR_MARK;
	  break;
	}
      {
	tree id = c_parser_peek_token (parser)->value;
	location_t loc = c_parser_peek_token (parser)->location;
	c_parser_consume_token (parser);
	/* Pass along whether the identifier is immediately followed
	   by %<(%>.  */
	expr.value = build_external_ref (id,
					 (c_parser_peek_token (parser)->type
					  == CPP_OPEN_PAREN), loc);
	expr.original_code = ERROR_MARK;
      }
      break;
    case CPP_OPEN_PAREN:
      /* A parenthesized expression, statement expression or compound
	 literal.  */
      if (c_parser_peek_2nd_token (parser)->type == CPP_OPEN_BRACE)
	{
	  /* A statement expression, GNU %<({ ... })%>.  */
	  tree stmt;
	  c_parser_consume_token (parser);
	  c_parser_consume_token (parser);
	  if (cur_stmt_list == NULL)
	    {
	      /* Not inside a function body: diagnose and skip past the
		 closing brace and paren to recover.  */
	      error ("braced-group within expression allowed "
		     "only inside a function");
	      parser->error = true;
	      c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, NULL);
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      break;
	    }
	  stmt = c_begin_stmt_expr ();
	  c_parser_compound_statement_nostart (parser);
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  if (pedantic)
	    pedwarn ("ISO C forbids braced-groups within expressions");
	  expr.value = c_finish_stmt_expr (stmt);
	  expr.original_code = ERROR_MARK;
	}
      else if (c_token_starts_typename (c_parser_peek_2nd_token (parser)))
	{
	  /* A compound literal.  ??? Can we actually get here rather
	     than going directly to
	     c_parser_postfix_expression_after_paren_type from
	     elsewhere?  */
	  struct c_type_name *type_name;
	  c_parser_consume_token (parser);
	  type_name = c_parser_type_name (parser);
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  if (type_name == NULL)
	    {
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	    }
	  else
	    expr = c_parser_postfix_expression_after_paren_type (parser,
								 type_name);
	}
      else
	{
	  /* A parenthesized expression.  */
	  c_parser_consume_token (parser);
	  expr = c_parser_expression (parser);
	  /* Mark a parenthesized assignment so that later warnings on
	     the tree are suppressed.  */
	  if (TREE_CODE (expr.value) == MODIFY_EXPR)
	    TREE_NO_WARNING (expr.value) = 1;
	  expr.original_code = ERROR_MARK;
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	}
      break;
    case CPP_KEYWORD:
      switch (c_parser_peek_token (parser)->keyword)
	{
	case RID_FUNCTION_NAME:
	case RID_PRETTY_FUNCTION_NAME:
	case RID_C99_FUNCTION_NAME:
	  /* __FUNCTION__, __PRETTY_FUNCTION__, __func__.  */
	  expr.value = fname_decl (c_parser_peek_token (parser)->keyword,
				   c_parser_peek_token (parser)->value);
	  expr.original_code = ERROR_MARK;
	  c_parser_consume_token (parser);
	  break;
	case RID_VA_ARG:
	  /* __builtin_va_arg ( assignment-expression , type-name ).  */
	  c_parser_consume_token (parser);
	  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	    {
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      break;
	    }
	  e1 = c_parser_expr_no_commas (parser, NULL);
	  if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>"))
	    {
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      break;
	    }
	  t1 = c_parser_type_name (parser);
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  if (t1 == NULL)
	    {
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	    }
	  else
	    {
	      expr.value = build_va_arg (e1.value, groktypename (t1));
	      expr.original_code = ERROR_MARK;
	    }
	  break;
	case RID_OFFSETOF:
	  /* __builtin_offsetof ( type-name , member-designator ).  */
	  c_parser_consume_token (parser);
	  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	    {
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      break;
	    }
	  t1 = c_parser_type_name (parser);
	  if (t1 == NULL)
	    {
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      break;
	    }
	  if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>"))
	    {
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      break;
	    }
	  {
	    tree type = groktypename (t1);
	    tree offsetof_ref;
	    if (type == error_mark_node)
	      offsetof_ref = error_mark_node;
	    else
	      /* Build an INDIRECT_REF of a null pointer of the given
		 type as the base for the member references below.  */
	      offsetof_ref = build1 (INDIRECT_REF, type, null_pointer_node);
	    /* Parse the second argument to __builtin_offsetof.  We
	       must have one identifier, and beyond that we want to
	       accept sub structure and sub array references.  */
	    if (c_parser_next_token_is (parser, CPP_NAME))
	      {
		offsetof_ref = build_component_ref
		  (offsetof_ref, c_parser_peek_token (parser)->value);
		c_parser_consume_token (parser);
		/* Accept any sequence of .member and [index]
		   designators after the first identifier.  */
		while (c_parser_next_token_is (parser, CPP_DOT)
		       || c_parser_next_token_is (parser,
						  CPP_OPEN_SQUARE))
		  {
		    if (c_parser_next_token_is (parser, CPP_DOT))
		      {
			c_parser_consume_token (parser);
			if (c_parser_next_token_is_not (parser,
							CPP_NAME))
			  {
			    c_parser_error (parser, "expected identifier");
			    break;
			  }
			offsetof_ref = build_component_ref
			  (offsetof_ref,
			   c_parser_peek_token (parser)->value);
			c_parser_consume_token (parser);
		      }
		    else
		      {
			tree idx;
			c_parser_consume_token (parser);
			idx = c_parser_expression (parser).value;
			c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
						   "expected %<]%>");
			offsetof_ref = build_array_ref (offsetof_ref, idx);
		      }
		  }
	      }
	    else
	      c_parser_error (parser, "expected identifier");
	    c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				       "expected %<)%>");
	    expr.value = fold_offsetof (offsetof_ref, NULL_TREE);
	    expr.original_code = ERROR_MARK;
	  }
	  break;
	case RID_CHOOSE_EXPR:
	  /* __builtin_choose_expr ( e1 , e2 , e3 ): e1 must fold to an
	     integer constant; the result is e2 if it is nonzero, e3
	     otherwise.  */
	  c_parser_consume_token (parser);
	  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	    {
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      break;
	    }
	  e1 = c_parser_expr_no_commas (parser, NULL);
	  if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>"))
	    {
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      break;
	    }
	  e2 = c_parser_expr_no_commas (parser, NULL);
	  if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>"))
	    {
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      break;
	    }
	  e3 = c_parser_expr_no_commas (parser, NULL);
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  {
	    tree c;
	    c = fold (e1.value);
	    if (TREE_CODE (c) != INTEGER_CST)
	      error ("first argument to %<__builtin_choose_expr%> not"
		     " a constant");
	    expr = integer_zerop (c) ? e3 : e2;
	  }
	  break;
	case RID_TYPES_COMPATIBLE_P:
	  /* __builtin_types_compatible_p ( type-name , type-name ).  */
	  c_parser_consume_token (parser);
	  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	    {
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      break;
	    }
	  t1 = c_parser_type_name (parser);
	  if (t1 == NULL)
	    {
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      break;
	    }
	  if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>"))
	    {
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      break;
	    }
	  t2 = c_parser_type_name (parser);
	  if (t2 == NULL)
	    {
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      break;
	    }
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  {
	    tree e1, e2;
	    /* Compare the main variants of the two types, yielding
	       integer constant 1 or 0.  */
	    e1 = TYPE_MAIN_VARIANT (groktypename (t1));
	    e2 = TYPE_MAIN_VARIANT (groktypename (t2));
	    expr.value = comptypes (e1, e2)
	      ? build_int_cst (NULL_TREE, 1)
	      : build_int_cst (NULL_TREE, 0);
	    expr.original_code = ERROR_MARK;
	  }
	  break;
	case RID_AT_SELECTOR:
	  /* Objective-C @selector ( objc-selector-arg ).  */
	  gcc_assert (c_dialect_objc ());
	  c_parser_consume_token (parser);
	  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	    {
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      break;
	    }
	  {
	    tree sel = c_parser_objc_selector_arg (parser);
	    c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				       "expected %<)%>");
	    expr.value = objc_build_selector_expr (sel);
	    expr.original_code = ERROR_MARK;
	  }
	  break;
	case RID_AT_PROTOCOL:
	  /* Objective-C @protocol ( identifier ).  */
	  gcc_assert (c_dialect_objc ());
	  c_parser_consume_token (parser);
	  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	    {
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      break;
	    }
	  if (c_parser_next_token_is_not (parser, CPP_NAME))
	    {
	      c_parser_error (parser, "expected identifier");
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      break;
	    }
	  {
	    tree id = c_parser_peek_token (parser)->value;
	    c_parser_consume_token (parser);
	    c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				       "expected %<)%>");
	    expr.value = objc_build_protocol_expr (id);
	    expr.original_code = ERROR_MARK;
	  }
	  break;
	case RID_AT_ENCODE:
	  /* Extension to support C-structures in the archiver.  */
	  gcc_assert (c_dialect_objc ());
	  c_parser_consume_token (parser);
	  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	    {
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      break;
	    }
	  t1 = c_parser_type_name (parser);
	  if (t1 == NULL)
	    {
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      break;
	    }
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  {
	    tree type = groktypename (t1);
	    expr.value = objc_build_encode_expr (type);
	    expr.original_code = ERROR_MARK;
	  }
	  break;
	default:
	  c_parser_error (parser, "expected expression");
	  expr.value = error_mark_node;
	  expr.original_code = ERROR_MARK;
	  break;
	}
      break;
    case CPP_OPEN_SQUARE:
      /* In Objective-C, '[' begins a message expression.  */
      if (c_dialect_objc ())
	{
	  tree receiver, args;
	  c_parser_consume_token (parser);
	  receiver = c_parser_objc_receiver (parser);
	  args = c_parser_objc_message_args (parser);
	  c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
				     "expected %<]%>");
	  expr.value = objc_build_message_expr (build_tree_list (receiver,
								 args));
	  expr.original_code = ERROR_MARK;
	  break;
	}
      /* Else fall through to report error.  */
    default:
      c_parser_error (parser, "expected expression");
      expr.value = error_mark_node;
      expr.original_code = ERROR_MARK;
      break;
    }
  /* The primary is followed by any number of postfix operators.  */
  return c_parser_postfix_expression_after_primary (parser, expr);
}
/* Parse a postfix expression after a parenthesized type name: the
brace-enclosed initializer of a compound literal, possibly followed
by some postfix operators. This is separate because it is not
possible to tell until after the type name whether a cast
expression has a cast or a compound literal, or whether the operand
of sizeof is a parenthesized type name or starts with a compound
literal. */
static struct c_expr
c_parser_postfix_expression_after_paren_type (c_parser *parser,
					      struct c_type_name *type_name)
{
  tree literal_type;
  struct c_expr braced;
  struct c_expr result;
  start_init (NULL_TREE, NULL, 0);
  literal_type = groktypename (type_name);
  /* A compound literal may not have a variably sized type.  */
  if (literal_type != error_mark_node
      && C_TYPE_VARIABLE_SIZE (literal_type))
    {
      error ("compound literal has variable size");
      literal_type = error_mark_node;
    }
  braced = c_parser_braced_init (parser, literal_type, false);
  finish_init ();
  maybe_warn_string_init (literal_type, braced);
  if (pedantic && !flag_isoc99)
    pedwarn ("ISO C90 forbids compound literals");
  result.value = build_compound_literal (literal_type, braced.value);
  result.original_code = ERROR_MARK;
  /* The literal may itself be followed by postfix operators.  */
  return c_parser_postfix_expression_after_primary (parser, result);
}
/* Parse a postfix expression after the initial primary or compound
literal; that is, parse a series of postfix operators. */
static struct c_expr
c_parser_postfix_expression_after_primary (c_parser *parser,
					   struct c_expr expr)
{
  tree ident, idx, exprlist;
  /* Keep consuming postfix operators until the next token cannot
     continue the postfix expression, then return the accumulated
     expression.  */
  while (true)
    {
      switch (c_parser_peek_token (parser)->type)
	{
	case CPP_OPEN_SQUARE:
	  /* Array reference.  */
	  c_parser_consume_token (parser);
	  idx = c_parser_expression (parser).value;
	  c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
				     "expected %<]%>");
	  expr.value = build_array_ref (expr.value, idx);
	  expr.original_code = ERROR_MARK;
	  break;
	case CPP_OPEN_PAREN:
	  /* Function call.  */
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	    exprlist = NULL_TREE;
	  else
	    /* true: arguments undergo function/array-to-pointer
	       decay.  */
	    exprlist = c_parser_expr_list (parser, true);
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  expr.value = build_function_call (expr.value, exprlist);
	  expr.original_code = ERROR_MARK;
	  break;
	case CPP_DOT:
	  /* Structure element reference.  */
	  c_parser_consume_token (parser);
	  /* The left operand decays before the member access is
	     built.  */
	  expr = default_function_array_conversion (expr);
	  if (c_parser_next_token_is (parser, CPP_NAME))
	    ident = c_parser_peek_token (parser)->value;
	  else
	    {
	      c_parser_error (parser, "expected identifier");
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      return expr;
	    }
	  c_parser_consume_token (parser);
	  expr.value = build_component_ref (expr.value, ident);
	  expr.original_code = ERROR_MARK;
	  break;
	case CPP_DEREF:
	  /* Structure element reference through a pointer ("->").  */
	  c_parser_consume_token (parser);
	  expr = default_function_array_conversion (expr);
	  if (c_parser_next_token_is (parser, CPP_NAME))
	    ident = c_parser_peek_token (parser)->value;
	  else
	    {
	      c_parser_error (parser, "expected identifier");
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      return expr;
	    }
	  c_parser_consume_token (parser);
	  /* a->b is built as (*a).b.  */
	  expr.value = build_component_ref (build_indirect_ref (expr.value,
								"->"), ident);
	  expr.original_code = ERROR_MARK;
	  break;
	case CPP_PLUS_PLUS:
	  /* Postincrement.  */
	  c_parser_consume_token (parser);
	  expr = default_function_array_conversion (expr);
	  expr.value = build_unary_op (POSTINCREMENT_EXPR, expr.value, 0);
	  expr.original_code = ERROR_MARK;
	  break;
	case CPP_MINUS_MINUS:
	  /* Postdecrement.  */
	  c_parser_consume_token (parser);
	  expr = default_function_array_conversion (expr);
	  expr.value = build_unary_op (POSTDECREMENT_EXPR, expr.value, 0);
	  expr.original_code = ERROR_MARK;
	  break;
	default:
	  /* Not a postfix operator: the expression ends here.  */
	  return expr;
	}
    }
}
/* Parse an expression (C90 6.3.17, C99 6.5.17).
expression:
assignment-expression
expression , assignment-expression
*/
static struct c_expr
c_parser_expression (c_parser *parser)
{
  struct c_expr expr;
  expr = c_parser_expr_no_commas (parser, NULL);
  /* Fold successive comma operands left-to-right into a compound
     expression.  */
  while (c_parser_next_token_is (parser, CPP_COMMA))
    {
      struct c_expr next;
      c_parser_consume_token (parser);
      next = c_parser_expr_no_commas (parser, NULL);
      /* Only the right-hand operand decays here; it supplies the
	 value of the comma expression.  */
      next = default_function_array_conversion (next);
      expr.value = build_compound_expr (expr.value, next.value);
      expr.original_code = COMPOUND_EXPR;
    }
  return expr;
}
/* Parse an expression and convert functions or arrays to
pointers. */
static struct c_expr
c_parser_expression_conv (c_parser *parser)
{
  /* Parse a full (comma) expression, then apply the usual
     function-to-pointer and array-to-pointer conversions to its
     result before handing it back.  */
  return default_function_array_conversion (c_parser_expression (parser));
}
/* Parse a non-empty list of expressions. If CONVERT_P, convert
functions and arrays to pointers.
nonempty-expr-list:
assignment-expression
nonempty-expr-list , assignment-expression
*/
static tree
c_parser_expr_list (c_parser *parser, bool convert_p)
{
  struct c_expr expr;
  tree ret, cur;
  expr = c_parser_expr_no_commas (parser, NULL);
  if (convert_p)
    expr = default_function_array_conversion (expr);
  /* Build a TREE_LIST of the expressions, appending at CUR so the
     list stays in source order.  */
  ret = cur = build_tree_list (NULL_TREE, expr.value);
  while (c_parser_next_token_is (parser, CPP_COMMA))
    {
      c_parser_consume_token (parser);
      expr = c_parser_expr_no_commas (parser, NULL);
      if (convert_p)
	expr = default_function_array_conversion (expr);
      cur = TREE_CHAIN (cur) = build_tree_list (NULL_TREE, expr.value);
    }
  return ret;
}
/* Parse Objective-C-specific constructs. */
/* Parse an objc-class-definition.
objc-class-definition:
@interface identifier objc-superclass[opt] objc-protocol-refs[opt]
objc-class-instance-variables[opt] objc-methodprotolist @end
@implementation identifier objc-superclass[opt]
objc-class-instance-variables[opt]
@interface identifier ( identifier ) objc-protocol-refs[opt]
objc-methodprotolist @end
@implementation identifier ( identifier )
objc-superclass:
: identifier
"@interface identifier (" must start "@interface identifier (
identifier ) ...": objc-methodprotolist in the first production may
not start with a parenthesized identifier as a declarator of a data
definition with no declaration specifiers if the objc-superclass,
objc-protocol-refs and objc-class-instance-variables are omitted. */
static void
c_parser_objc_class_definition (c_parser *parser)
{
  bool iface_p;
  tree id1;
  tree superclass;
  if (c_parser_next_token_is_keyword (parser, RID_AT_INTERFACE))
    iface_p = true;
  else if (c_parser_next_token_is_keyword (parser, RID_AT_IMPLEMENTATION))
    iface_p = false;
  else
    gcc_unreachable ();
  c_parser_consume_token (parser);
  if (c_parser_next_token_is_not (parser, CPP_NAME))
    {
      c_parser_error (parser, "expected identifier");
      return;
    }
  id1 = c_parser_peek_token (parser)->value;
  c_parser_consume_token (parser);
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      /* "( identifier )" after the class name: this is a category
	 interface or implementation, not a plain class.  */
      tree id2;
      tree proto = NULL_TREE;
      c_parser_consume_token (parser);
      if (c_parser_next_token_is_not (parser, CPP_NAME))
	{
	  c_parser_error (parser, "expected identifier");
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	  return;
	}
      id2 = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
      if (!iface_p)
	{
	  /* A category implementation has no protocol refs or
	     methodprotolist to parse here.  */
	  objc_start_category_implementation (id1, id2);
	  return;
	}
      if (c_parser_next_token_is (parser, CPP_LESS))
	proto = c_parser_objc_protocol_refs (parser);
      objc_start_category_interface (id1, id2, proto);
      c_parser_objc_methodprotolist (parser);
      c_parser_require_keyword (parser, RID_AT_END, "expected %<@end%>");
      objc_finish_interface ();
      return;
    }
  /* Optional ": superclass".  */
  if (c_parser_next_token_is (parser, CPP_COLON))
    {
      c_parser_consume_token (parser);
      if (c_parser_next_token_is_not (parser, CPP_NAME))
	{
	  c_parser_error (parser, "expected identifier");
	  return;
	}
      superclass = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
    }
  else
    superclass = NULL_TREE;
  if (iface_p)
    {
      tree proto = NULL_TREE;
      if (c_parser_next_token_is (parser, CPP_LESS))
	proto = c_parser_objc_protocol_refs (parser);
      objc_start_class_interface (id1, superclass, proto);
    }
  else
    objc_start_class_implementation (id1, superclass);
  if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    c_parser_objc_class_instance_variables (parser);
  if (iface_p)
    {
      objc_continue_interface ();
      c_parser_objc_methodprotolist (parser);
      c_parser_require_keyword (parser, RID_AT_END, "expected %<@end%>");
      objc_finish_interface ();
    }
  else
    {
      /* NOTE: no @end is required here — for an @implementation the
	 parser returns and the terminator is evidently handled by the
	 caller's external-declaration processing.  */
      objc_continue_implementation ();
      return;
    }
}
/* Parse objc-class-instance-variables.
objc-class-instance-variables:
{ objc-instance-variable-decl-list[opt] }
objc-instance-variable-decl-list:
objc-visibility-spec
objc-instance-variable-decl ;
;
objc-instance-variable-decl-list objc-visibility-spec
objc-instance-variable-decl-list objc-instance-variable-decl ;
objc-instance-variable-decl-list ;
objc-visibility-spec:
@private
@protected
@public
objc-instance-variable-decl:
struct-declaration
*/
static void
c_parser_objc_class_instance_variables (c_parser *parser)
{
  gcc_assert (c_parser_next_token_is (parser, CPP_OPEN_BRACE));
  c_parser_consume_token (parser);
  /* Loop until the closing brace; the CPP_EOF test guards against an
     unterminated instance-variable block.  */
  while (c_parser_next_token_is_not (parser, CPP_EOF))
    {
      tree decls;
      /* Parse any stray semicolon.  */
      if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	{
	  if (pedantic)
	    pedwarn ("extra semicolon in struct or union specified");
	  c_parser_consume_token (parser);
	  continue;
	}
      /* Stop if at the end of the instance variables.  */
      if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
	{
	  c_parser_consume_token (parser);
	  break;
	}
      /* Parse any objc-visibility-spec.  */
      if (c_parser_next_token_is_keyword (parser, RID_AT_PRIVATE))
	{
	  c_parser_consume_token (parser);
	  /* @private maps to visibility code 2.  */
	  objc_set_visibility (2);
	  continue;
	}
      else if (c_parser_next_token_is_keyword (parser, RID_AT_PROTECTED))
	{
	  c_parser_consume_token (parser);
	  /* @protected maps to visibility code 0.  */
	  objc_set_visibility (0);
	  continue;
	}
      else if (c_parser_next_token_is_keyword (parser, RID_AT_PUBLIC))
	{
	  c_parser_consume_token (parser);
	  /* @public maps to visibility code 1.  */
	  objc_set_visibility (1);
	  continue;
	}
      else if (c_parser_next_token_is (parser, CPP_PRAGMA))
	{
	  c_parser_pragma (parser, pragma_external);
	  continue;
	}
      /* Parse some comma-separated declarations.  */
      decls = c_parser_struct_declaration (parser);
      {
	/* Comma-separated instance variables are chained together in
	   reverse order; add them one by one.  */
	tree ivar = nreverse (decls);
	for (; ivar; ivar = TREE_CHAIN (ivar))
	  objc_add_instance_variable (copy_node (ivar));
      }
      c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
    }
}
/* Parse an objc-class-declaration.
objc-class-declaration:
@class identifier-list ;
*/
static void
c_parser_objc_class_declaration (c_parser *parser)
{
  tree list = NULL_TREE;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_CLASS));
  c_parser_consume_token (parser);
  /* Any identifiers, including those declared as type names, are OK
     here.  */
  while (true)
    {
      tree id;
      if (c_parser_next_token_is_not (parser, CPP_NAME))
	{
	  c_parser_error (parser, "expected identifier");
	  break;
	}
      id = c_parser_peek_token (parser)->value;
      list = chainon (list, build_tree_list (NULL_TREE, id));
      c_parser_consume_token (parser);
      if (c_parser_next_token_is (parser, CPP_COMMA))
	c_parser_consume_token (parser);
      else
	break;
    }
  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
  /* Declare whatever was collected, even after a parse error.  */
  objc_declare_class (list);
}
/* Parse an objc-alias-declaration.
objc-alias-declaration:
@compatibility_alias identifier identifier ;
*/
static void
c_parser_objc_alias_declaration (c_parser *parser)
{
  tree id1, id2;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_ALIAS));
  c_parser_consume_token (parser);
  if (c_parser_next_token_is_not (parser, CPP_NAME))
    {
      c_parser_error (parser, "expected identifier");
      /* NULL message: resynchronize at ';' without a second
	 diagnostic.  */
      c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL);
      return;
    }
  id1 = c_parser_peek_token (parser)->value;
  c_parser_consume_token (parser);
  if (c_parser_next_token_is_not (parser, CPP_NAME))
    {
      c_parser_error (parser, "expected identifier");
      c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL);
      return;
    }
  id2 = c_parser_peek_token (parser)->value;
  c_parser_consume_token (parser);
  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
  objc_declare_alias (id1, id2);
}
/* Parse an objc-protocol-definition.
objc-protocol-definition:
@protocol identifier objc-protocol-refs[opt] objc-methodprotolist @end
@protocol identifier-list ;
"@protocol identifier ;" should be resolved as "@protocol
identifier-list ;": objc-methodprotolist may not start with a
semicolon in the first alternative if objc-protocol-refs are
omitted. */
static void
c_parser_objc_protocol_definition (c_parser *parser)
{
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_PROTOCOL));
  c_parser_consume_token (parser);
  if (c_parser_next_token_is_not (parser, CPP_NAME))
    {
      c_parser_error (parser, "expected identifier");
      return;
    }
  /* Two-token lookahead: a "," or ";" after the first identifier
     means this is a forward-declaration list, not a full protocol
     definition.  */
  if (c_parser_peek_2nd_token (parser)->type == CPP_COMMA
      || c_parser_peek_2nd_token (parser)->type == CPP_SEMICOLON)
    {
      tree list = NULL_TREE;
      /* Any identifiers, including those declared as type names, are
	 OK here.  */
      while (true)
	{
	  tree id;
	  if (c_parser_next_token_is_not (parser, CPP_NAME))
	    {
	      c_parser_error (parser, "expected identifier");
	      break;
	    }
	  id = c_parser_peek_token (parser)->value;
	  list = chainon (list, build_tree_list (NULL_TREE, id));
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_COMMA))
	    c_parser_consume_token (parser);
	  else
	    break;
	}
      c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
      objc_declare_protocols (list);
    }
  else
    {
      tree id = c_parser_peek_token (parser)->value;
      tree proto = NULL_TREE;
      c_parser_consume_token (parser);
      if (c_parser_next_token_is (parser, CPP_LESS))
	proto = c_parser_objc_protocol_refs (parser);
      /* Recognize protocol qualifiers while the body is parsed.  */
      objc_pq_context = 1;
      objc_start_protocol (id, proto);
      c_parser_objc_methodprotolist (parser);
      c_parser_require_keyword (parser, RID_AT_END, "expected %<@end%>");
      objc_pq_context = 0;
      objc_finish_interface ();
    }
}
/* Parse an objc-method-type.
objc-method-type:
+
-
*/
static enum tree_code
c_parser_objc_method_type (c_parser *parser)
{
  /* A '+' introduces a class method, a '-' an instance method; the
     distinction is encoded as PLUS_EXPR / MINUS_EXPR for the caller.
     The caller guarantees one of the two tokens is present.  */
  c_token *tok = c_parser_peek_token (parser);
  if (tok->type == CPP_PLUS)
    {
      c_parser_consume_token (parser);
      return PLUS_EXPR;
    }
  if (tok->type == CPP_MINUS)
    {
      c_parser_consume_token (parser);
      return MINUS_EXPR;
    }
  gcc_unreachable ();
}
/* Parse an objc-method-definition.
objc-method-definition:
objc-method-type objc-method-decl ;[opt] compound-statement
*/
static void
c_parser_objc_method_definition (c_parser *parser)
{
  enum tree_code type = c_parser_objc_method_type (parser);
  tree decl;
  objc_set_method_type (type);
  /* Recognize protocol qualifiers while the declaration is parsed.  */
  objc_pq_context = 1;
  decl = c_parser_objc_method_decl (parser);
  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
    {
      /* Tolerate (but diagnose when pedantic) a stray semicolon
	 between the declaration and the body.  */
      c_parser_consume_token (parser);
      if (pedantic)
	pedwarn ("extra semicolon in method definition specified");
    }
  if (!c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    {
      c_parser_error (parser, "expected %<{%>");
      return;
    }
  objc_pq_context = 0;
  objc_start_method_definition (decl);
  add_stmt (c_parser_compound_statement (parser));
  objc_finish_method_definition (current_function_decl);
}
/* Parse an objc-methodprotolist.
objc-methodprotolist:
empty
objc-methodprotolist objc-methodproto
objc-methodprotolist declaration
objc-methodprotolist ;
The declaration is a data definition, which may be missing
declaration specifiers under the same rules and diagnostics as
other data definitions outside functions, and the stray semicolon
is diagnosed the same way as a stray semicolon outside a
function. */
static void
c_parser_objc_methodprotolist (c_parser *parser)
{
  while (true)
    {
      /* The list is terminated by @end.  */
      switch (c_parser_peek_token (parser)->type)
	{
	case CPP_SEMICOLON:
	  if (pedantic)
	    pedwarn ("ISO C does not allow extra %<;%> outside of a function");
	  c_parser_consume_token (parser);
	  break;
	case CPP_PLUS:
	case CPP_MINUS:
	  /* A method prototype begins with its '+' or '-' type.  */
	  c_parser_objc_methodproto (parser);
	  break;
	case CPP_PRAGMA:
	  c_parser_pragma (parser, pragma_external);
	  break;
	case CPP_EOF:
	  return;
	default:
	  /* @end ends the list; anything else is parsed as an
	     ordinary external data definition.  */
	  if (c_parser_next_token_is_keyword (parser, RID_AT_END))
	    return;
	  c_parser_declaration_or_fndef (parser, false, true, false, true);
	  break;
	}
    }
}
/* Parse an objc-methodproto.
objc-methodproto:
objc-method-type objc-method-decl ;
*/
static void
c_parser_objc_methodproto (c_parser *parser)
{
  enum tree_code type = c_parser_objc_method_type (parser);
  tree decl;
  objc_set_method_type (type);
  /* Remember protocol qualifiers in prototypes.  */
  objc_pq_context = 1;
  decl = c_parser_objc_method_decl (parser);
  /* Forget protocol qualifiers here.  */
  objc_pq_context = 0;
  objc_add_method_declaration (decl);
  /* A prototype ends in ';'; skip ahead to it for recovery.  */
  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
}
/* Parse an objc-method-decl.
objc-method-decl:
( objc-type-name ) objc-selector
objc-selector
( objc-type-name ) objc-keyword-selector objc-optparmlist
objc-keyword-selector objc-optparmlist
objc-keyword-selector:
objc-keyword-decl
objc-keyword-selector objc-keyword-decl
objc-keyword-decl:
objc-selector : ( objc-type-name ) identifier
objc-selector : identifier
: ( objc-type-name ) identifier
: identifier
objc-optparmlist:
objc-optparms objc-optellipsis
objc-optparms:
empty
objc-opt-parms , parameter-declaration
objc-optellipsis:
empty
, ...
*/
static tree
c_parser_objc_method_decl (c_parser *parser)
{
  tree type = NULL_TREE;
  tree sel;
  tree parms = NULL_TREE;
  bool ellipsis = false;
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      /* Optional "( objc-type-name )" return type.  */
      c_parser_consume_token (parser);
      type = c_parser_objc_type_name (parser);
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
    }
  sel = c_parser_objc_selector (parser);
  /* If there is no selector, or a colon follows, we have an
     objc-keyword-selector.  If there is a selector, and a colon does
     not follow, that selector ends the objc-method-decl.  */
  if (!sel || c_parser_next_token_is (parser, CPP_COLON))
    {
      tree tsel = sel;
      tree list = NULL_TREE;
      /* Collect "selector : (type) identifier" keyword declarations
	 until neither a selector nor a bare colon follows.  */
      while (true)
	{
	  tree atype = NULL_TREE, id, keyworddecl;
	  if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
	    break;
	  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
	    {
	      /* Optional "( objc-type-name )" parameter type.  */
	      c_parser_consume_token (parser);
	      atype = c_parser_objc_type_name (parser);
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
					 "expected %<)%>");
	    }
	  if (c_parser_next_token_is_not (parser, CPP_NAME))
	    {
	      c_parser_error (parser, "expected identifier");
	      return error_mark_node;
	    }
	  id = c_parser_peek_token (parser)->value;
	  c_parser_consume_token (parser);
	  keyworddecl = objc_build_keyword_decl (tsel, atype, id);
	  list = chainon (list, keyworddecl);
	  tsel = c_parser_objc_selector (parser);
	  if (!tsel && c_parser_next_token_is_not (parser, CPP_COLON))
	    break;
	}
      /* Parse the optional parameter list.  Optional Objective-C
	 method parameters follow the C syntax, and may include '...'
	 to denote a variable number of arguments.  */
      parms = make_node (TREE_LIST);
      while (c_parser_next_token_is (parser, CPP_COMMA))
	{
	  struct c_parm *parm;
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_ELLIPSIS))
	    {
	      ellipsis = true;
	      c_parser_consume_token (parser);
	      break;
	    }
	  parm = c_parser_parameter_declaration (parser, NULL_TREE);
	  if (parm == NULL)
	    break;
	  parms = chainon (parms,
			   build_tree_list (NULL_TREE, grokparm (parm)));
	}
      /* The keyword-declaration chain becomes the selector.  */
      sel = list;
    }
  return objc_build_method_signature (type, sel, parms, ellipsis);
}
/* Parse an objc-type-name.
objc-type-name:
objc-type-qualifiers[opt] type-name
objc-type-qualifiers[opt]
objc-type-qualifiers:
objc-type-qualifier
objc-type-qualifiers objc-type-qualifier
objc-type-qualifier: one of
in out inout bycopy byref oneway
*/
static tree
c_parser_objc_type_name (c_parser *parser)
{
  tree quals = NULL_TREE;
  struct c_type_name *typename = NULL;
  tree type = NULL_TREE;
  /* Accumulate any leading protocol qualifiers.  */
  while (true)
    {
      c_token *token = c_parser_peek_token (parser);
      if (token->type == CPP_KEYWORD
	  && (token->keyword == RID_IN
	      || token->keyword == RID_OUT
	      || token->keyword == RID_INOUT
	      || token->keyword == RID_BYCOPY
	      || token->keyword == RID_BYREF
	      || token->keyword == RID_ONEWAY))
	{
	  quals = chainon (quals, build_tree_list (NULL_TREE, token->value));
	  c_parser_consume_token (parser);
	}
      else
	break;
    }
  /* The type name itself is optional (see grammar above).  */
  if (c_parser_next_token_starts_typename (parser))
    typename = c_parser_type_name (parser);
  if (typename)
    type = groktypename (typename);
  /* Result: TREE_PURPOSE = qualifier list, TREE_VALUE = type or
     NULL_TREE.  */
  return build_tree_list (quals, type);
}
/* Parse objc-protocol-refs.
objc-protocol-refs:
< identifier-list >
*/
static tree
c_parser_objc_protocol_refs (c_parser *parser)
{
  tree list = NULL_TREE;
  gcc_assert (c_parser_next_token_is (parser, CPP_LESS));
  c_parser_consume_token (parser);
  /* Any identifiers, including those declared as type names, are OK
     here.  */
  while (true)
    {
      tree id;
      if (c_parser_next_token_is_not (parser, CPP_NAME))
	{
	  c_parser_error (parser, "expected identifier");
	  break;
	}
      id = c_parser_peek_token (parser)->value;
      list = chainon (list, build_tree_list (NULL_TREE, id));
      c_parser_consume_token (parser);
      if (c_parser_next_token_is (parser, CPP_COMMA))
	c_parser_consume_token (parser);
      else
	break;
    }
  /* c_parser_require (not skip_until_found): no token skipping is
     done if '>' is missing.  */
  c_parser_require (parser, CPP_GREATER, "expected %<>%>");
  return list;
}
/* Parse an objc-try-catch-statement.
objc-try-catch-statement:
@try compound-statement objc-catch-list[opt]
@try compound-statement objc-catch-list[opt] @finally compound-statement
objc-catch-list:
@catch ( parameter-declaration ) compound-statement
objc-catch-list @catch ( parameter-declaration ) compound-statement
*/
static void
c_parser_objc_try_catch_statement (c_parser *parser)
{
  location_t loc;
  tree stmt;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_TRY));
  c_parser_consume_token (parser);
  /* Record the location of the @try body.  */
  loc = c_parser_peek_token (parser)->location;
  stmt = c_parser_compound_statement (parser);
  objc_begin_try_stmt (loc, stmt);
  /* Parse zero or more @catch clauses.  */
  while (c_parser_next_token_is_keyword (parser, RID_AT_CATCH))
    {
      struct c_parm *parm;
      c_parser_consume_token (parser);
      if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	break;
      parm = c_parser_parameter_declaration (parser, NULL_TREE);
      if (parm == NULL)
	{
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	  break;
	}
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
      objc_begin_catch_clause (grokparm (parm));
      /* The '{' is consumed here, so parse the body without its
	 opening brace.  */
      if (c_parser_require (parser, CPP_OPEN_BRACE, "expected %<{%>"))
	c_parser_compound_statement_nostart (parser);
      objc_finish_catch_clause ();
    }
  /* Optional @finally clause.  */
  if (c_parser_next_token_is_keyword (parser, RID_AT_FINALLY))
    {
      location_t finloc;
      tree finstmt;
      c_parser_consume_token (parser);
      finloc = c_parser_peek_token (parser)->location;
      finstmt = c_parser_compound_statement (parser);
      objc_build_finally_clause (finloc, finstmt);
    }
  objc_finish_try_stmt ();
}
/* Parse an objc-synchronized-statement.
objc-synchronized-statement:
@synchronized ( expression ) compound-statement
*/
static void
c_parser_objc_synchronized_statement (c_parser *parser)
{
  location_t loc;
  tree expr, stmt;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_SYNCHRONIZED));
  c_parser_consume_token (parser);
  loc = c_parser_peek_token (parser)->location;
  if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      expr = c_parser_expression (parser).value;
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
    }
  else
    /* No '(' — still parse the body below, with an error placeholder
       for the lock expression.  */
    expr = error_mark_node;
  stmt = c_parser_compound_statement (parser);
  objc_build_synchronized (loc, expr, stmt);
}
/* Parse an objc-selector; return NULL_TREE without an error if the
next token is not an objc-selector.
objc-selector:
identifier
one of
enum struct union if else while do for switch case default
break continue return goto asm sizeof typeof __alignof
unsigned long const short volatile signed restrict _Complex
in out inout bycopy byref oneway int char float double void _Bool
??? Why this selection of keywords but not, for example, storage
class specifiers? */
static tree
c_parser_objc_selector (c_parser *parser)
{
  c_token *token = c_parser_peek_token (parser);
  tree value = token->value;
  /* A plain identifier is always a selector.  */
  if (token->type == CPP_NAME)
    {
      c_parser_consume_token (parser);
      return value;
    }
  if (token->type != CPP_KEYWORD)
    return NULL_TREE;
  /* The keywords listed in the grammar comment above also act as
     selector names; consume them and return their identifier.  Any
     other keyword is not a selector and is left unconsumed.  */
  switch (token->keyword)
    {
    case RID_ENUM:
    case RID_STRUCT:
    case RID_UNION:
    case RID_IF:
    case RID_ELSE:
    case RID_WHILE:
    case RID_DO:
    case RID_FOR:
    case RID_SWITCH:
    case RID_CASE:
    case RID_DEFAULT:
    case RID_BREAK:
    case RID_CONTINUE:
    case RID_RETURN:
    case RID_GOTO:
    case RID_ASM:
    case RID_SIZEOF:
    case RID_TYPEOF:
    case RID_ALIGNOF:
    case RID_UNSIGNED:
    case RID_LONG:
    case RID_CONST:
    case RID_SHORT:
    case RID_VOLATILE:
    case RID_SIGNED:
    case RID_RESTRICT:
    case RID_COMPLEX:
    case RID_IN:
    case RID_OUT:
    case RID_INOUT:
    case RID_BYCOPY:
    case RID_BYREF:
    case RID_ONEWAY:
    case RID_INT:
    case RID_CHAR:
    case RID_FLOAT:
    case RID_DOUBLE:
    case RID_VOID:
    case RID_BOOL:
      c_parser_consume_token (parser);
      return value;
    default:
      return NULL_TREE;
    }
}
/* Parse an objc-selector-arg.
objc-selector-arg:
objc-selector
objc-keywordname-list
objc-keywordname-list:
objc-keywordname
objc-keywordname-list objc-keywordname
objc-keywordname:
objc-selector :
:
*/
static tree
c_parser_objc_selector_arg (c_parser *parser)
{
  tree sel = c_parser_objc_selector (parser);
  tree list = NULL_TREE;
  /* A selector not followed by ':' is the whole argument.  */
  if (sel && c_parser_next_token_is_not (parser, CPP_COLON))
    return sel;
  /* Otherwise accumulate "selector :" / ":" keyword names into a
     TREE_LIST keyed by selector.  */
  while (true)
    {
      if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
	return list;
      list = chainon (list, build_tree_list (sel, NULL_TREE));
      sel = c_parser_objc_selector (parser);
      if (!sel && c_parser_next_token_is_not (parser, CPP_COLON))
	break;
    }
  return list;
}
/* Parse an objc-receiver.
objc-receiver:
expression
class-name
type-name
*/
static tree
c_parser_objc_receiver (c_parser *parser)
{
  /* A type or class name in receiver position denotes the class
     object itself; anything else is an ordinary expression.  */
  c_token *tok = c_parser_peek_token (parser);

  if (tok->type == CPP_NAME
      && (tok->id_kind == C_ID_TYPENAME || tok->id_kind == C_ID_CLASSNAME))
    {
      tree name = tok->value;
      c_parser_consume_token (parser);
      return objc_get_class_reference (name);
    }
  return c_parser_expression (parser).value;
}
/* Parse objc-message-args.
objc-message-args:
objc-selector
objc-keywordarg-list
objc-keywordarg-list:
objc-keywordarg
objc-keywordarg-list objc-keywordarg
objc-keywordarg:
objc-selector : objc-keywordexpr
: objc-keywordexpr
*/
static tree
c_parser_objc_message_args (c_parser *parser)
{
  tree sel = c_parser_objc_selector (parser);
  tree list = NULL_TREE;
  /* A selector not followed by ':' is the whole message.  */
  if (sel && c_parser_next_token_is_not (parser, CPP_COLON))
    return sel;
  /* Otherwise collect "selector : expr" pairs, keyed by selector.  */
  while (true)
    {
      tree keywordexpr;
      if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
	return list;
      keywordexpr = c_parser_objc_keywordexpr (parser);
      list = chainon (list, build_tree_list (sel, keywordexpr));
      sel = c_parser_objc_selector (parser);
      if (!sel && c_parser_next_token_is_not (parser, CPP_COLON))
	break;
    }
  return list;
}
/* Parse an objc-keywordexpr.
objc-keywordexpr:
nonempty-expr-list
*/
static tree
c_parser_objc_keywordexpr (c_parser *parser)
{
  /* Parse a nonempty, decayed expression list.  */
  tree list = c_parser_expr_list (parser, true);

  /* A single expression is unwrapped from its TREE_LIST node to drop
     a level of indirection; a longer list is kept as-is and collapsed
     into a comma expression later.  */
  return TREE_CHAIN (list) == NULL_TREE ? TREE_VALUE (list) : list;
}
/* Handle pragmas.  Some OpenMP pragmas are associated with, and therefore
   should be considered, statements.  CONTEXT indicates where in the source
   we are (external scope, a compound statement, or a statement position)
   and hence which such pragmas are to be allowed.  Returns true if we
   actually parsed such a pragma.  */
static bool
c_parser_pragma (c_parser *parser, enum pragma_context context)
{
  unsigned int id;
  id = c_parser_peek_token (parser)->pragma_kind;
  gcc_assert (id != PRAGMA_NONE);
  switch (id)
    {
    case PRAGMA_OMP_BARRIER:
      /* Standalone directives are only valid directly inside a
	 compound statement.  */
      if (context != pragma_compound)
	{
	  if (context == pragma_stmt)
	    c_parser_error (parser, "%<#pragma omp barrier%> may only be "
			    "used in compound statements");
	  goto bad_stmt;
	}
      c_parser_omp_barrier (parser);
      return false;
    case PRAGMA_OMP_FLUSH:
      if (context != pragma_compound)
	{
	  if (context == pragma_stmt)
	    c_parser_error (parser, "%<#pragma omp flush%> may only be "
			    "used in compound statements");
	  goto bad_stmt;
	}
      c_parser_omp_flush (parser);
      return false;
    case PRAGMA_OMP_THREADPRIVATE:
      c_parser_omp_threadprivate (parser);
      return false;
    case PRAGMA_OMP_SECTION:
      /* "omp section" is parsed only from within the sections
	 construct; seeing it here is always an error.  */
      error ("%<#pragma omp section%> may only be used in "
	     "%<#pragma omp sections%> construct");
      c_parser_skip_until_found (parser, CPP_PRAGMA_EOL, NULL);
      return false;
    case PRAGMA_GCC_PCH_PREPROCESS:
      c_parser_error (parser, "%<#pragma GCC pch_preprocess%> must be first");
      c_parser_skip_until_found (parser, CPP_PRAGMA_EOL, NULL);
      return false;
    default:
      if (id < PRAGMA_FIRST_EXTERNAL)
	{
	  /* Remaining internal pragma ids are OpenMP constructs.
	     They act as statements, so they are invalid at file
	     scope.  */
	  if (context == pragma_external)
	    {
	    bad_stmt:
	      /* Also reached by goto from the directive cases
		 above.  */
	      c_parser_error (parser, "expected declaration specifiers");
	      c_parser_skip_until_found (parser, CPP_PRAGMA_EOL, NULL);
	      return false;
	    }
	  c_parser_omp_construct (parser);
	  /* The only path that parsed a statement-like pragma.  */
	  return true;
	}
      break;
    }
  /* Anything else: dispatch to the registered pragma handler.  */
  c_parser_consume_pragma (parser);
  c_invoke_pragma_handler (id);
  /* Skip to EOL, but suppress any error message.  Those will have been
     generated by the handler routine through calling error, as opposed
     to calling c_parser_error.  */
  parser->error = true;
  c_parser_skip_to_pragma_eol (parser);
  return false;
}
/* The interface the pragma parsers have to the lexer. */
enum cpp_ttype
pragma_lex (tree *value)
{
  /* Hand the next token's value and type to a pragma handler,
     consuming it unless it ends the pragma line.  */
  c_token *tok = c_parser_peek_token (the_parser);
  enum cpp_ttype kind = tok->type;

  *value = tok->value;
  /* End of the pragma (or of input): report EOF and leave the token
     in place.  */
  if (kind == CPP_PRAGMA_EOL || kind == CPP_EOF)
    return CPP_EOF;
  /* Keywords are reported to pragma handlers as plain names.  */
  if (kind == CPP_KEYWORD)
    kind = CPP_NAME;
  c_parser_consume_token (the_parser);
  return kind;
}
static void
c_parser_pragma_pch_preprocess (c_parser *parser)
{
  tree name = NULL;
  c_parser_consume_pragma (parser);
  /* The pragma's sole operand is a string literal naming the file.  */
  if (c_parser_next_token_is (parser, CPP_STRING))
    {
      name = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
    }
  else
    c_parser_error (parser, "expected string literal");
  /* Always resynchronize to the end of the pragma line.  */
  c_parser_skip_to_pragma_eol (parser);
  if (name)
    c_common_pch_pragma (parse_in, TREE_STRING_POINTER (name));
}
/* OpenMP 2.5 parsing routines. */
/* Returns name of the next clause.
If the clause is not recognized PRAGMA_OMP_CLAUSE_NONE is returned and
the token is not consumed. Otherwise appropriate pragma_omp_clause is
returned and the token is consumed. */
static pragma_omp_clause
c_parser_omp_clause_name (c_parser *parser)
{
  pragma_omp_clause result = PRAGMA_OMP_CLAUSE_NONE;
  /* "if" and "default" are C keywords, so they do not arrive as
     CPP_NAME and must be checked separately.  */
  if (c_parser_next_token_is_keyword (parser, RID_IF))
    result = PRAGMA_OMP_CLAUSE_IF;
  else if (c_parser_next_token_is_keyword (parser, RID_DEFAULT))
    result = PRAGMA_OMP_CLAUSE_DEFAULT;
  else if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      /* Dispatch on the first character to keep the strcmp chains
	 short.  */
      switch (p[0])
	{
	case 'c':
	  if (!strcmp ("copyin", p))
	    result = PRAGMA_OMP_CLAUSE_COPYIN;
	  else if (!strcmp ("copyprivate", p))
	    result = PRAGMA_OMP_CLAUSE_COPYPRIVATE;
	  break;
	case 'f':
	  if (!strcmp ("firstprivate", p))
	    result = PRAGMA_OMP_CLAUSE_FIRSTPRIVATE;
	  break;
	case 'l':
	  if (!strcmp ("lastprivate", p))
	    result = PRAGMA_OMP_CLAUSE_LASTPRIVATE;
	  break;
	case 'n':
	  if (!strcmp ("nowait", p))
	    result = PRAGMA_OMP_CLAUSE_NOWAIT;
	  else if (!strcmp ("num_threads", p))
	    result = PRAGMA_OMP_CLAUSE_NUM_THREADS;
	  break;
	case 'o':
	  if (!strcmp ("ordered", p))
	    result = PRAGMA_OMP_CLAUSE_ORDERED;
	  break;
	case 'p':
	  if (!strcmp ("private", p))
	    result = PRAGMA_OMP_CLAUSE_PRIVATE;
	  break;
	case 'r':
	  if (!strcmp ("reduction", p))
	    result = PRAGMA_OMP_CLAUSE_REDUCTION;
	  break;
	case 's':
	  if (!strcmp ("schedule", p))
	    result = PRAGMA_OMP_CLAUSE_SCHEDULE;
	  else if (!strcmp ("shared", p))
	    result = PRAGMA_OMP_CLAUSE_SHARED;
	  break;
	}
    }
  /* Consume the token only when it named a recognized clause.  */
  if (result != PRAGMA_OMP_CLAUSE_NONE)
    c_parser_consume_token (parser);
  return result;
}
/* Validate that a clause of the given type does not already exist. */
static void
check_no_duplicate_clause (tree clauses, enum tree_code code, const char *name)
{
  /* Walk the clause chain and complain once, at the first clause
     whose code matches CODE.  NAME is the user-visible clause name
     used in the diagnostic.  */
  tree c = clauses;
  while (c != NULL_TREE)
    {
      if (OMP_CLAUSE_CODE (c) == code)
	{
	  error ("too many %qs clauses", name);
	  return;
	}
      c = OMP_CLAUSE_CHAIN (c);
    }
}
/* OpenMP 2.5:
variable-list:
identifier
variable-list , identifier
If KIND is nonzero, create the appropriate node and install the decl
in OMP_CLAUSE_DECL and add the node to the head of the list.
If KIND is zero, create a TREE_LIST with the decl in TREE_PURPOSE;
return the list created. */
static tree
c_parser_omp_variable_list (c_parser *parser, enum omp_clause_code kind,
			    tree list)
{
  /* Diagnose an empty list up front; the loop below simply does not
     run in that case.  */
  if (c_parser_next_token_is_not (parser, CPP_NAME)
      || c_parser_peek_token (parser)->id_kind != C_ID_ID)
    c_parser_error (parser, "expected identifier");
  while (c_parser_next_token_is (parser, CPP_NAME)
	 && c_parser_peek_token (parser)->id_kind == C_ID_ID)
    {
      tree t = lookup_name (c_parser_peek_token (parser)->value);
      if (t == NULL_TREE)
	undeclared_variable (c_parser_peek_token (parser)->value,
			     c_parser_peek_token (parser)->location);
      else if (t == error_mark_node)
	/* Already diagnosed elsewhere; skip silently.  */
	;
      else if (kind != 0)
	{
	  /* Prepend an OMP_CLAUSE node of the requested kind.  */
	  tree u = build_omp_clause (kind);
	  OMP_CLAUSE_DECL (u) = t;
	  OMP_CLAUSE_CHAIN (u) = list;
	  list = u;
	}
      else
	/* KIND == 0: collect the decls in a plain TREE_LIST.  */
	list = tree_cons (t, NULL_TREE, list);
      c_parser_consume_token (parser);
      if (c_parser_next_token_is_not (parser, CPP_COMMA))
	break;
      c_parser_consume_token (parser);
    }
  return list;
}
/* Similarly, but expect leading and trailing parenthesis. This is a very
common case for omp clauses. */
/* Parse a parenthesized variable list, the common shape of most OMP
   clause arguments.  KIND is forwarded unchanged to
   c_parser_omp_variable_list (declared there as enum omp_clause_code;
   the parameter type here previously said enum tree_code, silently
   relying on implicit enum conversion — align the two so the
   signatures agree).  Returns LIST with the parsed entries added, or
   LIST unchanged if the opening parenthesis is missing.  */
static tree
c_parser_omp_var_list_parens (c_parser *parser, enum omp_clause_code kind,
			      tree list)
{
  if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      list = c_parser_omp_variable_list (parser, kind, list);
      /* Resynchronize at the closing paren even if the list was
	 malformed.  */
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
    }
  return list;
}
/* OpenMP 2.5:
copyin ( variable-list ) */
static tree
c_parser_omp_clause_copyin (c_parser *parser, tree list)
{
  /* copyin ( variable-list ): delegate to the shared parenthesized
     variable-list parser with the COPYIN clause code.  */
  tree clauses = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_COPYIN,
					       list);
  return clauses;
}
/* OpenMP 2.5:
copyprivate ( variable-list ) */
static tree
c_parser_omp_clause_copyprivate (c_parser *parser, tree list)
{
  /* copyprivate ( variable-list ): delegate to the shared
     parenthesized variable-list parser with the COPYPRIVATE clause
     code.  */
  tree clauses = c_parser_omp_var_list_parens (parser,
					       OMP_CLAUSE_COPYPRIVATE, list);
  return clauses;
}
/* OpenMP 2.5:
default ( shared | none ) */
static tree
c_parser_omp_clause_default (c_parser *parser, tree list)
{
  enum omp_clause_default_kind kind = OMP_CLAUSE_DEFAULT_UNSPECIFIED;
  tree c;

  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    return list;
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);

      /* Dispatch on the first character, then verify the full spelling
         of the default-kind keyword.  */
      switch (p[0])
        {
        case 'n':
          if (strcmp ("none", p) != 0)
            goto invalid_kind;
          kind = OMP_CLAUSE_DEFAULT_NONE;
          break;

        case 's':
          if (strcmp ("shared", p) != 0)
            goto invalid_kind;
          kind = OMP_CLAUSE_DEFAULT_SHARED;
          break;

        default:
          goto invalid_kind;
        }

      c_parser_consume_token (parser);
    }
  else
    {
      /* Note: also reached by goto from the switch above.  */
    invalid_kind:
      c_parser_error (parser, "expected %<none%> or %<shared%>");
    }
  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");

  /* On error, KIND is still UNSPECIFIED: return LIST unchanged.  */
  if (kind == OMP_CLAUSE_DEFAULT_UNSPECIFIED)
    return list;

  check_no_duplicate_clause (list, OMP_CLAUSE_DEFAULT, "default");
  c = build_omp_clause (OMP_CLAUSE_DEFAULT);
  OMP_CLAUSE_CHAIN (c) = list;
  OMP_CLAUSE_DEFAULT_KIND (c) = kind;
  return c;
}
/* OpenMP 2.5:
firstprivate ( variable-list ) */
static tree
c_parser_omp_clause_firstprivate (c_parser *parser, tree list)
{
  /* firstprivate ( variable-list ): one OMP_CLAUSE_FIRSTPRIVATE node
     per named variable, chained onto LIST.  */
  tree clauses;

  clauses = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_FIRSTPRIVATE,
                                          list);
  return clauses;
}
/* OpenMP 2.5:
if ( expression ) */
static tree
c_parser_omp_clause_if (c_parser *parser, tree list)
{
  tree expr, clause;

  /* if ( expression ): without the opening paren there is nothing to
     parse — diagnose and return LIST unchanged.  */
  if (!c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      c_parser_error (parser, "expected %<(%>");
      return list;
    }

  expr = c_parser_paren_condition (parser);
  check_no_duplicate_clause (list, OMP_CLAUSE_IF, "if");

  clause = build_omp_clause (OMP_CLAUSE_IF);
  OMP_CLAUSE_IF_EXPR (clause) = expr;
  OMP_CLAUSE_CHAIN (clause) = list;
  return clause;
}
/* OpenMP 2.5:
lastprivate ( variable-list ) */
static tree
c_parser_omp_clause_lastprivate (c_parser *parser, tree list)
{
  /* lastprivate ( variable-list ): one OMP_CLAUSE_LASTPRIVATE node per
     named variable, chained onto LIST.  */
  tree clauses;

  clauses = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_LASTPRIVATE,
                                          list);
  return clauses;
}
/* OpenMP 2.5:
nowait */
static tree
c_parser_omp_clause_nowait (c_parser *parser ATTRIBUTE_UNUSED, tree list)
{
  tree clause;

  /* "nowait" carries no arguments: reject a duplicate, then chain a
     bare OMP_CLAUSE_NOWAIT node onto the head of LIST.  */
  check_no_duplicate_clause (list, OMP_CLAUSE_NOWAIT, "nowait");

  clause = build_omp_clause (OMP_CLAUSE_NOWAIT);
  OMP_CLAUSE_CHAIN (clause) = list;
  return clause;
}
/* OpenMP 2.5:
num_threads ( expression ) */
static tree
c_parser_omp_clause_num_threads (c_parser *parser, tree list)
{
  if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      tree c, t = c_parser_expression (parser).value;

      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");

      /* The clause argument must be an integer expression.  */
      if (!INTEGRAL_TYPE_P (TREE_TYPE (t)))
        {
          c_parser_error (parser, "expected integer expression");
          return list;
        }

      /* Attempt to statically determine when the number isn't positive.  */
      c = fold_build2 (LE_EXPR, boolean_type_node, t,
                       build_int_cst (TREE_TYPE (t), 0));
      if (c == boolean_true_node)
        {
          /* Provably non-positive: warn and substitute 1 so later
             passes still see a valid thread count.  */
          warning (0, "%<num_threads%> value must be positive");
          t = integer_one_node;
        }

      check_no_duplicate_clause (list, OMP_CLAUSE_NUM_THREADS, "num_threads");

      c = build_omp_clause (OMP_CLAUSE_NUM_THREADS);
      OMP_CLAUSE_NUM_THREADS_EXPR (c) = t;
      OMP_CLAUSE_CHAIN (c) = list;
      list = c;
    }

  return list;
}
/* OpenMP 2.5:
ordered */
static tree
c_parser_omp_clause_ordered (c_parser *parser ATTRIBUTE_UNUSED, tree list)
{
  tree clause;

  /* "ordered" takes no arguments: reject a duplicate, then chain a
     bare OMP_CLAUSE_ORDERED node onto the head of LIST.  */
  check_no_duplicate_clause (list, OMP_CLAUSE_ORDERED, "ordered");

  clause = build_omp_clause (OMP_CLAUSE_ORDERED);
  OMP_CLAUSE_CHAIN (clause) = list;
  return clause;
}
/* OpenMP 2.5:
private ( variable-list ) */
static tree
c_parser_omp_clause_private (c_parser *parser, tree list)
{
  /* private ( variable-list ): one OMP_CLAUSE_PRIVATE node per named
     variable, chained onto LIST.  */
  tree clauses;

  clauses = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_PRIVATE, list);
  return clauses;
}
/* OpenMP 2.5:
reduction ( reduction-operator : variable-list )
reduction-operator:
One of: + * - & ^ | && || */
static tree
c_parser_omp_clause_reduction (c_parser *parser, tree list)
{
  if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      enum tree_code code;

      /* Map the reduction-operator token to its tree code.  */
      switch (c_parser_peek_token (parser)->type)
        {
        case CPP_PLUS:
          code = PLUS_EXPR;
          break;
        case CPP_MULT:
          code = MULT_EXPR;
          break;
        case CPP_MINUS:
          code = MINUS_EXPR;
          break;
        case CPP_AND:
          code = BIT_AND_EXPR;
          break;
        case CPP_XOR:
          code = BIT_XOR_EXPR;
          break;
        case CPP_OR:
          code = BIT_IOR_EXPR;
          break;
        case CPP_AND_AND:
          code = TRUTH_ANDIF_EXPR;
          break;
        case CPP_OR_OR:
          code = TRUTH_ORIF_EXPR;
          break;
        default:
          c_parser_error (parser,
                          "expected %<+%>, %<*%>, %<-%>, %<&%>, "
                          "%<^%>, %<|%>, %<&&%>, or %<||%>");
          c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, 0);
          return list;
        }
      c_parser_consume_token (parser);
      if (c_parser_require (parser, CPP_COLON, "expected %<:%>"))
        {
          tree nl, c;

          /* Parse the variable list, then stamp the operator onto just
             the newly added clauses — i.e. the nodes between the new
             head NL and the old head LIST.  */
          nl = c_parser_omp_variable_list (parser, OMP_CLAUSE_REDUCTION, list);
          for (c = nl; c != list; c = OMP_CLAUSE_CHAIN (c))
            OMP_CLAUSE_REDUCTION_CODE (c) = code;

          list = nl;
        }
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
    }
  return list;
}
/* OpenMP 2.5:
schedule ( schedule-kind )
schedule ( schedule-kind , expression )
schedule-kind:
static | dynamic | guided | runtime
*/
static tree
c_parser_omp_clause_schedule (c_parser *parser, tree list)
{
  tree c, t;

  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    return list;

  c = build_omp_clause (OMP_CLAUSE_SCHEDULE);

  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      tree kind = c_parser_peek_token (parser)->value;
      const char *p = IDENTIFIER_POINTER (kind);

      /* Dispatch on the first character, then verify the full spelling.
         "static" lexes as a keyword and is handled separately below.  */
      switch (p[0])
        {
        case 'd':
          if (strcmp ("dynamic", p) != 0)
            goto invalid_kind;
          OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_DYNAMIC;
          break;

        case 'g':
          if (strcmp ("guided", p) != 0)
            goto invalid_kind;
          OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_GUIDED;
          break;

        case 'r':
          if (strcmp ("runtime", p) != 0)
            goto invalid_kind;
          OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_RUNTIME;
          break;

        default:
          goto invalid_kind;
        }
    }
  else if (c_parser_next_token_is_keyword (parser, RID_STATIC))
    OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_STATIC;
  else
    goto invalid_kind;
  c_parser_consume_token (parser);

  if (c_parser_next_token_is (parser, CPP_COMMA))
    {
      /* Optional ", chunk_size" — rejected for schedule(runtime).  */
      c_parser_consume_token (parser);

      t = c_parser_expr_no_commas (parser, NULL).value;

      if (OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_RUNTIME)
        error ("schedule %<runtime%> does not take "
               "a %<chunk_size%> parameter");
      else if (TREE_CODE (TREE_TYPE (t)) == INTEGER_TYPE)
        OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (c) = t;
      else
        c_parser_error (parser, "expected integer expression");

      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
    }
  else
    c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
                               "expected %<,%> or %<)%>");

  check_no_duplicate_clause (list, OMP_CLAUSE_SCHEDULE, "schedule");

  OMP_CLAUSE_CHAIN (c) = list;
  return c;

  /* On an invalid kind, C is simply abandoned (GC reclaims it) and
     LIST is returned unchanged.  */
 invalid_kind:
  c_parser_error (parser, "invalid schedule kind");
  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, 0);
  return list;
}
/* OpenMP 2.5:
shared ( variable-list ) */
static tree
c_parser_omp_clause_shared (c_parser *parser, tree list)
{
  /* shared ( variable-list ): one OMP_CLAUSE_SHARED node per named
     variable, chained onto LIST.  */
  tree clauses;

  clauses = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_SHARED, list);
  return clauses;
}
/* Parse all OpenMP clauses. The set clauses allowed by the directive
is a bitmask in MASK. Return the list of clauses found; the result
of clause default goes in *pdefault. */
static tree
c_parser_omp_all_clauses (c_parser *parser, unsigned int mask,
                          const char *where)
{
  tree clauses = NULL;

  while (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL))
    {
      const pragma_omp_clause c_kind = c_parser_omp_clause_name (parser);
      const char *c_name;
      /* Remember the old list head so clauses added by an invalid
         directive/clause combination can be stripped again below.  */
      tree prev = clauses;

      switch (c_kind)
        {
        case PRAGMA_OMP_CLAUSE_COPYIN:
          clauses = c_parser_omp_clause_copyin (parser, clauses);
          c_name = "copyin";
          break;
        case PRAGMA_OMP_CLAUSE_COPYPRIVATE:
          clauses = c_parser_omp_clause_copyprivate (parser, clauses);
          c_name = "copyprivate";
          break;
        case PRAGMA_OMP_CLAUSE_DEFAULT:
          clauses = c_parser_omp_clause_default (parser, clauses);
          c_name = "default";
          break;
        case PRAGMA_OMP_CLAUSE_FIRSTPRIVATE:
          clauses = c_parser_omp_clause_firstprivate (parser, clauses);
          c_name = "firstprivate";
          break;
        case PRAGMA_OMP_CLAUSE_IF:
          clauses = c_parser_omp_clause_if (parser, clauses);
          c_name = "if";
          break;
        case PRAGMA_OMP_CLAUSE_LASTPRIVATE:
          clauses = c_parser_omp_clause_lastprivate (parser, clauses);
          c_name = "lastprivate";
          break;
        case PRAGMA_OMP_CLAUSE_NOWAIT:
          clauses = c_parser_omp_clause_nowait (parser, clauses);
          c_name = "nowait";
          break;
        case PRAGMA_OMP_CLAUSE_NUM_THREADS:
          clauses = c_parser_omp_clause_num_threads (parser, clauses);
          c_name = "num_threads";
          break;
        case PRAGMA_OMP_CLAUSE_ORDERED:
          clauses = c_parser_omp_clause_ordered (parser, clauses);
          c_name = "ordered";
          break;
        case PRAGMA_OMP_CLAUSE_PRIVATE:
          clauses = c_parser_omp_clause_private (parser, clauses);
          c_name = "private";
          break;
        case PRAGMA_OMP_CLAUSE_REDUCTION:
          clauses = c_parser_omp_clause_reduction (parser, clauses);
          c_name = "reduction";
          break;
        case PRAGMA_OMP_CLAUSE_SCHEDULE:
          clauses = c_parser_omp_clause_schedule (parser, clauses);
          c_name = "schedule";
          break;
        case PRAGMA_OMP_CLAUSE_SHARED:
          clauses = c_parser_omp_clause_shared (parser, clauses);
          c_name = "shared";
          break;
        default:
          c_parser_error (parser, "expected %<#pragma omp%> clause");
          goto saw_error;
        }

      /* MASK has one bit per clause kind this directive accepts; a
         clause outside the mask is parsed (above) but then discarded,
         unless a parse error already produced a diagnostic.  */
      if (((mask >> c_kind) & 1) == 0 && !parser->error)
        {
          /* Remove the invalid clause(s) from the list to avoid
             confusing the rest of the compiler.  */
          clauses = prev;
          error ("%qs is not valid for %qs", c_name, where);
        }
    }

 saw_error:
  c_parser_skip_to_pragma_eol (parser);
  return c_finish_omp_clauses (clauses);
}
/* OpenMP 2.5:
structured-block:
statement
In practice, we're also interested in adding the statement to an
outer node. So it is convenient if we work around the fact that
c_parser_statement calls add_stmt. */
static tree
c_parser_omp_structured_block (c_parser *parser)
{
  /* Divert the statement into a private statement list — undoing the
     add_stmt that c_parser_statement performs — so the caller can
     attach the block to its own OpenMP construct instead.  */
  tree block = push_stmt_list ();

  c_parser_statement (parser);
  block = pop_stmt_list (block);
  return block;
}
/* OpenMP 2.5:
# pragma omp atomic new-line
expression-stmt
expression-stmt:
x binop= expr | x++ | ++x | x-- | --x
binop:
+, *, -, /, &, ^, |, <<, >>
where x is an lvalue expression with scalar type. */
static void
c_parser_omp_atomic (c_parser *parser)
{
  tree lhs, rhs;
  tree stmt;
  enum tree_code code;

  c_parser_skip_to_pragma_eol (parser);

  /* The statement must have one of the forms
       x binop= expr | x++ | ++x | x-- | --x
     so parse a unary-expression first and classify it.  */
  lhs = c_parser_unary_expression (parser).value;
  switch (TREE_CODE (lhs))
    {
    case ERROR_MARK:
    saw_error:
      c_parser_skip_to_end_of_block_or_statement (parser);
      return;

    case PREINCREMENT_EXPR:
    case POSTINCREMENT_EXPR:
      /* x++ / ++x are handled as x += 1.  */
      lhs = TREE_OPERAND (lhs, 0);
      code = PLUS_EXPR;
      rhs = integer_one_node;
      break;

    case PREDECREMENT_EXPR:
    case POSTDECREMENT_EXPR:
      /* x-- / --x are handled as x -= 1.  */
      lhs = TREE_OPERAND (lhs, 0);
      code = MINUS_EXPR;
      rhs = integer_one_node;
      break;

    default:
      /* Otherwise the next token must be one of the compound-assignment
         operators; map it to the corresponding tree code.  */
      switch (c_parser_peek_token (parser)->type)
        {
        case CPP_MULT_EQ:
          code = MULT_EXPR;
          break;
        case CPP_DIV_EQ:
          code = TRUNC_DIV_EXPR;
          break;
        case CPP_PLUS_EQ:
          code = PLUS_EXPR;
          break;
        case CPP_MINUS_EQ:
          code = MINUS_EXPR;
          break;
        case CPP_LSHIFT_EQ:
          code = LSHIFT_EXPR;
          break;
        case CPP_RSHIFT_EQ:
          code = RSHIFT_EXPR;
          break;
        case CPP_AND_EQ:
          code = BIT_AND_EXPR;
          break;
        case CPP_OR_EQ:
          code = BIT_IOR_EXPR;
          break;
        case CPP_XOR_EQ:
          code = BIT_XOR_EXPR;
          break;
        default:
          c_parser_error (parser,
                          "invalid operator for %<#pragma omp atomic%>");
          goto saw_error;
        }
      c_parser_consume_token (parser);
      rhs = c_parser_expression (parser).value;
      break;
    }

  stmt = c_finish_omp_atomic (code, lhs, rhs);
  if (stmt != error_mark_node)
    add_stmt (stmt);
  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
}
/* OpenMP 2.5:
# pragma omp barrier new-line
*/
static void
c_parser_omp_barrier (c_parser *parser)
{
  /* "barrier" takes no clauses: consume the pragma token and the rest
     of the pragma line, then emit the barrier.  */
  c_parser_consume_pragma (parser);
  c_parser_skip_to_pragma_eol (parser);

  c_finish_omp_barrier ();
}
/* OpenMP 2.5:
# pragma omp critical [(name)] new-line
structured-block
*/
static tree
c_parser_omp_critical (c_parser *parser)
{
  tree stmt, name = NULL;

  /* Parse the optional "( name )" that identifies the critical region;
     an unnamed region leaves NAME as NULL.  */
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      c_parser_consume_token (parser);
      if (c_parser_next_token_is (parser, CPP_NAME))
        {
          name = c_parser_peek_token (parser)->value;
          c_parser_consume_token (parser);
          c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>");
        }
      else
        c_parser_error (parser, "expected identifier");
    }
  else if (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL))
    c_parser_error (parser, "expected %<(%> or end of line");
  c_parser_skip_to_pragma_eol (parser);

  stmt = c_parser_omp_structured_block (parser);
  return c_finish_omp_critical (stmt, name);
}
/* OpenMP 2.5:
# pragma omp flush flush-vars[opt] new-line
flush-vars:
( variable-list ) */
static void
c_parser_omp_flush (c_parser *parser)
{
  c_parser_consume_pragma (parser);

  /* The flush-vars list is optional.  KIND of 0 means the decls are
     collected as a TREE_LIST, and the result is discarded here — only
     the syntax is checked.  */
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    c_parser_omp_var_list_parens (parser, 0, NULL);
  else if (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL))
    c_parser_error (parser, "expected %<(%> or end of line");
  c_parser_skip_to_pragma_eol (parser);

  c_finish_omp_flush ();
}
/* Parse the restricted form of the for statement allowed by OpenMP.
The real trick here is to determine the loop control variable early
so that we can push a new decl if necessary to make it private. */
static tree
c_parser_omp_for_loop (c_parser *parser)
{
  tree decl, cond, incr, save_break, save_cont, body, init;
  location_t loc;

  if (!c_parser_next_token_is_keyword (parser, RID_FOR))
    {
      c_parser_error (parser, "for statement expected");
      return NULL;
    }
  loc = c_parser_peek_token (parser)->location;
  c_parser_consume_token (parser);

  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    return NULL;

  /* Parse the initialization declaration or expression.  */
  if (c_parser_next_token_starts_declspecs (parser))
    {
      /* Declaration form: "for (int i = ...; ...)".  */
      c_parser_declaration_or_fndef (parser, true, true, true, true);
      decl = check_for_loop_decls ();
      if (decl == NULL)
        goto error_init;
      init = decl;
    }
  else if (c_parser_next_token_is (parser, CPP_NAME)
           && c_parser_peek_2nd_token (parser)->type == CPP_EQ)
    {
      /* Expression form: only a plain "name = expr" initializer is
         accepted, so the control variable is known early.  */
      decl = c_parser_postfix_expression (parser).value;
      c_parser_require (parser, CPP_EQ, "expected %<=%>");
      init = c_parser_expr_no_commas (parser, NULL).value;
      init = build_modify_expr (decl, NOP_EXPR, init);
      init = c_process_expr_stmt (init);
      c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
    }
  else
    goto error_init;

  /* Parse the loop condition.  */
  cond = NULL_TREE;
  if (c_parser_next_token_is_not (parser, CPP_SEMICOLON))
    {
      cond = c_parser_expression_conv (parser).value;
      cond = c_objc_common_truthvalue_conversion (cond);
      if (EXPR_P (cond))
        SET_EXPR_LOCATION (cond, input_location);
    }
  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");

  /* Parse the increment expression.  */
  incr = NULL_TREE;
  if (c_parser_next_token_is_not (parser, CPP_CLOSE_PAREN))
    incr = c_process_expr_stmt (c_parser_expression (parser).value);
  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");

 parse_body:
  /* Parse the body with c_break_label set to a non-NULL sentinel
     (size_one_node — presumably meaning "break allowed but handled by
     the OMP lowering"; confirm against c_finish_bc_stmt) and continue
     redirected to a fresh label appended after the body.  */
  save_break = c_break_label;
  c_break_label = size_one_node;
  save_cont = c_cont_label;
  c_cont_label = NULL_TREE;
  body = push_stmt_list ();
  add_stmt (c_parser_c99_block_statement (parser));
  if (c_cont_label)
    add_stmt (build1 (LABEL_EXPR, void_type_node, c_cont_label));
  body = pop_stmt_list (body);
  c_break_label = save_break;
  c_cont_label = save_cont;

  /* Only bother calling c_finish_omp_for if we haven't already generated
     an error from the initialization parsing.  */
  if (decl != NULL && decl != error_mark_node && init != error_mark_node)
    return c_finish_omp_for (loc, decl, init, cond, incr, body, NULL);
  return NULL;

 error_init:
  /* Still parse the body (via parse_body) so the tokens are consumed,
     but return NULL to signal failure.  */
  c_parser_error (parser, "expected iteration declaration or initialization");
  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
  decl = init = cond = incr = NULL_TREE;
  goto parse_body;
}
/* OpenMP 2.5:
#pragma omp for for-clause[optseq] new-line
for-loop
*/
#define OMP_FOR_CLAUSE_MASK				\
	( (1u << PRAGMA_OMP_CLAUSE_PRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE)	\
	| (1u << PRAGMA_OMP_CLAUSE_LASTPRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_REDUCTION)		\
	| (1u << PRAGMA_OMP_CLAUSE_ORDERED)		\
	| (1u << PRAGMA_OMP_CLAUSE_SCHEDULE)		\
	| (1u << PRAGMA_OMP_CLAUSE_NOWAIT))

static tree
c_parser_omp_for (c_parser *parser)
{
  tree scope, cl, loop;

  /* Clauses come first on the pragma line, then the restricted
     for-loop inside its own compound-statement scope.  */
  cl = c_parser_omp_all_clauses (parser, OMP_FOR_CLAUSE_MASK,
				 "#pragma omp for");

  scope = c_begin_compound_stmt (true);
  loop = c_parser_omp_for_loop (parser);
  if (loop != NULL)
    OMP_FOR_CLAUSES (loop) = cl;
  scope = c_end_compound_stmt (scope, true);
  add_stmt (scope);

  return loop;
}
/* OpenMP 2.5:
# pragma omp master new-line
structured-block
*/
static tree
c_parser_omp_master (c_parser *parser)
{
  tree body;

  /* "master" accepts no clauses; the rest of the pragma line must be
     empty, then the structured block follows.  */
  c_parser_skip_to_pragma_eol (parser);
  body = c_parser_omp_structured_block (parser);
  return c_finish_omp_master (body);
}
/* OpenMP 2.5:
# pragma omp ordered new-line
structured-block
*/
static tree
c_parser_omp_ordered (c_parser *parser)
{
  tree body;

  /* "ordered" accepts no clauses; the rest of the pragma line must be
     empty, then the structured block follows.  */
  c_parser_skip_to_pragma_eol (parser);
  body = c_parser_omp_structured_block (parser);
  return c_finish_omp_ordered (body);
}
/* OpenMP 2.5:
section-scope:
{ section-sequence }
section-sequence:
section-directive[opt] structured-block
section-sequence section-directive structured-block */
static tree
c_parser_omp_sections_scope (c_parser *parser)
{
  tree stmt, substmt;
  bool error_suppress = false;
  location_t loc;

  if (!c_parser_require (parser, CPP_OPEN_BRACE, "expected %<{%>"))
    {
      /* Avoid skipping until the end of the block.  */
      parser->error = false;
      return NULL_TREE;
    }

  stmt = push_stmt_list ();

  loc = c_parser_peek_token (parser)->location;
  if (c_parser_peek_token (parser)->pragma_kind != PRAGMA_OMP_SECTION)
    {
      /* The first section-directive may be omitted: gather statements
         up to the first "#pragma omp section", '}' or EOF into an
         implicit leading OMP_SECTION.  */
      substmt = push_stmt_list ();

      while (1)
        {
          c_parser_statement (parser);
          if (c_parser_peek_token (parser)->pragma_kind == PRAGMA_OMP_SECTION)
            break;
          if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
            break;
          if (c_parser_next_token_is (parser, CPP_EOF))
            break;
        }

      substmt = pop_stmt_list (substmt);
      substmt = build1 (OMP_SECTION, void_type_node, substmt);
      SET_EXPR_LOCATION (substmt, loc);
      add_stmt (substmt);
    }

  /* One iteration per explicit "#pragma omp section".  */
  while (1)
    {
      if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
        break;
      if (c_parser_next_token_is (parser, CPP_EOF))
        break;

      loc = c_parser_peek_token (parser)->location;
      if (c_parser_peek_token (parser)->pragma_kind == PRAGMA_OMP_SECTION)
        {
          c_parser_consume_pragma (parser);
          c_parser_skip_to_pragma_eol (parser);
          error_suppress = false;
        }
      else if (!error_suppress)
        {
          /* Only complain once per run of misplaced statements.  */
          error ("expected %<#pragma omp section%> or %<}%>");
          error_suppress = true;
        }

      substmt = c_parser_omp_structured_block (parser);
      substmt = build1 (OMP_SECTION, void_type_node, substmt);
      SET_EXPR_LOCATION (substmt, loc);
      add_stmt (substmt);
    }
  c_parser_skip_until_found (parser, CPP_CLOSE_BRACE,
                             "expected %<#pragma omp section%> or %<}%>");

  substmt = pop_stmt_list (stmt);

  stmt = make_node (OMP_SECTIONS);
  TREE_TYPE (stmt) = void_type_node;
  OMP_SECTIONS_BODY (stmt) = substmt;

  return add_stmt (stmt);
}
/* OpenMP 2.5:
   # pragma omp sections sections-clause[optseq] new-line
     sections-scope
*/
#define OMP_SECTIONS_CLAUSE_MASK			\
	( (1u << PRAGMA_OMP_CLAUSE_PRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE)	\
	| (1u << PRAGMA_OMP_CLAUSE_LASTPRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_REDUCTION)		\
	| (1u << PRAGMA_OMP_CLAUSE_NOWAIT))

static tree
c_parser_omp_sections (c_parser *parser)
{
  tree scope, cl, sections;

  /* Clauses come first on the pragma line, then the braced
     section-scope inside its own compound-statement scope.  */
  cl = c_parser_omp_all_clauses (parser, OMP_SECTIONS_CLAUSE_MASK,
				 "#pragma omp sections");

  scope = c_begin_compound_stmt (true);
  sections = c_parser_omp_sections_scope (parser);
  if (sections != NULL)
    OMP_SECTIONS_CLAUSES (sections) = cl;
  scope = c_end_compound_stmt (scope, true);
  add_stmt (scope);

  return sections;
}
/* OpenMP 2.5:
   # pragma omp parallel parallel-clause new-line
   # pragma omp parallel for parallel-for-clause new-line
   # pragma omp parallel sections parallel-sections-clause new-line
*/
#define OMP_PARALLEL_CLAUSE_MASK			\
	( (1u << PRAGMA_OMP_CLAUSE_IF)			\
	| (1u << PRAGMA_OMP_CLAUSE_PRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE)	\
	| (1u << PRAGMA_OMP_CLAUSE_DEFAULT)		\
	| (1u << PRAGMA_OMP_CLAUSE_SHARED)		\
	| (1u << PRAGMA_OMP_CLAUSE_COPYIN)		\
	| (1u << PRAGMA_OMP_CLAUSE_REDUCTION)		\
	| (1u << PRAGMA_OMP_CLAUSE_NUM_THREADS))

static tree
c_parser_omp_parallel (c_parser *parser)
{
  enum pragma_kind p_kind = PRAGMA_OMP_PARALLEL;
  const char *p_name = "#pragma omp parallel";
  tree stmt, clauses, par_clause, ws_clause, block;
  unsigned int mask = OMP_PARALLEL_CLAUSE_MASK;

  /* Detect the combined forms "parallel for" / "parallel sections";
     they accept the union of both clause sets, minus nowait.  */
  if (c_parser_next_token_is_keyword (parser, RID_FOR))
    {
      c_parser_consume_token (parser);
      p_kind = PRAGMA_OMP_PARALLEL_FOR;
      p_name = "#pragma omp parallel for";
      mask |= OMP_FOR_CLAUSE_MASK;
      mask &= ~(1u << PRAGMA_OMP_CLAUSE_NOWAIT);
    }
  else if (c_parser_next_token_is (parser, CPP_NAME))
    {
      /* "sections" is not a keyword, so compare the identifier text.  */
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      if (strcmp (p, "sections") == 0)
	{
	  c_parser_consume_token (parser);
	  p_kind = PRAGMA_OMP_PARALLEL_SECTIONS;
	  p_name = "#pragma omp parallel sections";
	  mask |= OMP_SECTIONS_CLAUSE_MASK;
	  mask &= ~(1u << PRAGMA_OMP_CLAUSE_NOWAIT);
	}
    }

  clauses = c_parser_omp_all_clauses (parser, mask, p_name);

  switch (p_kind)
    {
    case PRAGMA_OMP_PARALLEL:
      block = c_begin_omp_parallel ();
      c_parser_statement (parser);
      stmt = c_finish_omp_parallel (clauses, block);
      break;

    case PRAGMA_OMP_PARALLEL_FOR:
      /* Combined form: split the clauses between the parallel
	 construct and the inner work-sharing construct.  */
      block = c_begin_omp_parallel ();
      c_split_parallel_clauses (clauses, &par_clause, &ws_clause);
      stmt = c_parser_omp_for_loop (parser);
      if (stmt)
	OMP_FOR_CLAUSES (stmt) = ws_clause;
      stmt = c_finish_omp_parallel (par_clause, block);
      OMP_PARALLEL_COMBINED (stmt) = 1;
      break;

    case PRAGMA_OMP_PARALLEL_SECTIONS:
      block = c_begin_omp_parallel ();
      c_split_parallel_clauses (clauses, &par_clause, &ws_clause);
      stmt = c_parser_omp_sections_scope (parser);
      if (stmt)
	OMP_SECTIONS_CLAUSES (stmt) = ws_clause;
      stmt = c_finish_omp_parallel (par_clause, block);
      OMP_PARALLEL_COMBINED (stmt) = 1;
      break;

    default:
      gcc_unreachable ();
    }

  return stmt;
}
/* OpenMP 2.5:
# pragma omp single single-clause[optseq] new-line
structured-block
*/
#define OMP_SINGLE_CLAUSE_MASK				\
	( (1u << PRAGMA_OMP_CLAUSE_PRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE)	\
	| (1u << PRAGMA_OMP_CLAUSE_COPYPRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_NOWAIT))

static tree
c_parser_omp_single (c_parser *parser)
{
  tree node, clauses;

  /* Build the OMP_SINGLE node up front, then fill in its clauses and
     body as they are parsed.  */
  node = make_node (OMP_SINGLE);
  TREE_TYPE (node) = void_type_node;

  clauses = c_parser_omp_all_clauses (parser, OMP_SINGLE_CLAUSE_MASK,
				      "#pragma omp single");
  OMP_SINGLE_CLAUSES (node) = clauses;
  OMP_SINGLE_BODY (node) = c_parser_omp_structured_block (parser);

  return add_stmt (node);
}
/* Main entry point to parsing most OpenMP pragmas. */
static void
c_parser_omp_construct (c_parser *parser)
{
  enum pragma_kind p_kind;
  location_t loc;
  tree stmt;

  loc = c_parser_peek_token (parser)->location;
  p_kind = c_parser_peek_token (parser)->pragma_kind;
  c_parser_consume_pragma (parser);

  /* For all constructs below except #pragma omp atomic
     MUST_NOT_THROW catch handlers are needed when exceptions
     are enabled.  */
  if (p_kind != PRAGMA_OMP_ATOMIC)
    c_maybe_initialize_eh ();

  switch (p_kind)
    {
    case PRAGMA_OMP_ATOMIC:
      /* Atomic adds its own statement and produces no tree here.  */
      c_parser_omp_atomic (parser);
      return;
    case PRAGMA_OMP_CRITICAL:
      stmt = c_parser_omp_critical (parser);
      break;
    case PRAGMA_OMP_FOR:
      stmt = c_parser_omp_for (parser);
      break;
    case PRAGMA_OMP_MASTER:
      stmt = c_parser_omp_master (parser);
      break;
    case PRAGMA_OMP_ORDERED:
      stmt = c_parser_omp_ordered (parser);
      break;
    case PRAGMA_OMP_PARALLEL:
      stmt = c_parser_omp_parallel (parser);
      break;
    case PRAGMA_OMP_SECTIONS:
      stmt = c_parser_omp_sections (parser);
      break;
    case PRAGMA_OMP_SINGLE:
      stmt = c_parser_omp_single (parser);
      break;
    default:
      gcc_unreachable ();
    }

  /* Tag the generated construct with the pragma's location.  */
  if (stmt)
    SET_EXPR_LOCATION (stmt, loc);
}
/* OpenMP 2.5:
# pragma omp threadprivate (variable-list) */
static void
c_parser_omp_threadprivate (c_parser *parser)
{
  tree vars, t;

  c_parser_consume_pragma (parser);
  /* KIND of 0: the decls come back as a TREE_LIST (TREE_PURPOSE).  */
  vars = c_parser_omp_var_list_parens (parser, 0, NULL);

  if (!targetm.have_tls)
    sorry ("threadprivate variables not supported in this target");

  /* Mark every variable in VARS to be assigned thread local storage.  */
  for (t = vars; t; t = TREE_CHAIN (t))
    {
      tree v = TREE_PURPOSE (t);

      /* If V had already been marked threadprivate, it doesn't matter
	 whether it had been used prior to this point.  */
      if (TREE_USED (v) && !C_DECL_THREADPRIVATE_P (v))
	error ("%qE declared %<threadprivate%> after first use", v);
      else if (! TREE_STATIC (v) && ! DECL_EXTERNAL (v))
	error ("automatic variable %qE cannot be %<threadprivate%>", v);
      else if (! COMPLETE_TYPE_P (TREE_TYPE (v)))
	error ("%<threadprivate%> %qE has incomplete type", v);
      else
	{
	  if (! DECL_THREAD_LOCAL_P (v))
	    {
	      DECL_TLS_MODEL (v) = decl_default_tls_model (v);
	      /* If rtl has been already set for this var, call
		 make_decl_rtl once again, so that encode_section_info
		 has a chance to look at the new decl flags.  */
	      if (DECL_RTL_SET_P (v))
		make_decl_rtl (v);
	    }
	  C_DECL_THREADPRIVATE_P (v) = 1;
	}
    }

  c_parser_skip_to_pragma_eol (parser);
}
/* Parse a single source file. */
void
c_parse_file (void)
{
  /* Use local storage to begin.  If the first token is a pragma, parse it.
     If it is #pragma GCC pch_preprocess, then this will load a PCH file
     which will cause garbage collection.  */
  c_parser tparser;

  memset (&tparser, 0, sizeof tparser);
  the_parser = &tparser;

  if (c_parser_peek_token (&tparser)->pragma_kind == PRAGMA_GCC_PCH_PREPROCESS)
    c_parser_pragma_pch_preprocess (&tparser);

  /* Only now move the parser into GC-managed storage: the PCH load
     above may have triggered collection, so the stack copy is
     transferred afterwards.  */
  the_parser = GGC_NEW (c_parser);
  *the_parser = tparser;

  c_parser_translation_unit (the_parser);
  the_parser = NULL;
}
#include "gt-c-parser.h"
|
cache.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC AAA CCCC H H EEEEE %
% C A A C H H E %
% C AAAAA C HHHHH EEE %
% C A A C H H E %
% CCCC A A CCCC H H EEEEE %
% %
% %
% MagickCore Pixel Cache Methods %
% %
% Software Design %
% Cristy %
% July 1999 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/distribute-cache-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/nt-base-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/quantum.h"
#include "MagickCore/random_.h"
#include "MagickCore/registry.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/timer-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#if defined(MAGICKCORE_ZLIB_DELEGATE)
#include "zlib.h"
#endif
/*
Define declarations.
*/
/* Progress-tick helper: forwards to QuantumTick with the offset widened
   to MagickOffsetType (presumably true at monitor-update intervals —
   confirm against QuantumTick's definition).  */
#define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent)
/* MagickTrue when the current open-file resource count exceeds the
   configured file-descriptor resource limit.  */
#define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \
  GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse)
/*
Typedef declarations.
*/
/* Quotient/remainder pair from a division — presumably used to wrap
   virtual pixel coordinates into the image extent (the consuming code
   is outside this chunk; confirm at the point of use).  */
typedef struct _MagickModulo
{
  ssize_t
    quotient,
    remainder;
} MagickModulo;
/*
Forward declarations.
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static Cache
GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *)
magick_hot_spot;
static const Quantum
*GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t,
const ssize_t,const size_t,const size_t,ExceptionInfo *),
*GetVirtualPixelsCache(const Image *);
static const void
*GetVirtualMetacontentFromCache(const Image *);
static MagickBooleanType
GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t,Quantum *,
ExceptionInfo *),
GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod,
const ssize_t,const ssize_t,Quantum *,ExceptionInfo *),
OpenPixelCache(Image *,const MapMode,ExceptionInfo *),
OpenPixelCacheOnDisk(CacheInfo *,const MapMode),
ReadPixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
ExceptionInfo *),
ReadPixelCacheMetacontent(CacheInfo *magick_restrict,
NexusInfo *magick_restrict,ExceptionInfo *),
SyncAuthenticPixelsCache(Image *,ExceptionInfo *),
WritePixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
ExceptionInfo *),
WritePixelCacheMetacontent(CacheInfo *,NexusInfo *magick_restrict,
ExceptionInfo *);
static Quantum
*GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*SetPixelCacheNexusPixels(const CacheInfo *magick_restrict,const MapMode,
const ssize_t,const ssize_t,const size_t,const size_t,
const MagickBooleanType,NexusInfo *magick_restrict,ExceptionInfo *)
magick_hot_spot;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
static void
CopyOpenCLBuffer(CacheInfo *magick_restrict);
#endif
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
Global declarations.
*/
/* NOTE(review): lazily created lock for the cache module's shared
   state — usage is outside this chunk; confirm.  */
static SemaphoreInfo
  *cache_semaphore = (SemaphoreInfo *) NULL;

/* -1 means "not yet determined"; presumably resolved on first cache
   open from policy/environment — confirm at the point of use.  */
static ssize_t
  cache_anonymous_memory = (-1);

/* NOTE(review): timestamp used for cache aging/expiry decisions —
   set outside this chunk; confirm.  */
static time_t
  cache_epoch = 0;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCache() acquires a pixel cache.
%
% The format of the AcquirePixelCache() method is:
%
% Cache AcquirePixelCache(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
MagickPrivate Cache AcquirePixelCache(const size_t number_threads)
{
  CacheInfo
    *magick_restrict cache_info;

  char
    *value;

  /*
    Allocate the cache structure (aligned) and zero it; allocation failure
    here is unrecoverable and raises a fatal exception.
  */
  cache_info=(CacheInfo *) AcquireAlignedMemory(1,sizeof(*cache_info));
  if (cache_info == (CacheInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(cache_info,0,sizeof(*cache_info));
  cache_info->type=UndefinedCache;
  cache_info->mode=IOMode;
  cache_info->disk_mode=IOMode;
  cache_info->colorspace=sRGBColorspace;
  cache_info->file=(-1);
  cache_info->id=GetMagickThreadId();
  /*
    Size the per-thread nexus pool: start from the caller's request, raise it
    to the OpenMP maximum and the thread resource limit when those are larger,
    and guarantee at least one slot.
  */
  cache_info->number_threads=number_threads;
  if (GetOpenMPMaximumThreads() > cache_info->number_threads)
    cache_info->number_threads=GetOpenMPMaximumThreads();
  if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads)
    cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  if (cache_info->number_threads == 0)
    cache_info->number_threads=1;
  cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads);
  if (cache_info->nexus_info == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /*
    Read the synchronize setting from the environment first, then let the
    security policy (if set) override it; each returned string is owned by
    us and must be destroyed.
  */
  value=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (value != (const char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(value);
      value=DestroyString(value);
    }
  value=GetPolicyValue("cache:synchronize");
  if (value != (const char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(value);
      value=DestroyString(value);
    }
  /*
    Snapshot resource limits and create the two semaphores (general cache
    lock and disk-file lock) before publishing the signature.
  */
  cache_info->width_limit=GetMagickResourceLimit(WidthResource);
  cache_info->height_limit=GetMagickResourceLimit(HeightResource);
  cache_info->semaphore=AcquireSemaphoreInfo();
  cache_info->reference_count=1;
  cache_info->file_semaphore=AcquireSemaphoreInfo();
  cache_info->debug=IsEventLogging();
  cache_info->signature=MagickCoreSignature;
  return((Cache ) cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCacheNexus() allocates the NexusInfo structure.
%
% The format of the AcquirePixelCacheNexus method is:
%
% NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
MagickPrivate NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
{
  NexusInfo
    **magick_restrict nexus_info;

  register ssize_t
    i;

  /*
    Allocate 2*number_threads pointer slots: the first half are the
    per-thread nexuses, the second half back each one's virtual nexus.
  */
  nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory(2*
    number_threads,sizeof(*nexus_info)));
  if (nexus_info == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /*
    All NexusInfo structs live in one contiguous block anchored at
    *nexus_info; the pointer table simply indexes into it.
  */
  *nexus_info=(NexusInfo *) AcquireQuantumMemory(2*number_threads,
    sizeof(**nexus_info));
  if (*nexus_info == (NexusInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(*nexus_info,0,2*number_threads*sizeof(**nexus_info));
  for (i=0; i < (ssize_t) (2*number_threads); i++)
  {
    nexus_info[i]=(*nexus_info+i);
    /* only the first half get a virtual nexus, taken from the second half */
    if (i < (ssize_t) number_threads)
      nexus_info[i]->virtual_nexus=(*nexus_info+number_threads+i);
    nexus_info[i]->signature=MagickCoreSignature;
  }
  return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCachePixels() returns the pixels associated with the specified
% image.
%
% The format of the AcquirePixelCachePixels() method is:
%
% void *AcquirePixelCachePixels(const Image *image,size_t *length,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *AcquirePixelCachePixels(const Image *image,size_t *length,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Return a pointer to the raw pixel store and its length in bytes; only
    memory- and map-backed caches expose their pixels directly.  For any
    other cache type, *length is set to 0 and NULL is returned.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  *length=0;
  if ((cache_info->type == MemoryCache) || (cache_info->type == MapCache))
    {
      *length=(size_t) cache_info->length;
      return(cache_info->pixels);
    }
  return((void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t G e n e s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentGenesis() instantiates the cache component.
%
% The format of the CacheComponentGenesis method is:
%
% MagickBooleanType CacheComponentGenesis(void)
%
*/
MagickPrivate MagickBooleanType CacheComponentGenesis(void)
{
  /*
    Lazily create the module-level cache semaphore; idempotent if the
    semaphore already exists.
  */
  if (cache_semaphore == (SemaphoreInfo *) NULL)
    cache_semaphore=AcquireSemaphoreInfo();
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t T e r m i n u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentTerminus() destroys the cache component.
%
% The format of the CacheComponentTerminus() method is:
%
% CacheComponentTerminus(void)
%
*/
MagickPrivate void CacheComponentTerminus(void)
{
  /*
    Make sure the semaphore exists before relinquishing it, so the destroy
    path never hands a NULL semaphore to RelinquishSemaphoreInfo().
  */
  if (cache_semaphore == (SemaphoreInfo *) NULL)
    ActivateSemaphoreInfo(&cache_semaphore);
  /* no op-- nothing to destroy */
  RelinquishSemaphoreInfo(&cache_semaphore);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l i p P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipPixelCacheNexus() clips the cache nexus as defined by the image clip
% mask. The method returns MagickTrue if the pixel region is clipped,
% otherwise MagickFalse.
%
% The format of the ClipPixelCacheNexus() method is:
%
% MagickBooleanType ClipPixelCacheNexus(Image *image,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ClipPixelCacheNexus(Image *image,
  NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickSizeType
    number_pixels;

  register Quantum
    *magick_restrict p,
    *magick_restrict q;

  register ssize_t
    n;

  /*
    Apply clip mask.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Nothing to clip unless the image carries a write mask and the nexus
    covers a non-empty region.
  */
  if ((image->channels & WriteMaskChannel) == 0)
    return(MagickTrue);
  if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0))
    return(MagickTrue);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  /*
    p reads the cache's current pixels for the same region (through the
    virtual nexus, so the caller's nexus buffer is not disturbed); q walks
    the pixels the caller just wrote into this nexus.
  */
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
    nexus_info->region.width,nexus_info->region.height,
    nexus_info->virtual_nexus,exception);
  q=nexus_info->pixels;
  number_pixels=(MagickSizeType) nexus_info->region.width*
    nexus_info->region.height;
  for (n=0; n < (ssize_t) number_pixels; n++)
  {
    double
      mask_alpha;

    register ssize_t
      i;

    if (p == (Quantum *) NULL)
      break;
    mask_alpha=QuantumScale*GetPixelWriteMask(image,p);
    if (fabs(mask_alpha) >= MagickEpsilon)
      {
        /*
          Composite the updated pixel over the original, weighted by the
          write mask, for every channel marked updatable.
        */
        for (i=0; i < (ssize_t) image->number_channels; i++)
        {
          PixelChannel channel = GetPixelChannelChannel(image,i);
          PixelTrait traits = GetPixelChannelTraits(image,channel);
          if ((traits & UpdatePixelTrait) == 0)
            continue;
          q[i]=ClampToQuantum(MagickOver_((double) p[i],mask_alpha*
            GetPixelAlpha(image,p),(double) q[i],(double)
            GetPixelAlpha(image,q)));
        }
        SetPixelAlpha(image,GetPixelAlpha(image,p),q);
      }
    p+=GetPixelChannels(image);
    q+=GetPixelChannels(image);
  }
  /* an early break (p == NULL) means the cache pixels were unavailable */
  return(n < (ssize_t) number_pixels ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCache() clones a pixel cache.
%
% The format of the ClonePixelCache() method is:
%
% Cache ClonePixelCache(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickPrivate Cache ClonePixelCache(const Cache cache)
{
  CacheInfo
    *magick_restrict clone_info;

  const CacheInfo
    *magick_restrict cache_info;

  assert(cache != NULL);
  cache_info=(const CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  /*
    Produce a new, empty cache: only the thread count and the virtual pixel
    method carry over -- pixel data is not copied here (see
    ClonePixelCacheRepository for the data copy).
  */
  clone_info=(CacheInfo *) AcquirePixelCache(cache_info->number_threads);
  clone_info->virtual_pixel_method=cache_info->virtual_pixel_method;
  return((Cache ) clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheMethods() clones the pixel cache methods from one cache to
% another.
%
% The format of the ClonePixelCacheMethods() method is:
%
% void ClonePixelCacheMethods(Cache clone,const Cache cache)
%
% A description of each parameter follows:
%
% o clone: Specifies a pointer to a Cache structure.
%
% o cache: the pixel cache.
%
*/
MagickPrivate void ClonePixelCacheMethods(Cache clone,const Cache cache)
{
  CacheInfo
    *magick_restrict destination_info,
    *magick_restrict origin_info;

  /*
    Copy the cache method table from `cache' into `clone'; nothing else in
    either cache is modified.
  */
  assert(clone != (Cache) NULL);
  destination_info=(CacheInfo *) clone;
  assert(destination_info->signature == MagickCoreSignature);
  if (destination_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      destination_info->filename);
  assert(cache != (Cache) NULL);
  origin_info=(CacheInfo *) cache;
  assert(origin_info->signature == MagickCoreSignature);
  destination_info->methods=origin_info->methods;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e R e p o s i t o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheRepository() clones the source pixel cache to the destination
% cache.
%
% The format of the ClonePixelCacheRepository() method is:
%
% MagickBooleanType ClonePixelCacheRepository(CacheInfo *cache_info,
% CacheInfo *source_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o source_info: the source pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ClonePixelCacheOnDisk(
  CacheInfo *magick_restrict cache_info,CacheInfo *magick_restrict clone_info)
{
  MagickSizeType
    extent;

  size_t
    quantum;

  ssize_t
    count;

  struct stat
    file_stats;

  unsigned char
    *buffer;

  /*
    Clone pixel cache on disk with identical morphology.
  */
  if ((OpenPixelCacheOnDisk(cache_info,ReadMode) == MagickFalse) ||
      (OpenPixelCacheOnDisk(clone_info,IOMode) == MagickFalse))
    return(MagickFalse);
  /* rewind both files before copying */
  if ((lseek(cache_info->file,0,SEEK_SET) < 0) ||
      (lseek(clone_info->file,0,SEEK_SET) < 0))
    return(MagickFalse);
  /*
    Choose a copy chunk size: the source file size when known and smaller,
    otherwise MagickMaxBufferExtent.
  */
  quantum=(size_t) MagickMaxBufferExtent;
  if ((fstat(cache_info->file,&file_stats) == 0) && (file_stats.st_size > 0))
    quantum=(size_t) MagickMin(file_stats.st_size,MagickMaxBufferExtent);
  buffer=(unsigned char *) AcquireQuantumMemory(quantum,sizeof(*buffer));
  if (buffer == (unsigned char *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  extent=0;
  /*
    Copy loop; a short or failed write aborts the copy and the length check
    below reports the failure.  NOTE(review): a read() interrupted by a
    signal (EINTR) also ends the loop early -- presumably acceptable here;
    confirm against callers' retry behavior.
  */
  while ((count=read(cache_info->file,buffer,quantum)) > 0)
  {
    ssize_t
      number_bytes;

    number_bytes=write(clone_info->file,buffer,(size_t) count);
    if (number_bytes != count)
      break;
    extent+=number_bytes;
  }
  buffer=(unsigned char *) RelinquishMagickMemory(buffer);
  /* success only if every byte of the source cache was transferred */
  if (extent != cache_info->length)
    return(MagickFalse);
  return(MagickTrue);
}
static MagickBooleanType ClonePixelCacheRepository(
  CacheInfo *magick_restrict clone_info,CacheInfo *magick_restrict cache_info,
  ExceptionInfo *exception)
{
#define MaxCacheThreads ((size_t) GetMagickResourceLimit(ThreadResource))
#define cache_number_threads(source,destination,chunk,multithreaded) \
  num_threads((multithreaded) == 0 ? 1 : \
    (((source)->type != MemoryCache) && ((source)->type != MapCache)) || \
    (((destination)->type != MemoryCache) && ((destination)->type != MapCache)) ? \
    MagickMax(MagickMin(GetMagickResourceLimit(ThreadResource),2),1) : \
    MagickMax(MagickMin((ssize_t) GetMagickResourceLimit(ThreadResource),(ssize_t) (chunk)/256),1))

  MagickBooleanType
    optimize,
    status;

  NexusInfo
    **magick_restrict cache_nexus,
    **magick_restrict clone_nexus;

  size_t
    length;

  ssize_t
    y;

  assert(cache_info != (CacheInfo *) NULL);
  assert(clone_info != (CacheInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  /* a ping cache has no pixel data to copy */
  if (cache_info->type == PingCache)
    return(MagickTrue);
  length=cache_info->number_channels*sizeof(*cache_info->channel_map);
  /*
    Fast path: when both caches have the same storage class, colorspace,
    geometry, channel map and metacontent extent, the pixel data can be
    copied wholesale.
  */
  if ((cache_info->storage_class == clone_info->storage_class) &&
      (cache_info->colorspace == clone_info->colorspace) &&
      (cache_info->alpha_trait == clone_info->alpha_trait) &&
      (cache_info->channels == clone_info->channels) &&
      (cache_info->columns == clone_info->columns) &&
      (cache_info->rows == clone_info->rows) &&
      (cache_info->number_channels == clone_info->number_channels) &&
      (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) &&
      (cache_info->metacontent_extent == clone_info->metacontent_extent))
    {
      /*
        Identical pixel cache morphology.
      */
      if (((cache_info->type == MemoryCache) ||
           (cache_info->type == MapCache)) &&
          ((clone_info->type == MemoryCache) || (clone_info->type == MapCache)))
        {
          /* both caches are addressable: a pair of memcpy's suffices */
          (void) memcpy(clone_info->pixels,cache_info->pixels,
            cache_info->number_channels*cache_info->columns*cache_info->rows*
            sizeof(*cache_info->pixels));
          if ((cache_info->metacontent_extent != 0) &&
              (clone_info->metacontent_extent != 0))
            (void) memcpy(clone_info->metacontent,cache_info->metacontent,
              cache_info->columns*cache_info->rows*
              clone_info->metacontent_extent*sizeof(unsigned char));
          return(MagickTrue);
        }
      /* both on disk: stream the backing file directly */
      if ((cache_info->type == DiskCache) && (clone_info->type == DiskCache))
        return(ClonePixelCacheOnDisk(cache_info,clone_info));
    }
  /*
    Mismatched pixel cache morphology: copy row by row through per-thread
    nexuses, remapping channels when the channel maps differ.
  */
  cache_nexus=AcquirePixelCacheNexus(cache_info->number_threads);
  clone_nexus=AcquirePixelCacheNexus(clone_info->number_threads);
  length=cache_info->number_channels*sizeof(*cache_info->channel_map);
  /* optimize: identical channel maps allow a straight memcpy per row */
  optimize=(cache_info->number_channels == clone_info->number_channels) &&
    (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) ?
    MagickTrue : MagickFalse;
  length=(size_t) MagickMin(cache_info->number_channels*cache_info->columns,
    clone_info->number_channels*clone_info->columns);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
  for (y=0; y < (ssize_t) cache_info->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    Quantum
      *pixels;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* rows beyond the clone's extent are simply skipped */
    if (y >= (ssize_t) clone_info->rows)
      continue;
    /* read one source row into this thread's nexus */
    pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
      cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
    if (pixels == (Quantum *) NULL)
      continue;
    status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception);
    if (status == MagickFalse)
      continue;
    /* stage one destination row */
    pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
      clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
    if (pixels == (Quantum *) NULL)
      continue;
    (void) memset(clone_nexus[id]->pixels,0,(size_t) clone_nexus[id]->length);
    if (optimize != MagickFalse)
      (void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length*
        sizeof(Quantum));
    else
      {
        register const Quantum
          *magick_restrict p;

        register Quantum
          *magick_restrict q;

        /*
          Mismatched pixel channel map.
        */
        p=cache_nexus[id]->pixels;
        q=clone_nexus[id]->pixels;
        for (x=0; x < (ssize_t) cache_info->columns; x++)
        {
          register ssize_t
            i;

          if (x == (ssize_t) clone_info->columns)
            break;
          for (i=0; i < (ssize_t) clone_info->number_channels; i++)
          {
            PixelChannel
              channel;

            PixelTrait
              traits;

            /* map each destination channel back to its source offset */
            channel=clone_info->channel_map[i].channel;
            traits=cache_info->channel_map[channel].traits;
            if (traits != UndefinedPixelTrait)
              *q=*(p+cache_info->channel_map[channel].offset);
            q++;
          }
          p+=cache_info->number_channels;
        }
      }
    status=WritePixelCachePixels(clone_info,clone_nexus[id],exception);
  }
  if ((cache_info->metacontent_extent != 0) &&
      (clone_info->metacontent_extent != 0))
    {
      /*
        Clone metacontent.
      */
      length=(size_t) MagickMin(cache_info->metacontent_extent,
        clone_info->metacontent_extent);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
      for (y=0; y < (ssize_t) cache_info->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        Quantum
          *pixels;

        if (status == MagickFalse)
          continue;
        if (y >= (ssize_t) clone_info->rows)
          continue;
        pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
          cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
        if (pixels == (Quantum *) NULL)
          continue;
        status=ReadPixelCacheMetacontent(cache_info,cache_nexus[id],exception);
        if (status == MagickFalse)
          continue;
        pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
          clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
        if (pixels == (Quantum *) NULL)
          continue;
        if ((clone_nexus[id]->metacontent != (void *) NULL) &&
            (cache_nexus[id]->metacontent != (void *) NULL))
          (void) memcpy(clone_nexus[id]->metacontent,
            cache_nexus[id]->metacontent,length*sizeof(unsigned char));
        status=WritePixelCacheMetacontent(clone_info,clone_nexus[id],exception);
      }
    }
  clone_nexus=DestroyPixelCacheNexus(clone_nexus,clone_info->number_threads);
  cache_nexus=DestroyPixelCacheNexus(cache_nexus,cache_info->number_threads);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      (void) FormatLocaleString(message,MagickPathExtent,"%s => %s",
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type),
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type));
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixelCache() method is:
%
% void DestroyImagePixelCache(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void DestroyImagePixelCache(Image *image)
{
  /*
    Release the pixel cache attached to the image, if one exists.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->cache == (void *) NULL)
    return;
  image->cache=DestroyPixelCache(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixels() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixels() method is:
%
% void DestroyImagePixels(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DestroyImagePixels(Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Delegate to an installed destroy handler when one is registered
    (e.g. a foreign cache implementation); otherwise destroy the cache
    directly.
  */
  if (cache_info->methods.destroy_pixel_handler != (DestroyPixelHandler) NULL)
    {
      cache_info->methods.destroy_pixel_handler(image);
      return;
    }
  image->cache=DestroyPixelCache(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyPixelCache() method is:
%
% Cache DestroyPixelCache(Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info)
{
  int
    result;

  /*
    Close the cache's disk file descriptor and release its file-resource
    slot.  Returns MagickFalse when no file was open or close() failed.
  */
  if (cache_info->file == -1)
    return(MagickFalse);
  result=close(cache_info->file);
  cache_info->file=(-1);
  RelinquishMagickResource(FileResource,1);
  return(result == -1 ? MagickFalse : MagickTrue);
}
static inline void RelinquishPixelCachePixels(CacheInfo *cache_info)
{
  /*
    Release the pixel storage backing the cache and return its resource
    accounting, according to the cache type.
  */
  switch (cache_info->type)
  {
    case MemoryCache:
    {
#if defined(MAGICKCORE_OPENCL_SUPPORT)
      /* OpenCL-managed pixels are released through the CL cache info */
      if (cache_info->opencl != (MagickCLCacheInfo) NULL)
        {
          cache_info->opencl=RelinquishMagickCLCacheInfo(cache_info->opencl,
            MagickTrue);
          cache_info->pixels=(Quantum *) NULL;
          break;
        }
#endif
      if (cache_info->mapped == MagickFalse)
        cache_info->pixels=(Quantum *) RelinquishAlignedMemory(
          cache_info->pixels);
      else
        (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      RelinquishMagickResource(MemoryResource,cache_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      cache_info->pixels=(Quantum *) NULL;
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(MapResource,cache_info->length);
    }
    /*
      NOTE(review): no break above -- MapCache falls through into DiskCache
      so the backing file descriptor is closed and the disk resource is
      released too; presumably intentional, confirm against upstream.
    */
    case DiskCache:
    {
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(DiskResource,cache_info->length);
      break;
    }
    case DistributedCache:
    {
      *cache_info->cache_filename='\0';
      (void) RelinquishDistributePixelCache((DistributeCacheInfo *)
        cache_info->server_info);
      break;
    }
    default:
      break;
  }
  /* reset type/mapping state; metacontent storage is owned by the pixels */
  cache_info->type=UndefinedCache;
  cache_info->mapped=MagickFalse;
  cache_info->metacontent=(void *) NULL;
}
MagickPrivate Cache DestroyPixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  /*
    Drop one reference under the cache lock; only the last owner proceeds
    to tear the cache down.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count--;
  if (cache_info->reference_count != 0)
    {
      UnlockSemaphoreInfo(cache_info->semaphore);
      return((Cache) NULL);
    }
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      (void) FormatLocaleString(message,MagickPathExtent,"destroy %s",
        cache_info->filename);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /*
    Teardown order: pixel storage first, then ancillary state (distributed
    cache server, nexus pool, RNG, semaphores), and finally the structure
    itself.  The signature is inverted to catch use-after-free.
  */
  RelinquishPixelCachePixels(cache_info);
  if (cache_info->server_info != (DistributeCacheInfo *) NULL)
    cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *)
      cache_info->server_info);
  if (cache_info->nexus_info != (NexusInfo **) NULL)
    cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info,
      cache_info->number_threads);
  if (cache_info->random_info != (RandomInfo *) NULL)
    cache_info->random_info=DestroyRandomInfo(cache_info->random_info);
  if (cache_info->file_semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->file_semaphore);
  if (cache_info->semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->semaphore);
  cache_info->signature=(~MagickCoreSignature);
  cache_info=(CacheInfo *) RelinquishAlignedMemory(cache_info);
  cache=(Cache) NULL;
  return(cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCacheNexus() destroys a pixel cache nexus.
%
% The format of the DestroyPixelCacheNexus() method is:
%
% NexusInfo **DestroyPixelCacheNexus(NexusInfo *nexus_info,
% const size_t number_threads)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus to destroy.
%
% o number_threads: the number of nexus threads.
%
*/
static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info)
{
  /*
    Free the staging buffer owned by this nexus (mapped blob or aligned
    heap memory) and reset its bookkeeping fields.
  */
  if (nexus_info->mapped != MagickFalse)
    (void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length);
  else
    (void) RelinquishAlignedMemory(nexus_info->cache);
  nexus_info->cache=(Quantum *) NULL;
  nexus_info->pixels=(Quantum *) NULL;
  nexus_info->metacontent=(void *) NULL;
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
}
MagickPrivate NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info,
  const size_t number_threads)
{
  register ssize_t
    i;

  assert(nexus_info != (NexusInfo **) NULL);
  /*
    The pool holds 2*number_threads entries: each thread's nexus plus its
    virtual nexus (see AcquirePixelCacheNexus).
  */
  for (i=0; i < (ssize_t) (2*number_threads); i++)
  {
    if (nexus_info[i]->cache != (Quantum *) NULL)
      RelinquishCacheNexusPixels(nexus_info[i]);
    nexus_info[i]->signature=(~MagickCoreSignature);
  }
  /*
    All structs live in one contiguous block anchored at *nexus_info; free
    that block first, then the pointer table.
  */
  *nexus_info=(NexusInfo *) RelinquishMagickMemory(*nexus_info);
  nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info);
  return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticMetacontent() returns the authentic metacontent corresponding
% with the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is
% returned if the associated pixels are not available.
%
% The format of the GetAuthenticMetacontent() method is:
%
% void *GetAuthenticMetacontent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void *GetAuthenticMetacontent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Prefer an installed handler (e.g. a foreign cache implementation);
    otherwise return the metacontent staged in this thread's nexus.
  */
  if (cache_info->methods.get_authentic_metacontent_from_handler !=
      (GetAuthenticMetacontentFromHandler) NULL)
    {
      void
        *metacontent;

      metacontent=cache_info->methods.
        get_authentic_metacontent_from_handler(image);
      return(metacontent);
    }
  assert(id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[id]->metacontent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c M e t a c o n t e n t F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticMetacontentFromCache() returns the meta-content corresponding
% with the last call to QueueAuthenticPixelsCache() or
% GetAuthenticPixelsCache().
%
% The format of the GetAuthenticMetacontentFromCache() method is:
%
% void *GetAuthenticMetacontentFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void *GetAuthenticMetacontentFromCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* metacontent staged by the calling thread's most recent queue/get */
  assert(id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[id]->metacontent);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticOpenCLBuffer() returns an OpenCL buffer used to execute OpenCL
% operations.
%
% The format of the GetAuthenticOpenCLBuffer() method is:
%
% cl_mem GetAuthenticOpenCLBuffer(const Image *image,
% MagickCLDevice device,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o device: the device to use.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate cl_mem GetAuthenticOpenCLBuffer(const Image *image,
  MagickCLDevice device,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(device != (const MagickCLDevice) NULL);
  cache_info=(CacheInfo *) image->cache;
  /*
    Synchronize first when the cache is undefined or shared, so we operate
    on a private, materialized cache (the cache pointer may change).
  */
  if ((cache_info->type == UndefinedCache) || (cache_info->reference_count > 1))
    {
      SyncImagePixelCache((Image *) image,exception);
      cache_info=(CacheInfo *) image->cache;
    }
  /*
    Only heap-backed (non-mapped) memory caches can be wrapped in an
    OpenCL buffer.
  */
  if ((cache_info->type != MemoryCache) || (cache_info->mapped != MagickFalse))
    return((cl_mem) NULL);
  LockSemaphoreInfo(cache_info->semaphore);
  /* an existing CL buffer on a different context must be copied over */
  if ((cache_info->opencl != (MagickCLCacheInfo) NULL) &&
      (cache_info->opencl->device->context != device->context))
    cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    {
      assert(cache_info->pixels != (Quantum *) NULL);
      cache_info->opencl=AcquireMagickCLCacheInfo(device,cache_info->pixels,
        cache_info->length);
    }
  /* retain the mem object while still under the lock */
  if (cache_info->opencl != (MagickCLCacheInfo) NULL)
    RetainOpenCLMemObject(cache_info->opencl->buffer);
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    return((cl_mem) NULL);
  assert(cache_info->opencl->pixels == cache_info->pixels);
  return(cache_info->opencl->buffer);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or
% disk pixel cache as defined by the geometry parameters. A pointer to the
% pixels is returned if the pixels are transferred, otherwise a NULL is
% returned.
%
% The format of the GetAuthenticPixelCacheNexus() method is:
%
% Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to return.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  Quantum
    *magick_restrict pixels;

  /*
    Transfer pixels from the cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  /*
    Reserve the nexus region; NOTE(review): the MagickTrue flag presumably
    marks this as a read-modify-write acquisition -- confirm against
    QueueAuthenticPixelCacheNexus.
  */
  pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue,
    nexus_info,exception);
  if (pixels == (Quantum *) NULL)
    return((Quantum *) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* when the nexus aliases the cache pixels directly, no copy is needed */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(pixels);
  /* otherwise read the pixels (and metacontent, if any) into the nexus */
  if (ReadPixelCachePixels(cache_info,nexus_info,exception) == MagickFalse)
    return((Quantum *) NULL);
  if (cache_info->metacontent_extent != 0)
    if (ReadPixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse)
      return((Quantum *) NULL);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsFromCache() returns the pixels associated with the last
% call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods.
%
% The format of the GetAuthenticPixelsFromCache() method is:
%
% Quantum *GetAuthenticPixelsFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static Quantum *GetAuthenticPixelsFromCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(cache_info->nexus_info[id]->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelQueue() returns the authentic pixels associated
% corresponding with the last call to QueueAuthenticPixels() or
% GetAuthenticPixels().
%
% The format of the GetAuthenticPixelQueue() method is:
%
% Quantum *GetAuthenticPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Quantum *GetAuthenticPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  GetAuthenticPixelsFromHandler
    handler;

  const int
    id = GetOpenMPThreadId();

  /*
    Return the pixels from the last queue/get request, delegating to the
    registered handler when one is installed.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  handler=cache_info->methods.get_authentic_pixels_from_handler;
  if (handler != (GetAuthenticPixelsFromHandler) NULL)
    return(handler(image));
  assert(id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[id]->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixels() obtains a pixel region for read/write access. If the
% region is successfully accessed, a pointer to a Quantum array
% representing the region is returned, otherwise NULL is returned.
%
% The returned pointer may point to a temporary working copy of the pixels
% or it may point to the original pixels in memory. Performance is maximized
% if the selected region is part of one row, or one or more full rows, since
% then there is opportunity to access the pixels in-place (without a copy)
% if the image is in memory, or in a memory-mapped file. The returned pointer
% must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image has corresponding metacontent,call
% GetAuthenticMetacontent() after invoking GetAuthenticPixels() to obtain the
% meta-content corresponding to the region. Once the Quantum array has
% been updated, the changes must be saved back to the underlying image using
% SyncAuthenticPixels() or they may be lost.
%
% The format of the GetAuthenticPixels() method is:
%
% Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Obtain a read/write pixel region: the cache's registered handler is
    preferred; otherwise the region is serviced through this thread's nexus.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    return(cache_info->methods.get_authentic_pixels_handler(image,x,y,columns,
      rows,exception));
  assert(id < (int) cache_info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache
% as defined by the geometry parameters. A pointer to the pixels is returned
% if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetAuthenticPixelsCache() method is:
%
% Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Default authentic-pixels handler: service the request through this
    thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return((Quantum *) NULL);  /* defensive: asserts vanish in release builds */
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageExtent() returns the extent of the pixels associated corresponding
% with the last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
% The format of the GetImageExtent() method is:
%
% MagickSizeType GetImageExtent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickSizeType GetImageExtent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  NexusInfo
    *magick_restrict nexus_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Extent (in pixels) of the region referenced by this thread's nexus.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  nexus_info=cache_info->nexus_info[id];
  return(GetPixelCacheNexusExtent(cache_info,nexus_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCache() ensures that there is only a single reference to the
% pixel cache to be modified, updating the provided cache pointer to point to
% a clone of the original pixel cache if necessary.
%
% The format of the GetImagePixelCache method is:
%
% Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone: any value other than MagickFalse clones the cache pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType ValidatePixelCacheMorphology(
  const Image *magick_restrict image)
{
  const CacheInfo
    *magick_restrict cache_info;

  /*
    Verify the image and its pixel cache agree on storage class, colorspace,
    geometry, and channel layout; any mismatch means the cache must be
    reopened before use.
  */
  cache_info=(CacheInfo *) image->cache;
  if (cache_info->nexus_info == (NexusInfo **) NULL)
    return(MagickFalse);
  if ((image->storage_class != cache_info->storage_class) ||
      (image->colorspace != cache_info->colorspace) ||
      (image->alpha_trait != cache_info->alpha_trait) ||
      (image->channels != cache_info->channels))
    return(MagickFalse);
  if ((image->columns != cache_info->columns) ||
      (image->rows != cache_info->rows) ||
      (image->number_channels != cache_info->number_channels) ||
      (image->metacontent_extent != cache_info->metacontent_extent))
    return(MagickFalse);
  /* channel maps must match element-for-element */
  if (memcmp(image->channel_map,cache_info->channel_map,
       image->number_channels*sizeof(*image->channel_map)) != 0)
    return(MagickFalse);
  return(MagickTrue);
}
static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
MagickBooleanType
destroy,
status;
static MagickSizeType
cache_timelimit = MagickResourceInfinity,
cpu_throttle = MagickResourceInfinity,
cycles = 0;
/*
  Return a pixel cache this caller may safely modify: if the cache is shared
  (reference count > 1) or opened read-only, it is cloned first and the image
  is repointed at the clone.  Returns NULL on failure.
*/
status=MagickTrue;
/*
  Honor the CPU throttle resource limit: delay once every 32 calls.
*/
if (cpu_throttle == MagickResourceInfinity)
cpu_throttle=GetMagickResourceLimit(ThrottleResource);
if ((cpu_throttle != 0) && ((cycles++ % 32) == 0))
MagickDelay(cpu_throttle);
if (cache_epoch == 0)
{
/*
Set the expire time in seconds.
*/
cache_timelimit=GetMagickResourceLimit(TimeResource);
cache_epoch=GetMagickTime();
}
if ((cache_timelimit != MagickResourceInfinity) &&
((MagickSizeType) (GetMagickTime()-cache_epoch) >= cache_timelimit))
{
/*
  Time resource limit exceeded: close any disk cache file and abort with a
  fatal exception.
*/
#if defined(ECANCELED)
errno=ECANCELED;
#endif
cache_info=(CacheInfo *) image->cache;
if (cache_info->file != -1)
(void) ClosePixelCacheOnDisk(cache_info);
ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded");
}
LockSemaphoreInfo(image->semaphore);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
CopyOpenCLBuffer(cache_info);
#endif
destroy=MagickFalse;
if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
{
LockSemaphoreInfo(cache_info->semaphore);
/*
  Re-check under the cache semaphore: another thread may have already made
  this cache exclusive while we waited for the lock.
*/
if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
{
CacheInfo
*clone_info;
Image
clone_image;
/*
Clone pixel cache.
*/
clone_image=(*image);
clone_image.semaphore=AcquireSemaphoreInfo();
clone_image.reference_count=1;
clone_image.cache=ClonePixelCache(cache_info);
clone_info=(CacheInfo *) clone_image.cache;
status=OpenPixelCache(&clone_image,IOMode,exception);
if (status == MagickFalse)
clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
else
{
/* copy the pixel data only when the caller requested a full clone */
if (clone != MagickFalse)
status=ClonePixelCacheRepository(clone_info,cache_info,
exception);
if (status == MagickFalse)
clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
else
{
/* success: the image owns the clone; release the original below */
destroy=MagickTrue;
image->cache=clone_info;
}
}
RelinquishSemaphoreInfo(&clone_image.semaphore);
}
UnlockSemaphoreInfo(cache_info->semaphore);
}
if (destroy != MagickFalse)
cache_info=(CacheInfo *) DestroyPixelCache(cache_info);
if (status != MagickFalse)
{
/*
Ensure the image matches the pixel cache morphology.
*/
if (image->type != UndefinedType)
image->type=UndefinedType;
if (ValidatePixelCacheMorphology(image) == MagickFalse)
{
/* morphology mismatch: reopen the cache to match the image */
status=OpenPixelCache(image,IOMode,exception);
cache_info=(CacheInfo *) image->cache;
if (cache_info->file != -1)
(void) ClosePixelCacheOnDisk(cache_info);
}
}
UnlockSemaphoreInfo(image->semaphore);
if (status == MagickFalse)
return((Cache) NULL);
return(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCacheType() returns the pixel cache type: UndefinedCache,
% DiskCache, MemoryCache, MapCache, or PingCache.
%
% The format of the GetImagePixelCacheType() method is:
%
% CacheType GetImagePixelCacheType(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport CacheType GetImagePixelCacheType(const Image *image)
{
  const CacheInfo
    *magick_restrict cache_info;

  /*
    Report how the pixel cache is backed (memory, map, disk, or ping).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(const CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  return(cache_info->type);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e A u t h e n t i c P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixel() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixel() method is:
%
% MagickBooleanType GetOneAuthenticPixel(Image *image,const ssize_t x,
% const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType CopyPixel(const Image *image,
  const Quantum *source,Quantum *destination)
{
  ssize_t
    n;

  /*
    Copy one pixel's channels into destination; when source is NULL, fill in
    the image background color instead and report failure.
  */
  if (source == (const Quantum *) NULL)
    {
      destination[RedPixelChannel]=ClampToQuantum(image->background_color.red);
      destination[GreenPixelChannel]=ClampToQuantum(
        image->background_color.green);
      destination[BluePixelChannel]=ClampToQuantum(
        image->background_color.blue);
      destination[BlackPixelChannel]=ClampToQuantum(
        image->background_color.black);
      destination[AlphaPixelChannel]=ClampToQuantum(
        image->background_color.alpha);
      return(MagickFalse);
    }
  for (n=0; n < (ssize_t) GetPixelChannels(image); n++)
    destination[GetPixelChannelChannel(image,n)]=source[n];
  return(MagickTrue);
}
MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  GetOneAuthenticPixelFromHandler
    handler;

  Quantum
    *magick_restrict q;

  /*
    Return a single authentic pixel, delegating to the registered handler
    when one is installed; pixel is zeroed first so a failed lookup still
    yields a deterministic result.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  handler=cache_info->methods.get_one_authentic_pixel_from_handler;
  if (handler != (GetOneAuthenticPixelFromHandler) NULL)
    return(handler(image,x,y,pixel,exception));
  q=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception);
  return(CopyPixel(image,q,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e A u t h e n t i c P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixelFromCache() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixelFromCache() method is:
%
% MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
% const ssize_t x,const ssize_t y,Quantum *pixel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  Quantum
    *magick_restrict q;

  /*
    Default one-pixel authentic handler: read a 1x1 region through this
    thread's cache nexus and copy it into pixel.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  q=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,cache_info->nexus_info[id],
    exception);
  return(CopyPixel(image,q,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixel() returns a single virtual pixel at the specified
% (x,y) location. The image background color is returned if an error occurs.
% If you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualPixel() method is:
%
% MagickBooleanType GetOneVirtualPixel(const Image *image,const ssize_t x,
% const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  GetOneVirtualPixelFromHandler
    handler;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *p;

  /*
    Return a single virtual pixel, delegating to the registered handler when
    one is installed; pixel is zeroed first so a failed lookup still yields
    a deterministic result.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  handler=cache_info->methods.get_one_virtual_pixel_from_handler;
  if (handler != (GetOneVirtualPixelFromHandler) NULL)
    return(handler(image,GetPixelCacheVirtualMethod(image),x,y,pixel,
      exception));
  assert(id < (int) cache_info->number_threads);
  p=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    1UL,1UL,cache_info->nexus_info[id],exception);
  return(CopyPixel(image,p,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e V i r t u a l P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixelFromCache() returns a single virtual pixel at the
% specified (x,y) location. The image background color is returned if an
% error occurs.
%
% The format of the GetOneVirtualPixelFromCache() method is:
%
% MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
% const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
% Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *p;

  /*
    Default one-pixel virtual handler: read a 1x1 region through this
    thread's cache nexus and copy it into pixel.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  return(CopyPixel(image,p,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l P i x e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixelInfo() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs. If
% you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualPixelInfo() method is:
%
% MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,PixelInfo *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: these values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  PixelInfo *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *magick_restrict p;

  /*
    Fetch one virtual pixel and convert it to PixelInfo form; pixel is
    initialized to the image defaults before the lookup.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  GetPixelInfo(image,pixel);
  p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  if (p != (const Quantum *) NULL)
    {
      GetPixelInfoPixel(image,p,pixel);
      return(MagickTrue);
    }
  return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheColorspace() returns the class type of the pixel cache.
%
% The format of the GetPixelCacheColorspace() method is:
%
% Colorspace GetPixelCacheColorspace(Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickPrivate ColorspaceType GetPixelCacheColorspace(const Cache cache)
{
  const CacheInfo
    *magick_restrict cache_info;

  /*
    Return the colorspace the pixel cache stores its pixels in.
  */
  assert(cache != (Cache) NULL);
  cache_info=(const CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  return(cache_info->colorspace);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheFilename() returns the filename associated with the pixel
% cache.
%
% The format of the GetPixelCacheFilename() method is:
%
% const char *GetPixelCacheFilename(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const char *GetPixelCacheFilename(const Image *image)
{
  const CacheInfo
    *magick_restrict cache_info;

  /*
    Return the filename backing the image's pixel cache.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(const CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  return(cache_info->cache_filename);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheMethods() initializes the CacheMethods structure.
%
% The format of the GetPixelCacheMethods() method is:
%
% void GetPixelCacheMethods(CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickPrivate void GetPixelCacheMethods(CacheMethods *cache_methods)
{
  /*
    Populate cache_methods with the default pixel cache callbacks.
  */
  assert(cache_methods != (CacheMethods *) NULL);
  (void) memset(cache_methods,0,sizeof(*cache_methods));
  /* authentic (read/write) pixel access */
  cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache;
  cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache;
  cache_methods->get_authentic_metacontent_from_handler=
    GetAuthenticMetacontentFromCache;
  cache_methods->get_one_authentic_pixel_from_handler=
    GetOneAuthenticPixelFromCache;
  cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache;
  cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache;
  /* virtual (read-only) pixel access */
  cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache;
  cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache;
  cache_methods->get_virtual_metacontent_from_handler=
    GetVirtualMetacontentFromCache;
  cache_methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache;
  /* teardown */
  cache_methods->destroy_pixel_handler=DestroyImagePixelCache;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e N e x u s E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheNexusExtent() returns the extent of the pixels associated
% corresponding with the last call to SetPixelCacheNexusPixels() or
% GetPixelCacheNexusPixels().
%
% The format of the GetPixelCacheNexusExtent() method is:
%
% MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus info.
%
*/
MagickPrivate MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickSizeType
    area;

  /*
    Extent (in pixels) of the nexus region; an empty region reports the
    full cache extent instead.
  */
  assert(cache != NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  area=(MagickSizeType) nexus_info->region.width*nexus_info->region.height;
  if (area != 0)
    return(area);
  return((MagickSizeType) cache_info->columns*cache_info->rows);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCachePixels() returns the pixels associated with the specified image.
%
% The format of the GetPixelCachePixels() method is:
%
% void *GetPixelCachePixels(Image *image,MagickSizeType *length,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Expose the raw pixel buffer when it is addressable (heap- or map-backed);
    other cache types have no in-memory pixels to return.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  assert(length != (MagickSizeType *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  *length=cache_info->length;
  if ((cache_info->type == MemoryCache) || (cache_info->type == MapCache))
    return((void *) cache_info->pixels);
  return((void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheStorageClass() returns the class type of the pixel cache.
%
% The format of the GetPixelCacheStorageClass() method is:
%
% ClassType GetPixelCacheStorageClass(Cache cache)
%
% A description of each parameter follows:
%
% o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass.
%
% o cache: the pixel cache.
%
*/
MagickPrivate ClassType GetPixelCacheStorageClass(const Cache cache)
{
  const CacheInfo
    *magick_restrict cache_info;

  /*
    Return the storage class (DirectClass or PseudoClass) of the cache.
  */
  assert(cache != (Cache) NULL);
  cache_info=(const CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  return(cache_info->storage_class);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e T i l e S i z e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheTileSize() returns the pixel cache tile size.
%
% The format of the GetPixelCacheTileSize() method is:
%
% void GetPixelCacheTileSize(const Image *image,size_t *width,
% size_t *height)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the optimized cache tile width in pixels.
%
% o height: the optimized cache tile height in pixels.
%
*/
MagickPrivate void GetPixelCacheTileSize(const Image *image,size_t *width,
  size_t *height)
{
  CacheInfo
    *magick_restrict cache_info;

  size_t
    extent;

  /*
    Report a square tile size: 2048 bytes worth of pixels per tile row, or
    8192 bytes when the cache is disk-backed.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  extent=2048UL/(cache_info->number_channels*sizeof(Quantum));
  if (GetImagePixelCacheType(image) == DiskCache)
    extent=8192UL/(cache_info->number_channels*sizeof(Quantum));
  *width=extent;
  *height=extent;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the
% pixel cache. A virtual pixel is any pixel access that is outside the
% boundaries of the image cache.
%
% The format of the GetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickPrivate VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
{
  const CacheInfo
    *magick_restrict cache_info;

  /*
    Report the method used to synthesize pixels outside the cache bounds.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(const CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  return(cache_info->virtual_pixel_method);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l M e t a c o n t e n t F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontentFromCache() returns the meta-content corresponding with
% the last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualMetacontentFromCache() method is:
%
% void *GetVirtualMetacontentFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const void *GetVirtualMetacontentFromCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Return the meta-content held by the calling thread's cache nexus (the
    result of the most recent queue/get on this thread).
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(thread_id < (int) cache_info->number_threads);
  return(GetVirtualMetacontentFromNexus(cache_info,
    cache_info->nexus_info[thread_id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l M e t a c o n t e n t F r o m N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontentFromNexus() returns the meta-content for the specified
% cache nexus.
%
% The format of the GetVirtualMetacontentFromNexus() method is:
%
% const void *GetVirtualMetacontentFromNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the meta-content.
%
*/
MagickPrivate const void *GetVirtualMetacontentFromNexus(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  const CacheInfo
    *magick_restrict info;

  /*
    Return the nexus meta-content, or NULL when the cache has no pixels
    allocated yet (undefined storage class).
  */
  assert(cache != (Cache) NULL);
  info=(const CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->storage_class != UndefinedClass)
    return(nexus_info->metacontent);
  return((void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontent() returns the virtual metacontent corresponding with
% the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is
% returned if the meta-content are not available.
%
% The format of the GetVirtualMetacontent() method is:
%
% const void *GetVirtualMetacontent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const void *GetVirtualMetacontent(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
const void
*magick_restrict metacontent;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
metacontent=cache_info->methods.get_virtual_metacontent_from_handler(image);
if (metacontent != (void *) NULL)
return(metacontent);
assert(id < (int) cache_info->number_threads);
metacontent=GetVirtualMetacontentFromNexus(cache_info,
cache_info->nexus_info[id]);
return(metacontent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelCacheNexus() gets virtual pixels from the in-memory or disk
% pixel cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCacheNexus() method is:
%
% Quantum *GetVirtualPixelCacheNexus(const Image *image,
% const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
% const size_t columns,const size_t rows,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to acquire.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  8x8 ordered-dither offset table used to jitter out-of-bounds coordinates
  for the dither virtual-pixel method.
*/
static ssize_t
  DitherMatrix[64] =
  {
     0, 48, 12, 60,  3, 51, 15, 63,
    32, 16, 44, 28, 35, 19, 47, 31,
     8, 56,  4, 52, 11, 59,  7, 55,
    40, 24, 36, 20, 43, 27, 39, 23,
     2, 50, 14, 62,  1, 49, 13, 61,
    34, 18, 46, 30, 33, 17, 45, 29,
    10, 58,  6, 54,  9, 57,  5, 53,
    42, 26, 38, 22, 41, 25, 37, 21
  };

/*
  DitherX() perturbs a column offset by the dither table (centered around
  zero via the -32 bias) and clamps the result to [0, columns-1].
*/
static inline ssize_t DitherX(const ssize_t x,const size_t columns)
{
  ssize_t
    position;

  position=x+DitherMatrix[x & 0x07]-32L;
  if (position < 0L)
    position=0L;
  else
    if (position >= (ssize_t) columns)
      position=(ssize_t) columns-1L;
  return(position);
}

/*
  DitherY() perturbs a row offset by the dither table and clamps the result
  to [0, rows-1].
*/
static inline ssize_t DitherY(const ssize_t y,const size_t rows)
{
  ssize_t
    position;

  position=y+DitherMatrix[y & 0x07]-32L;
  if (position < 0L)
    position=0L;
  else
    if (position >= (ssize_t) rows)
      position=(ssize_t) rows-1L;
  return(position);
}
/*
  EdgeX() clamps a column offset into [0, columns-1]; this implements the
  replicate-edge policy for out-of-bounds column accesses.
*/
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  if (x >= (ssize_t) columns)
    return((ssize_t) (columns-1));
  if (x < 0L)
    return(0L);
  return(x);
}
/*
  EdgeY() clamps a row offset into [0, rows-1]; this implements the
  replicate-edge policy for out-of-bounds row accesses.
*/
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  if (y >= (ssize_t) rows)
    return((ssize_t) (rows-1));
  if (y < 0L)
    return(0L);
  return(y);
}
/*
  RandomX() picks a pseudo-random column index; assumes
  GetPseudoRandomValue() yields a value in [0,1) — TODO confirm.
*/
static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns)
{
  return((ssize_t) (GetPseudoRandomValue(random_info)*columns));
}
/*
  RandomY() picks a pseudo-random row index; assumes
  GetPseudoRandomValue() yields a value in [0,1) — TODO confirm.
*/
static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows)
{
  return((ssize_t) (GetPseudoRandomValue(random_info)*rows));
}
/*
  VirtualPixelModulo() performs floored division of offset by extent: C's
  `/` and `%` truncate toward zero, so when the operands differ in sign and
  there is a nonzero remainder, the quotient is decremented and the
  remainder shifted into [0, extent).  Used by the tile/mirror virtual
  pixel methods to wrap out-of-bounds coordinates.
*/
static inline MagickModulo VirtualPixelModulo(const ssize_t offset,
  const size_t extent)
{
  MagickModulo
    division;

  division.quotient=offset/((ssize_t) extent);
  division.remainder=offset % ((ssize_t) extent);
  if ((division.remainder != 0) && ((offset ^ ((ssize_t) extent)) < 0))
    {
      division.quotient--;
      division.remainder+=((ssize_t) extent);
    }
  return(division);
}
/*
  GetVirtualPixelCacheNexus() transfers the (x,y,columns,rows) region into
  the given cache nexus and returns a pointer to its pixels.  Pixels that
  fall inside the cache extents are read directly; pixels outside are
  synthesized per virtual_pixel_method (edge replicate, tile, mirror,
  dither, random, constant color, ...).  Returns NULL on failure.

  Fix: the run-of-pixels metacontent copy previously transferred only
  `length` bytes while advancing the destination by
  `length*metacontent_extent` bytes, leaving most of the metacontent
  uninitialized whenever metacontent_extent > 1 (the single-pixel path
  correctly copies metacontent_extent bytes per pixel).  It now copies
  `length*cache_info->metacontent_extent` bytes.
*/
MagickPrivate const Quantum *GetVirtualPixelCacheNexus(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;
  MagickOffsetType
    offset;
  MagickSizeType
    length,
    number_pixels;
  NexusInfo
    *magick_restrict virtual_nexus;
  Quantum
    *magick_restrict pixels,
    virtual_pixel[MaxPixelChannels];
  register const Quantum
    *magick_restrict p;
  register const void
    *magick_restrict r;
  register Quantum
    *magick_restrict q;
  register ssize_t
    i,
    u;
  register unsigned char
    *magick_restrict s;
  ssize_t
    v;
  void
    *magick_restrict virtual_metacontent;
  /*
    Acquire pixels.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((const Quantum *) NULL);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,x,y,columns,rows,
    ((image->channels & WriteMaskChannel) != 0) ||
    ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse,
    nexus_info,exception);
  if (pixels == (Quantum *) NULL)
    return((const Quantum *) NULL);
  q=pixels;
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+
    nexus_info->region.width-1L;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels))
    if ((x >= 0) && ((ssize_t) (x+columns-1) < (ssize_t) cache_info->columns) &&
        (y >= 0) && ((ssize_t) (y+rows-1) < (ssize_t) cache_info->rows))
      {
        MagickBooleanType
          status;
        /*
          Pixel request is inside cache extents.
        */
        if (nexus_info->authentic_pixel_cache != MagickFalse)
          return(q);
        status=ReadPixelCachePixels(cache_info,nexus_info,exception);
        if (status == MagickFalse)
          return((const Quantum *) NULL);
        if (cache_info->metacontent_extent != 0)
          {
            status=ReadPixelCacheMetacontent(cache_info,nexus_info,exception);
            if (status == MagickFalse)
              return((const Quantum *) NULL);
          }
        return(q);
      }
  /*
    Pixel request is outside cache extents: compose the region row by row,
    synthesizing the out-of-bounds pixels.
  */
  virtual_nexus=nexus_info->virtual_nexus;
  s=(unsigned char *) nexus_info->metacontent;
  (void) memset(virtual_pixel,0,cache_info->number_channels*
    sizeof(*virtual_pixel));
  virtual_metacontent=(void *) NULL;
  switch (virtual_pixel_method)
  {
    case BackgroundVirtualPixelMethod:
    case BlackVirtualPixelMethod:
    case GrayVirtualPixelMethod:
    case TransparentVirtualPixelMethod:
    case MaskVirtualPixelMethod:
    case WhiteVirtualPixelMethod:
    case EdgeVirtualPixelMethod:
    case CheckerTileVirtualPixelMethod:
    case HorizontalTileVirtualPixelMethod:
    case VerticalTileVirtualPixelMethod:
    {
      /*
        These methods may emit a constant pixel; precompute it (and a
        zeroed metacontent buffer when the cache carries metacontent).
      */
      if (cache_info->metacontent_extent != 0)
        {
          /*
            Acquire a metacontent buffer.
          */
          virtual_metacontent=(void *) AcquireQuantumMemory(1,
            cache_info->metacontent_extent);
          if (virtual_metacontent == (void *) NULL)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                CacheError,"UnableToGetCacheNexus","`%s'",image->filename);
              return((const Quantum *) NULL);
            }
          (void) memset(virtual_metacontent,0,cache_info->metacontent_extent);
        }
      switch (virtual_pixel_method)
      {
        case BlackVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        case GrayVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,QuantumRange/2,
              virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        case TransparentVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel);
          SetPixelAlpha(image,TransparentAlpha,virtual_pixel);
          break;
        }
        case MaskVirtualPixelMethod:
        case WhiteVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,QuantumRange,virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        default:
        {
          /*
            BackgroundVirtualPixelMethod and the remaining tiling methods
            use the image background color.
          */
          SetPixelRed(image,ClampToQuantum(image->background_color.red),
            virtual_pixel);
          SetPixelGreen(image,ClampToQuantum(image->background_color.green),
            virtual_pixel);
          SetPixelBlue(image,ClampToQuantum(image->background_color.blue),
            virtual_pixel);
          SetPixelBlack(image,ClampToQuantum(image->background_color.black),
            virtual_pixel);
          SetPixelAlpha(image,ClampToQuantum(image->background_color.alpha),
            virtual_pixel);
          break;
        }
      }
      break;
    }
    default:
      break;
  }
  for (v=0; v < (ssize_t) rows; v++)
  {
    ssize_t
      y_offset;
    y_offset=y+v;
    if ((virtual_pixel_method == EdgeVirtualPixelMethod) ||
        (virtual_pixel_method == UndefinedVirtualPixelMethod))
      y_offset=EdgeY(y_offset,cache_info->rows);
    for (u=0; u < (ssize_t) columns; u+=length)
    {
      ssize_t
        x_offset;
      x_offset=x+u;
      /*
        length = number of in-bounds pixels that can be copied as one run.
      */
      length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u);
      if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) ||
          ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) ||
          (length == 0))
        {
          MagickModulo
            x_modulo,
            y_modulo;
          /*
            Transfer a single pixel.
          */
          length=(MagickSizeType) 1;
          switch (virtual_pixel_method)
          {
            case EdgeVirtualPixelMethod:
            default:
            {
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),
                EdgeY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case RandomVirtualPixelMethod:
            {
              /* Random source is created lazily on first use. */
              if (cache_info->random_info == (RandomInfo *) NULL)
                cache_info->random_info=AcquireRandomInfo();
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                RandomX(cache_info->random_info,cache_info->columns),
                RandomY(cache_info->random_info,cache_info->rows),1UL,1UL,
                virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case DitherVirtualPixelMethod:
            {
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                DitherX(x_offset,cache_info->columns),
                DitherY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case TileVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case MirrorVirtualPixelMethod:
            {
              /* Odd tile replicas are reflected. */
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              if ((x_modulo.quotient & 0x01) == 1L)
                x_modulo.remainder=(ssize_t) cache_info->columns-
                  x_modulo.remainder-1L;
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if ((y_modulo.quotient & 0x01) == 1L)
                y_modulo.remainder=(ssize_t) cache_info->rows-
                  y_modulo.remainder-1L;
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case HorizontalTileEdgeVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL,
                virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case VerticalTileEdgeVirtualPixelMethod:
            {
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL,
                virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case BackgroundVirtualPixelMethod:
            case BlackVirtualPixelMethod:
            case GrayVirtualPixelMethod:
            case TransparentVirtualPixelMethod:
            case MaskVirtualPixelMethod:
            case WhiteVirtualPixelMethod:
            {
              /* Constant pixel precomputed above. */
              p=virtual_pixel;
              r=virtual_metacontent;
              break;
            }
            case CheckerTileVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L)
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case HorizontalTileVirtualPixelMethod:
            {
              if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows))
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case VerticalTileVirtualPixelMethod:
            {
              if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns))
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
          }
          if (p == (const Quantum *) NULL)
            break;
          (void) memcpy(q,p,(size_t) (cache_info->number_channels*length*
            sizeof(*p)));
          q+=cache_info->number_channels;
          if ((s != (void *) NULL) && (r != (const void *) NULL))
            {
              (void) memcpy(s,r,(size_t) cache_info->metacontent_extent);
              s+=cache_info->metacontent_extent;
            }
          continue;
        }
      /*
        Transfer a run of pixels.
      */
      p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x_offset,y_offset,
        (size_t) length,1UL,virtual_nexus,exception);
      if (p == (const Quantum *) NULL)
        break;
      r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
      (void) memcpy(q,p,(size_t) (cache_info->number_channels*length*
        sizeof(*p)));
      q+=cache_info->number_channels*length;
      if ((r != (void *) NULL) && (s != (const void *) NULL))
        {
          /*
            Each pixel carries metacontent_extent bytes of metacontent;
            copy the full run (was: only `length` bytes, which left the
            remainder of the destination uninitialized).
          */
          (void) memcpy(s,r,(size_t) (length*cache_info->metacontent_extent));
          s+=length*cache_info->metacontent_extent;
        }
    }
    if (u < (ssize_t) columns)
      break;
  }
  /*
    Free resources.
  */
  if (virtual_metacontent != (void *) NULL)
    virtual_metacontent=(void *) RelinquishMagickMemory(virtual_metacontent);
  if (v < (ssize_t) rows)
    return((const Quantum *) NULL);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelCache() get virtual pixels from the in-memory or disk pixel
% cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCache() method is:
%
% const Quantum *GetVirtualPixelCache(const Image *image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static const Quantum *GetVirtualPixelCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Delegate to GetVirtualPixelCacheNexus() using the calling thread's
    cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(thread_id < (int) cache_info->number_threads);
  return(GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,columns,rows,
    cache_info->nexus_info[thread_id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelQueue() returns the virtual pixels associated
% with the last call to QueueAuthenticPixels() or GetVirtualPixels().
%
% The format of the GetVirtualPixelQueue() method is:
%
% const Quantum *GetVirtualPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const Quantum *GetVirtualPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Route through the registered handler when one is installed; otherwise
    read from the calling thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_virtual_pixels_handler ==
      (GetVirtualPixelsHandler) NULL)
    {
      assert(thread_id < (int) cache_info->number_threads);
      return(GetVirtualPixelsNexus(cache_info,
        cache_info->nexus_info[thread_id]));
    }
  return(cache_info->methods.get_virtual_pixels_handler(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixels() returns an immutable pixel region. If the
% region is successfully accessed, a pointer to it is returned, otherwise
% NULL is returned. The returned pointer may point to a temporary working
% copy of the pixels or it may point to the original pixels in memory.
% Performance is maximized if the selected region is part of one row, or one
% or more full rows, since there is opportunity to access the pixels in-place
% (without a copy) if the image is in memory, or in a memory-mapped file. The
% returned pointer must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
% access the meta-content (of type void) corresponding to the
% region.
%
% If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
% Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not thread-
% safe. In a threaded environment, use GetCacheViewVirtualPixels() or
% GetCacheViewAuthenticPixels() instead.
%
% The format of the GetVirtualPixels() method is:
%
% const Quantum *GetVirtualPixels(const Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport const Quantum *GetVirtualPixels(const Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Route through the registered virtual-pixel handler when one is
    installed; otherwise resolve through the calling thread's nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_virtual_pixel_handler ==
      (GetVirtualPixelHandler) NULL)
    {
      assert(thread_id < (int) cache_info->number_threads);
      return(GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),
        x,y,columns,rows,cache_info->nexus_info[thread_id],exception));
    }
  return(cache_info->methods.get_virtual_pixel_handler(image,
    GetPixelCacheVirtualMethod(image),x,y,columns,rows,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsCache() returns the pixels associated with the
% last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualPixelsCache() method is:
%
% Quantum *GetVirtualPixelsCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const Quantum *GetVirtualPixelsCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Return the pixels held by the calling thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(thread_id < (int) cache_info->number_threads);
  return(GetVirtualPixelsNexus(image->cache,
    cache_info->nexus_info[thread_id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsNexus() returns the pixels associated with the specified
% cache nexus.
%
% The format of the GetVirtualPixelsNexus() method is:
%
% const Quantum *GetVirtualPixelsNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the colormap pixels.
%
*/
MagickPrivate const Quantum *GetVirtualPixelsNexus(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  const CacheInfo
    *magick_restrict info;

  /*
    Return the nexus pixels, or NULL when the cache has no pixels
    allocated yet (undefined storage class).
  */
  assert(cache != (Cache) NULL);
  info=(const CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->storage_class != UndefinedClass)
    return((const Quantum *) nexus_info->pixels);
  return((Quantum *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M a s k P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MaskPixelCacheNexus() masks the cache nexus as defined by the image mask.
% The method returns MagickTrue if the pixel region is masked, otherwise
% MagickFalse.
%
% The format of the MaskPixelCacheNexus() method is:
%
% MagickBooleanType MaskPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ApplyPixelCompositeMask() blends pixel p over q weighted by the mask
  alphas; a (near-)opaque mask passes p through untouched.
*/
static inline Quantum ApplyPixelCompositeMask(const Quantum p,
  const MagickRealType alpha,const Quantum q,const MagickRealType beta)
{
  double
    gamma;

  if (fabs(alpha-OpaqueAlpha) < MagickEpsilon)
    return(p);
  gamma=1.0-QuantumScale*QuantumScale*alpha*beta;
  gamma=PerceptibleReciprocal(gamma);
  return(ClampToQuantum(gamma*MagickOver_((double) p,alpha,(double) q,
    beta)));
}
/*
  MaskPixelCacheNexus() applies the image's composite mask to the pixels of
  the given cache nexus: each updatable channel of the nexus pixels (q) is
  blended against the authentic pixels (p) weighted by the per-pixel mask
  value.  Returns MagickTrue on success (or when no mask applies),
  MagickFalse when the cache is missing or the authentic pixels could not
  be read.
*/
static MagickBooleanType MaskPixelCacheNexus(Image *image,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;
  MagickSizeType
    number_pixels;
  register Quantum
    *magick_restrict p,
    *magick_restrict q;
  register ssize_t
    n;
  /*
    Apply clip mask.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* No composite mask channel: nothing to do. */
  if ((image->channels & CompositeMaskChannel) == 0)
    return(MagickTrue);
  /* Empty nexus region: nothing to do. */
  if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0))
    return(MagickTrue);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  /* p: authentic (on-disk/in-memory) pixels for the same region. */
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
    nexus_info->region.width,nexus_info->region.height,
    nexus_info->virtual_nexus,exception);
  /* q: the nexus pixels being masked in place. */
  q=nexus_info->pixels;
  number_pixels=(MagickSizeType) nexus_info->region.width*
    nexus_info->region.height;
  for (n=0; n < (ssize_t) number_pixels; n++)
  {
    double
      mask_alpha;
    register ssize_t
      i;
    /* NULL p (failed read) aborts the loop; detected below via n. */
    if (p == (Quantum *) NULL)
      break;
    mask_alpha=(double) GetPixelCompositeMask(image,p);
    for (i=0; i < (ssize_t) image->number_channels; i++)
    {
      PixelChannel channel = GetPixelChannelChannel(image,i);
      PixelTrait traits = GetPixelChannelTraits(image,channel);
      /* Only channels flagged for update are blended. */
      if ((traits & UpdatePixelTrait) == 0)
        continue;
      q[i]=ApplyPixelCompositeMask(p[i],mask_alpha,q[i],(MagickRealType)
        GetPixelAlpha(image,q));
    }
    p+=GetPixelChannels(image);
    q+=GetPixelChannels(image);
  }
  /* n short of number_pixels means the authentic read failed. */
  if (n < (ssize_t) number_pixels)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ O p e n P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpenPixelCache() allocates the pixel cache. This includes defining the cache
% dimensions, allocating space for the image pixels and optionally the
% metacontent, and memory mapping the cache if it is disk based. The cache
% nexus array is initialized as well.
%
% The format of the OpenPixelCache() method is:
%
% MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  OpenPixelCacheOnDisk() opens (or creates) the backing file for a
  disk-based pixel cache in the requested map mode, replacing any
  previously open descriptor.  Returns MagickTrue on success.
*/
static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info,
  const MapMode mode)
{
  int
    file;
  /*
    Open pixel cache on disk.
  */
  if ((cache_info->file != -1) && (cache_info->disk_mode == mode))
    return(MagickTrue);  /* cache already open and in the proper mode */
  /* Empty filename: allocate a fresh unique temporary cache file. */
  if (*cache_info->cache_filename == '\0')
    file=AcquireUniqueFileResource(cache_info->cache_filename);
  else
    switch (mode)
    {
      case ReadMode:
      {
        file=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0);
        break;
      }
      case WriteMode:
      {
        /* Try exclusive create first; fall back to an existing file. */
        file=open_utf8(cache_info->cache_filename,O_WRONLY | O_CREAT |
          O_BINARY | O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_WRONLY | O_BINARY,S_MODE);
        break;
      }
      case IOMode:
      default:
      {
        /* Read/write: same exclusive-create-then-open fallback. */
        file=open_utf8(cache_info->cache_filename,O_RDWR | O_CREAT | O_BINARY |
          O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_RDWR | O_BINARY,S_MODE);
        break;
      }
    }
  if (file == -1)
    return(MagickFalse);
  (void) AcquireMagickResource(FileResource,1);
  /* Release the old descriptor only after the new open succeeded. */
  if (cache_info->file != -1)
    (void) ClosePixelCacheOnDisk(cache_info);
  cache_info->file=file;
  cache_info->disk_mode=mode;
  return(MagickTrue);
}
/*
  WritePixelCacheRegion() writes `length` bytes from `buffer` to the pixel
  cache file at `offset`, retrying partial writes and EINTR interruptions.
  Returns the number of bytes written (which is less than `length` on a
  persistent error), or -1 when the initial seek fails.
*/
static inline MagickOffsetType WritePixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;
  ssize_t
    count;
#if !defined(MAGICKCORE_HAVE_PWRITE)
  /* Without pwrite(), position the file once and use sequential write(). */
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
    /* Write in chunks of at most SSIZE_MAX bytes. */
#if !defined(MAGICKCORE_HAVE_PWRITE)
    count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX));
#else
    count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX),offset+i);
#endif
    if (count <= 0)
      {
        /* Retry when interrupted by a signal; give up otherwise. */
        count=0;
        if (errno != EINTR)
          break;
      }
  }
  return(i);
}
/*
  SetPixelCacheExtent() grows the disk cache file to at least `length`
  bytes by writing a single byte at offset length-1 (sparse extension),
  optionally pre-allocating the blocks via posix_fallocate().  Leaves the
  file position at the start of the file.  Returns MagickTrue on success.
*/
static MagickBooleanType SetPixelCacheExtent(Image *image,MagickSizeType length)
{
  CacheInfo
    *magick_restrict cache_info;
  MagickOffsetType
    count,
    extent,
    offset;
  cache_info=(CacheInfo *) image->cache;
  if (image->debug != MagickFalse)
    {
      char
        format[MagickPathExtent],
        message[MagickPathExtent];
      (void) FormatMagickSize(length,MagickFalse,"B",MagickPathExtent,format);
      (void) FormatLocaleString(message,MagickPathExtent,
        "extend %s (%s[%d], disk, %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /* Reject lengths that do not fit in a signed MagickOffsetType. */
  if (length != (MagickSizeType) ((MagickOffsetType) length))
    return(MagickFalse);
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END);
  if (offset < 0)
    return(MagickFalse);
  /* Already large enough: nothing to write. */
  if ((MagickSizeType) offset >= length)
    count=(MagickOffsetType) 1;
  else
    {
      /* Extend by writing one byte at the last requested offset. */
      extent=(MagickOffsetType) length-1;
      count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *)
        "");
      if (count != 1)
        return(MagickFalse);
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
      /* Optionally force real block allocation for the new range. */
      if (cache_info->synchronize != MagickFalse)
        if (posix_fallocate(cache_info->file,offset+1,extent-offset) != 0)
          return(MagickFalse);
#endif
    }
  /* Rewind so subsequent reads/writes start from the beginning. */
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_SET);
  if (offset < 0)
    return(MagickFalse);
  return(MagickTrue);
}
/*
  OpenPixelCache() opens the pixel cache for the image, choosing a backing
  store in order of preference: ping (no pixels), in-core memory (heap or
  anonymous map), a distributed cache server, a memory-mapped disk file, or a
  plain disk file.  On success MagickTrue is returned; on failure the cache
  type is reset to UndefinedCache and an exception is raised.

    o image: the image whose pixel cache is opened.

    o mode: the map mode (e.g. ReadMode, PersistMode) requested by the caller.

    o exception: return any errors or warnings in this structure.
*/
static MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    source_info;

  char
    format[MagickPathExtent],
    message[MagickPathExtent];

  const char
    *hosts,
    *type;

  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  size_t
    columns,
    packet_size;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (cache_anonymous_memory < 0)
    {
      char
        *value;

      /*
        Does the security policy require anonymous mapping for pixel cache?
        (Evaluated once; cache_anonymous_memory < 0 means "not yet decided".)
      */
      cache_anonymous_memory=0;
      value=GetPolicyValue("pixel-cache-memory");
      if (value == (char *) NULL)
        value=GetPolicyValue("cache:memory-map");
      if (LocaleCompare(value,"anonymous") == 0)
        {
#if defined(MAGICKCORE_HAVE_MMAP) && defined(MAP_ANONYMOUS)
          cache_anonymous_memory=1;
#else
          (void) ThrowMagickException(exception,GetMagickModule(),
            MissingDelegateError,"DelegateLibrarySupportNotBuiltIn",
            "'%s' (policy requires anonymous memory mapping)",image->filename);
#endif
        }
      value=DestroyString(value);
    }
  if ((image->columns == 0) || (image->rows == 0))
    ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (((MagickSizeType) image->columns > cache_info->width_limit) ||
      ((MagickSizeType) image->rows > cache_info->height_limit))
    ThrowBinaryException(ImageError,"WidthOrHeightExceedsLimit",
      image->filename);
  length=GetImageListLength(image);
  if (AcquireMagickResource(ListLengthResource,length) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"ListLengthExceedsLimit",
      image->filename);
  /*
    Snapshot the current cache so pixels can be cloned into the new store;
    source_info gets its own (closed) file descriptor slot.
  */
  source_info=(*cache_info);
  source_info.file=(-1);
  (void) FormatLocaleString(cache_info->filename,MagickPathExtent,"%s[%.20g]",
    image->filename,(double) image->scene);
  cache_info->storage_class=image->storage_class;
  cache_info->colorspace=image->colorspace;
  cache_info->alpha_trait=image->alpha_trait;
  cache_info->channels=image->channels;
  cache_info->rows=image->rows;
  cache_info->columns=image->columns;
  InitializePixelChannelMap(image);
  cache_info->number_channels=GetPixelChannels(image);
  (void) memcpy(cache_info->channel_map,image->channel_map,MaxPixelChannels*
    sizeof(*image->channel_map));
  cache_info->metacontent_extent=image->metacontent_extent;
  cache_info->mode=mode;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  packet_size=cache_info->number_channels*sizeof(Quantum);
  if (image->metacontent_extent != 0)
    packet_size+=cache_info->metacontent_extent;
  length=number_pixels*packet_size;
  /*
    Reverse-derive the column count from the computed length to detect
    arithmetic overflow in number_pixels*packet_size.
  */
  columns=(size_t) (length/cache_info->rows/packet_size);
  if ((cache_info->columns != columns) || ((ssize_t) cache_info->columns < 0) ||
      ((ssize_t) cache_info->rows < 0))
    ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed",
      image->filename);
  cache_info->length=length;
  if (image->ping != MagickFalse)
    {
      /*
        Ping cache: image metadata only, no pixel storage is allocated.
      */
      cache_info->storage_class=image->storage_class;
      cache_info->colorspace=image->colorspace;
      cache_info->type=PingCache;
      return(MagickTrue);
    }
  status=AcquireMagickResource(AreaResource,(MagickSizeType)
    cache_info->columns*cache_info->rows);
  if (cache_info->mode == PersistMode)
    status=MagickFalse;
  length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
    cache_info->metacontent_extent);
  if ((status != MagickFalse) &&
      (length == (MagickSizeType) ((size_t) length)) &&
      ((cache_info->type == UndefinedCache) ||
       (cache_info->type == MemoryCache)))
    {
      /*
        Try an in-core pixel cache (heap, or anonymous map when the security
        policy requires it).
      */
      status=AcquireMagickResource(MemoryResource,cache_info->length);
      if (status != MagickFalse)
        {
          status=MagickTrue;
          if (cache_anonymous_memory <= 0)
            {
              cache_info->mapped=MagickFalse;
              cache_info->pixels=(Quantum *) MagickAssumeAligned(
                AcquireAlignedMemory(1,(size_t) cache_info->length));
            }
          else
            {
              cache_info->mapped=MagickTrue;
              cache_info->pixels=(Quantum *) MapBlob(-1,IOMode,0,(size_t)
                cache_info->length);
            }
          if (cache_info->pixels == (Quantum *) NULL)
            {
              /*
                Allocation failed; restore the previous pixel store and fall
                through to the disk-based strategies below.
              */
              cache_info->mapped=source_info.mapped;
              cache_info->pixels=source_info.pixels;
            }
          else
            {
              /*
                Create memory pixel cache.
              */
              cache_info->type=MemoryCache;
              cache_info->metacontent=(void *) NULL;
              if (cache_info->metacontent_extent != 0)
                cache_info->metacontent=(void *) (cache_info->pixels+
                  cache_info->number_channels*number_pixels);
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickTrue,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->mapped != MagickFalse ?
                    "Anonymous" : "Heap",type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              cache_info->storage_class=image->storage_class;
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
    }
  status=AcquireMagickResource(DiskResource,cache_info->length);
  hosts=(const char *) GetImageRegistry(StringRegistryType,"cache:hosts",
    exception);
  if ((status == MagickFalse) && (hosts != (const char *) NULL))
    {
      DistributeCacheInfo
        *server_info;

      /*
        Distribute the pixel cache to a remote server.
      */
      server_info=AcquireDistributeCacheInfo(exception);
      if (server_info != (DistributeCacheInfo *) NULL)
        {
          status=OpenDistributePixelCache(server_info,image);
          if (status == MagickFalse)
            {
              ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
                GetDistributeCacheHostname(server_info));
              server_info=DestroyDistributeCacheInfo(server_info);
            }
          else
            {
              /*
                Create a distributed pixel cache.
              */
              status=MagickTrue;
              cache_info->type=DistributedCache;
              cache_info->server_info=server_info;
              (void) FormatLocaleString(cache_info->cache_filename,
                MagickPathExtent,"%s:%d",GetDistributeCacheHostname(
                (DistributeCacheInfo *) cache_info->server_info),
                GetDistributeCachePort((DistributeCacheInfo *)
                cache_info->server_info));
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickFalse,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->cache_filename,
                    GetDistributeCacheFile((DistributeCacheInfo *)
                    cache_info->server_info),type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
      cache_info->type=UndefinedCache;
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  /*
    Create pixel cache on disk.
  */
  if (status == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode) &&
      (cache_info->mode != PersistMode))
    {
      /*
        Discard any previous on-disk cache file name before reopening.
      */
      (void) ClosePixelCacheOnDisk(cache_info);
      *cache_info->cache_filename='\0';
    }
  if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
        image->filename);
      return(MagickFalse);
    }
  status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+
    cache_info->length);
  if (status == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      ThrowFileException(exception,CacheError,"UnableToExtendCache",
        image->filename);
      return(MagickFalse);
    }
  length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
    cache_info->metacontent_extent);
  if (length != (MagickSizeType) ((size_t) length))
    cache_info->type=DiskCache;
  else
    {
      /*
        Prefer a memory-mapped view of the disk file when the map resource
        limit and cache type permit it; otherwise fall back to DiskCache.
      */
      status=AcquireMagickResource(MapResource,cache_info->length);
      if (status == MagickFalse)
        cache_info->type=DiskCache;
      else
        if ((cache_info->type != MapCache) && (cache_info->type != MemoryCache))
          {
            cache_info->type=DiskCache;
            RelinquishMagickResource(MapResource,cache_info->length);
          }
        else
          {
            cache_info->pixels=(Quantum *) MapBlob(cache_info->file,mode,
              cache_info->offset,(size_t) cache_info->length);
            if (cache_info->pixels == (Quantum *) NULL)
              {
                cache_info->type=DiskCache;
                cache_info->mapped=source_info.mapped;
                cache_info->pixels=source_info.pixels;
                RelinquishMagickResource(MapResource,cache_info->length);
              }
            else
              {
                /*
                  Create file-backed memory-mapped pixel cache.
                */
                (void) ClosePixelCacheOnDisk(cache_info);
                cache_info->type=MapCache;
                cache_info->mapped=MagickTrue;
                cache_info->metacontent=(void *) NULL;
                if (cache_info->metacontent_extent != 0)
                  cache_info->metacontent=(void *) (cache_info->pixels+
                    cache_info->number_channels*number_pixels);
                if ((source_info.storage_class != UndefinedClass) &&
                    (mode != ReadMode))
                  {
                    status=ClonePixelCacheRepository(cache_info,&source_info,
                      exception);
                    RelinquishPixelCachePixels(&source_info);
                  }
                if (image->debug != MagickFalse)
                  {
                    (void) FormatMagickSize(cache_info->length,MagickTrue,"B",
                      MagickPathExtent,format);
                    type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                      cache_info->type);
                    (void) FormatLocaleString(message,MagickPathExtent,
                      "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
                      cache_info->filename,cache_info->cache_filename,
                      cache_info->file,type,(double) cache_info->columns,
                      (double) cache_info->rows,(double)
                      cache_info->number_channels,format);
                    (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                      message);
                  }
                if (status == 0)
                  {
                    cache_info->type=UndefinedCache;
                    return(MagickFalse);
                  }
                return(MagickTrue);
              }
          }
    }
  status=MagickTrue;
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode))
    {
      status=ClonePixelCacheRepository(cache_info,&source_info,exception);
      RelinquishPixelCachePixels(&source_info);
    }
  if (image->debug != MagickFalse)
    {
      (void) FormatMagickSize(cache_info->length,MagickFalse,"B",
        MagickPathExtent,format);
      type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
        cache_info->type);
      (void) FormatLocaleString(message,MagickPathExtent,
        "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,type,(double)
        cache_info->columns,(double) cache_info->rows,(double)
        cache_info->number_channels,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  if (status == 0)
    {
      cache_info->type=UndefinedCache;
      return(MagickFalse);
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r s i s t P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PersistPixelCache() attaches to or initializes a persistent pixel cache. A
% persistent pixel cache is one that resides on disk and is not destroyed
% when the program exits.
%
% The format of the PersistPixelCache() method is:
%
% MagickBooleanType PersistPixelCache(Image *image,const char *filename,
% const MagickBooleanType attach,MagickOffsetType *offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o filename: the persistent pixel cache filename.
%
%    o attach: A value other than zero attaches to an existing persistent
%      pixel cache; a value of zero initializes the persistent pixel cache.
%
% o offset: the offset in the persistent cache to store pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType PersistPixelCache(Image *image,
  const char *filename,const MagickBooleanType attach,MagickOffsetType *offset,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    *magick_restrict clone_info;

  MagickBooleanType
    status;

  ssize_t
    page_size;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (void *) NULL);
  assert(filename != (const char *) NULL);
  assert(offset != (MagickOffsetType *) NULL);
  page_size=GetMagickPageSize();
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Flush any device-resident pixels back to the host before persisting. */
  CopyOpenCLBuffer(cache_info);
#endif
  if (attach != MagickFalse)
    {
      /*
        Attach existing persistent pixel cache: memory-map the on-disk cache
        file at *offset and advance the caller's offset past this image's
        pixels (rounded up by the page-size arithmetic below).
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CacheEvent,GetMagickModule(),
          "attach persistent cache");
      (void) CopyMagickString(cache_info->cache_filename,filename,
        MagickPathExtent);
      cache_info->type=MapCache;
      cache_info->offset=(*offset);
      if (OpenPixelCache(image,ReadMode,exception) == MagickFalse)
        return(MagickFalse);
      *offset+=cache_info->length+page_size-(cache_info->length % page_size);
      return(MagickTrue);
    }
  /*
    Clone persistent pixel cache.
  */
  status=AcquireMagickResource(DiskResource,cache_info->length);
  if (status == MagickFalse)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  /*
    Build a disk-backed clone with the same geometry/channel layout as the
    current cache, write the pixels into it at *offset, then destroy the
    temporary clone descriptor.
  */
  clone_info=(CacheInfo *) ClonePixelCache(cache_info);
  clone_info->type=DiskCache;
  (void) CopyMagickString(clone_info->cache_filename,filename,MagickPathExtent);
  clone_info->file=(-1);
  clone_info->storage_class=cache_info->storage_class;
  clone_info->colorspace=cache_info->colorspace;
  clone_info->alpha_trait=cache_info->alpha_trait;
  clone_info->channels=cache_info->channels;
  clone_info->columns=cache_info->columns;
  clone_info->rows=cache_info->rows;
  clone_info->number_channels=cache_info->number_channels;
  clone_info->metacontent_extent=cache_info->metacontent_extent;
  clone_info->mode=PersistMode;
  clone_info->length=cache_info->length;
  (void) memcpy(clone_info->channel_map,cache_info->channel_map,
    MaxPixelChannels*sizeof(*cache_info->channel_map));
  clone_info->offset=(*offset);
  status=ClonePixelCacheRepository(clone_info,cache_info,exception);
  *offset+=cache_info->length+page_size-(cache_info->length % page_size);
  clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixelCacheNexus() allocates a region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelCacheNexus() method is:
%
% Quantum *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% const MagickBooleanType clone,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to set.
%
% o clone: clone the pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *QueueAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    number_pixels;

  Quantum
    *magick_restrict pixels;

  /*
    Validate pixel cache geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception);
  if (cache_info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) ||
      (y < 0) || (x >= (ssize_t) cache_info->columns) ||
      (y >= (ssize_t) cache_info->rows))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "PixelsAreNotAuthentic","`%s'",image->filename);
      return((Quantum *) NULL);
    }
  /* Linear index of the region origin; negative means arithmetic overflow. */
  offset=(MagickOffsetType) y*cache_info->columns+x;
  if (offset < 0)
    return((Quantum *) NULL);
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  /* Advance to the region's last pixel; reject if it lies outside the cache. */
  offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1;
  if ((MagickSizeType) offset >= number_pixels)
    return((Quantum *) NULL);
  /*
    Return pixel cache.
  */
  pixels=SetPixelCacheNexusPixels(cache_info,WriteMode,x,y,columns,rows,
    ((image->channels & WriteMaskChannel) != 0) ||
    ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse,
    nexus_info,exception);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixelsCache() allocates a region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelsCache() method is:
%
% Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Queue a writable pixel region through this thread's private cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u e u e A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixels() queues a mutable pixel region. If the region is
% successfully initialized a pointer to a Quantum array representing the
% region is returned, otherwise NULL is returned. The returned pointer may
% point to a temporary working buffer for the pixels or it may point to the
% final location of the pixels in memory.
%
% Write-only access means that any existing pixel values corresponding to
% the region are ignored. This is useful if the initial image is being
% created from scratch, or if the existing pixel values are to be
% completely replaced without need to refer to their pre-existing values.
% The application is free to read and write the pixel buffer returned by
% QueueAuthenticPixels() any way it pleases. QueueAuthenticPixels() does not
% initialize the pixel array values. Initializing pixel array values is the
% application's responsibility.
%
% Performance is maximized if the selected region is part of one row, or
% one or more full rows, since then there is opportunity to access the
% pixels in-place (without a copy) if the image is in memory, or in a
% memory-mapped file. The returned pointer must *never* be deallocated
% by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
% obtain the meta-content (of type void) corresponding to the region.
% Once the Quantum pixel (and/or metacontent) array has been updated, the
% changes must be saved back to the underlying image using
% SyncAuthenticPixels() or they may be lost.
%
% The format of the QueueAuthenticPixels() method is:
%
% Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Dispatch to an installed queue handler if one is registered; otherwise
    queue the region through this thread's private cache nexus.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    return(cache_info->methods.queue_authentic_pixels_handler(image,x,y,
      columns,rows,exception));
  assert(id < (int) cache_info->number_threads);
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCacheMetacontent() reads metacontent from the specified region of
% the pixel cache.
%
% The format of the ReadPixelCacheMetacontent() method is:
%
% MagickBooleanType ReadPixelCacheMetacontent(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the metacontent.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ReadPixelCacheRegion() reads up to `length' bytes from the cache file at
  `offset' into `buffer', retrying after EINTR and accumulating partial reads.
  Returns the number of bytes actually read (which is less than `length' on a
  non-EINTR error or end-of-file), or -1 if the initial seek fails.
*/
static inline MagickOffsetType ReadPixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PREAD)
  /* No pread(): position the shared file offset explicitly. */
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PREAD)
    count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX));
#else
    count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX),offset+i);
#endif
    if (count <= 0)
      {
        /* Retry only when interrupted by a signal; otherwise bail out. */
        count=0;
        if (errno != EINTR)
          break;
      }
  }
  return(i);
}
/*
  ReadPixelCacheMetacontent() copies the metacontent for the nexus region out
  of the backing store (memory/map, disk, or distributed server) into the
  nexus metacontent buffer.  Returns MagickTrue on success.

  Fix: the distributed-cache call passed a mis-encoded `®ion' token (an
  HTML-entity corruption of `&region'), which does not compile; restored to
  `&region'.
*/
static MagickBooleanType ReadPixelCacheMetacontent(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register ssize_t
    y;

  register unsigned char
    *magick_restrict q;

  size_t
    rows;

  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  /* The nexus already aliases the cache's own storage; nothing to copy. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;
  extent=length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  q=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register unsigned char
        *magick_restrict p;

      /*
        Read meta-content from memory.  Full-width regions collapse to a
        single contiguous copy.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      p=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->metacontent_extent*cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read meta content from disk; metacontent follows the pixel data in
        the cache file (hence the extent*number_channels*sizeof(Quantum)
        displacement below).
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        offset+=cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read metacontent from distributed cache, one row per request unless
        the whole region is contiguous and small enough.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* A short loop means some row failed to transfer. */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCachePixels() reads pixels from the specified region of the pixel
% cache.
%
% The format of the ReadPixelCachePixels() method is:
%
% MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ReadPixelCachePixels() copies the pixels for the nexus region out of the
  backing store (memory/map, disk, or distributed server) into the nexus
  pixel buffer.  Returns MagickTrue on success.

  Fix: the distributed-cache call passed a mis-encoded `®ion' token (an
  HTML-entity corruption of `&region'), which does not compile; restored to
  `&region'.
*/
static MagickBooleanType ReadPixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register Quantum
    *magick_restrict q;

  register ssize_t
    y;

  size_t
    number_channels,
    rows;

  /* The nexus already aliases the cache's own storage; nothing to copy. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns;
  /* Overflow guard: division must recover the original row index. */
  if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y)
    return(MagickFalse);
  offset+=nexus_info->region.x;
  number_channels=cache_info->number_channels;
  length=(MagickSizeType) number_channels*nexus_info->region.width*
    sizeof(Quantum);
  /* Overflow guard on the per-row byte length. */
  if ((length/number_channels/sizeof(Quantum)) != nexus_info->region.width)
    return(MagickFalse);
  rows=nexus_info->region.height;
  extent=length*rows;
  /* Overflow guard on the total transfer size. */
  if ((extent == 0) || ((extent/length) != rows))
    return(MagickFalse);
  y=0;
  q=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register Quantum
        *magick_restrict p;

      /*
        Read pixels from memory.  Full-width regions collapse to a single
        contiguous copy.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      p=cache_info->pixels+cache_info->number_channels*offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read pixels from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*q),length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        offset+=cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read pixels from distributed cache, one row per request unless the
        whole region is contiguous and small enough.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* A short loop means some row failed to transfer. */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e f e r e n c e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferencePixelCache() increments the reference count associated with the
% pixel cache returning a pointer to the cache.
%
% The format of the ReferencePixelCache method is:
%
%      Cache ReferencePixelCache(Cache cache)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
*/
/*
  ReferencePixelCache() increments the cache's reference count under its
  semaphore and returns the cache.

  Fix: the NULL check cast the constant to `(Cache *)' (i.e. void **), a type
  mismatch against the `Cache' (void *) parameter; use `(Cache) NULL' for
  consistency with every other cache assert in this file.
*/
MagickPrivate Cache ReferencePixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Serialize the count update against concurrent reference/destroy calls. */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count++;
  UnlockSemaphoreInfo(cache_info->semaphore);
  return(cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t P i x e l C a c h e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetPixelCacheChannels() resets the pixel cache channels.
%
% The format of the ResetPixelCacheChannels method is:
%
% void ResetPixelCacheChannels(Image *)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickPrivate void ResetPixelCacheChannels(Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Re-sync the cached channel count with the image's pixel channel map. */
  cache_info->number_channels=GetPixelChannels(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t C a c h e A n o n y m o u s M e m o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetCacheAnonymousMemory() resets the anonymous_memory value.
%
% The format of the ResetCacheAnonymousMemory method is:
%
% void ResetCacheAnonymousMemory(void)
%
*/
MagickPrivate void ResetCacheAnonymousMemory(void)
{
  /*
    Clear the module-global anonymous-memory preference.  A value <= 0
    makes AcquireCacheNexusPixels() use aligned heap allocation instead of
    an anonymous memory map; presumably the value is re-derived from policy
    on next use — confirm against the cache initialization code.
  */
  cache_anonymous_memory=0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t P i x e l C a c h e E p o c h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetPixelCacheEpoch() resets the pixel cache epoch.
%
% The format of the ResetPixelCacheEpoch method is:
%
% void ResetPixelCacheEpoch(void)
%
*/
MagickPrivate void ResetPixelCacheEpoch(void)
{
  /*
    Reset the module-global cache epoch counter.  Its consumers are not
    visible in this translation-unit excerpt; presumably it timestamps
    cache validity — confirm against GetImagePixelCache().
  */
  cache_epoch=0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheMethods() sets the image pixel methods to the specified ones.
%
% The format of the SetPixelCacheMethods() method is:
%
% void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickPrivate void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
{
  CacheInfo
    *magick_restrict cache_info;

  GetOneAuthenticPixelFromHandler
    get_one_authentic_pixel_from_handler;

  GetOneVirtualPixelFromHandler
    get_one_virtual_pixel_from_handler;

  /*
    Set cache pixel methods.  Each handler in cache_methods overrides the
    cache's current handler only when non-NULL, so callers may supply a
    sparsely-populated CacheMethods structure.
  */
  assert(cache != (Cache) NULL);
  assert(cache_methods != (CacheMethods *) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL)
    cache_info->methods.get_virtual_pixel_handler=
      cache_methods->get_virtual_pixel_handler;
  if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL)
    cache_info->methods.destroy_pixel_handler=
      cache_methods->destroy_pixel_handler;
  if (cache_methods->get_virtual_metacontent_from_handler !=
      (GetVirtualMetacontentFromHandler) NULL)
    cache_info->methods.get_virtual_metacontent_from_handler=
      cache_methods->get_virtual_metacontent_from_handler;
  if (cache_methods->get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    cache_info->methods.get_authentic_pixels_handler=
      cache_methods->get_authentic_pixels_handler;
  if (cache_methods->queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    cache_info->methods.queue_authentic_pixels_handler=
      cache_methods->queue_authentic_pixels_handler;
  if (cache_methods->sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    cache_info->methods.sync_authentic_pixels_handler=
      cache_methods->sync_authentic_pixels_handler;
  if (cache_methods->get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    cache_info->methods.get_authentic_pixels_from_handler=
      cache_methods->get_authentic_pixels_from_handler;
  if (cache_methods->get_authentic_metacontent_from_handler !=
      (GetAuthenticMetacontentFromHandler) NULL)
    cache_info->methods.get_authentic_metacontent_from_handler=
      cache_methods->get_authentic_metacontent_from_handler;
  /*
    NOTE(review): unlike every other handler above, the guard below tests
    the cache's CURRENT get-one-virtual handler (cache_info->methods) rather
    than the replacement in cache_methods, while the authentic case that
    follows tests cache_methods.  The asymmetry may be intentional — confirm
    upstream before "fixing" it.
  */
  get_one_virtual_pixel_from_handler=
    cache_info->methods.get_one_virtual_pixel_from_handler;
  if (get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    cache_info->methods.get_one_virtual_pixel_from_handler=
      cache_methods->get_one_virtual_pixel_from_handler;
  get_one_authentic_pixel_from_handler=
    cache_methods->get_one_authentic_pixel_from_handler;
  if (get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    cache_info->methods.get_one_authentic_pixel_from_handler=
      cache_methods->get_one_authentic_pixel_from_handler;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e N e x u s P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheNexusPixels() defines the region of the cache for the
% specified cache nexus.
%
% The format of the SetPixelCacheNexusPixels() method is:
%
% Quantum *SetPixelCacheNexusPixels(
% const CacheInfo *magick_restrict cache_info,const MapMode mode,
% const ssize_t x,const ssize_t y,const size_t width,const size_t height,
% const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o x,y,width,height: define the region of this particular cache nexus.
%
% o buffered: if true, nexus pixels are buffered.
%
% o nexus_info: the cache nexus to set.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  AcquireCacheNexusPixels() allocates nexus_info->cache to hold `length'
  bytes — either aligned heap memory or an anonymous memory map — and
  records the allocation in nexus_info.  Returns MagickFalse (with an
  exception raised) if the request cannot be satisfied.
*/
static inline MagickBooleanType AcquireCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MagickSizeType length,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  /*
    Reject lengths that do not round-trip through size_t (e.g. > 4GB on a
    32-bit build): the allocators below take size_t.
  */
  if (length != (MagickSizeType) ((size_t) length))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
  if (cache_anonymous_memory <= 0)
    {
      /*
        Heap path: cache-line aligned allocation, zero-initialized.
      */
      nexus_info->cache=(Quantum *) MagickAssumeAligned(AcquireAlignedMemory(1,
        (size_t) length));
      if (nexus_info->cache != (Quantum *) NULL)
        (void) memset(nexus_info->cache,0,(size_t) length);
    }
  else
    {
      /*
        Anonymous memory-map path (fd of -1); `mapped' is set so the
        matching release presumably unmaps instead of freeing — confirm in
        the nexus destructor.
      */
      nexus_info->cache=(Quantum *) MapBlob(-1,IOMode,0,(size_t) length);
      if (nexus_info->cache != (Quantum *) NULL)
        nexus_info->mapped=MagickTrue;
    }
  if (nexus_info->cache == (Quantum *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=length;
  return(MagickTrue);
}
/*
  PrefetchPixelCacheNexusPixels() hints the CPU to pull the next cache line
  of the nexus pixels: read-intent for ReadMode, write-intent otherwise.
  Regions smaller than one cache line are not worth prefetching.  The
  rw/locality arguments stay as literal constants in each branch because
  MagickCachePrefetch may expand to a builtin whose arguments must be
  compile-time constants.
*/
static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info,
  const MapMode mode)
{
  if (nexus_info->length < CACHE_LINE_SIZE)
    return;
  if (mode != ReadMode)
    {
      /* write-intent prefetch */
      MagickCachePrefetch((unsigned char *) nexus_info->pixels+
        CACHE_LINE_SIZE,1,1);
      return;
    }
  /* read-intent prefetch */
  MagickCachePrefetch((unsigned char *) nexus_info->pixels+CACHE_LINE_SIZE,
    0,1);
}
/*
  SetPixelCacheNexusPixels() binds the nexus to the requested region: either
  directly onto the in-memory cache pixels (fast path) or onto a private
  staging buffer that is later synced back.  Returns the pixel pointer, or
  NULL on error (exception raised).
*/
static Quantum *SetPixelCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MapMode mode,
  const ssize_t x,const ssize_t y,const size_t width,const size_t height,
  const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  assert(cache_info != (const CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((Quantum *) NULL);
  assert(nexus_info->signature == MagickCoreSignature);
  (void) memset(&nexus_info->region,0,sizeof(nexus_info->region));
  if ((width == 0) || (height == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "NoPixelsDefinedInCache","`%s'",cache_info->filename);
      return((Quantum *) NULL);
    }
  if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
      (buffered == MagickFalse))
    {
      /*
        Direct access is only possible when the region's rows are contiguous
        in the cache: full-width rows starting at x == 0, or a single row
        that lies entirely within the cache columns.
      */
      if (((x >= 0) && (y >= 0) &&
          (((ssize_t) height+y-1) < (ssize_t) cache_info->rows)) &&
          (((x == 0) && (width == cache_info->columns)) || ((height == 1) &&
          (((ssize_t) width+x-1) < (ssize_t) cache_info->columns))))
        {
          MagickOffsetType
            offset;

          /*
            Pixels are accessed directly from memory.
          */
          offset=(MagickOffsetType) y*cache_info->columns+x;
          nexus_info->pixels=cache_info->pixels+cache_info->number_channels*
            offset;
          nexus_info->metacontent=(void *) NULL;
          if (cache_info->metacontent_extent != 0)
            nexus_info->metacontent=(unsigned char *) cache_info->metacontent+
              offset*cache_info->metacontent_extent;
          nexus_info->region.width=width;
          nexus_info->region.height=height;
          nexus_info->region.x=x;
          nexus_info->region.y=y;
          nexus_info->authentic_pixel_cache=MagickTrue;
          PrefetchPixelCacheNexusPixels(nexus_info,mode);
          return(nexus_info->pixels);
        }
    }
  /*
    Pixels are stored in a staging region until they are synced to the cache.
  */
  if (((MagickSizeType) width > cache_info->width_limit) ||
      ((MagickSizeType) height > cache_info->height_limit))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "WidthOrHeightExceedsLimit","`%s'",cache_info->filename);
      return((Quantum *) NULL);
    }
  number_pixels=(MagickSizeType) width*height;
  /*
    Size the staging buffer to at least one full cache row/column so it can
    be reused across requests; metacontent (if any) is appended after the
    pixel data.
  */
  length=MagickMax(number_pixels,MagickMax(cache_info->columns,
    cache_info->rows))*cache_info->number_channels*sizeof(*nexus_info->pixels);
  if (cache_info->metacontent_extent != 0)
    length+=number_pixels*cache_info->metacontent_extent;
  status=MagickTrue;
  if (nexus_info->cache == (Quantum *) NULL)
    status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
  else
    if (nexus_info->length < length)
      {
        /* existing staging buffer is too small: grow it */
        RelinquishCacheNexusPixels(nexus_info);
        status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
      }
  if (status == MagickFalse)
    return((Quantum *) NULL);
  nexus_info->pixels=nexus_info->cache;
  nexus_info->metacontent=(void *) NULL;
  if (cache_info->metacontent_extent != 0)
    nexus_info->metacontent=(void *) (nexus_info->pixels+
      cache_info->number_channels*number_pixels);
  nexus_info->region.width=width;
  nexus_info->region.height=height;
  nexus_info->region.x=x;
  nexus_info->region.y=y;
  nexus_info->authentic_pixel_cache=cache_info->type == PingCache ?
    MagickTrue : MagickFalse;
  PrefetchPixelCacheNexusPixels(nexus_info,mode);
  return(nexus_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the
% pixel cache and returns the previous setting. A virtual pixel is any pixel
% access that is outside the boundaries of the image cache.
%
% The format of the SetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
% const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SetCacheAlphaChannel() marks the image as blended and sets every pixel's
  alpha channel to `alpha', processing rows in parallel when OpenMP is
  enabled.  Returns MagickFalse if any row fails to read or sync.
*/
static MagickBooleanType SetCacheAlphaChannel(Image *image,const Quantum alpha,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  CacheView
    *magick_restrict image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  image->alpha_trait=BlendPixelTrait;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);  /* must be virtual */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /* a prior row failed: skip the remaining work (cannot break in omp for) */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelAlpha(image,alpha,q);
      q+=GetPixelChannels(image);
    }
    status=SyncCacheViewAuthenticPixels(image_view,exception);
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
MagickPrivate VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
  const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  VirtualPixelMethod
    method;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* remember the previous setting so it can be returned to the caller */
  method=cache_info->virtual_pixel_method;
  cache_info->virtual_pixel_method=virtual_pixel_method;
  if ((image->columns != 0) && (image->rows != 0))
    switch (virtual_pixel_method)
    {
      case BackgroundVirtualPixelMethod:
      {
        /*
          A translucent background color requires the image to carry an
          alpha channel; a non-gray background requires a non-gray
          colorspace.
        */
        if ((image->background_color.alpha_trait != UndefinedPixelTrait) &&
            (image->alpha_trait == UndefinedPixelTrait))
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
        if ((IsPixelInfoGray(&image->background_color) == MagickFalse) &&
            (IsGrayColorspace(image->colorspace) != MagickFalse))
          (void) SetImageColorspace(image,sRGBColorspace,exception);
        break;
      }
      case TransparentVirtualPixelMethod:
      {
        /* transparent virtual pixels require an alpha channel */
        if (image->alpha_trait == UndefinedPixelTrait)
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
        break;
      }
      default:
        break;
    }
  return(method);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticOpenCLBuffer() makes sure that all the OpenCL operations have
% been completed and updates the host memory.
%
% The format of the SyncAuthenticOpenCLBuffer() method is:
%
% void SyncAuthenticOpenCLBuffer(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  CopyOpenCLBuffer() synchronizes the OpenCL device buffer with host memory
  for a memory-backed cache; it is a no-op when the cache is not a
  MemoryCache or has no OpenCL buffer attached.
*/
static void CopyOpenCLBuffer(CacheInfo *magick_restrict cache_info)
{
  assert(cache_info != (CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if ((cache_info->type != MemoryCache) ||
      (cache_info->opencl == (MagickCLCacheInfo) NULL))
    return;
  /*
    Ensure single threaded access to OpenCL environment.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
  UnlockSemaphoreInfo(cache_info->semaphore);
}
MagickPrivate void SyncAuthenticOpenCLBuffer(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  /* delegate to CopyOpenCLBuffer(), which no-ops for non-OpenCL caches */
  cache_info=(CacheInfo *) image->cache;
  CopyOpenCLBuffer(cache_info);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the
% in-memory or disk cache. The method returns MagickTrue if the pixel region
% is synced, otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelCacheNexus() method is:
%
% MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to sync.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    status;

  /*
    Transfer pixels to the cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->cache == (Cache) NULL)
    ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return(MagickFalse);
  /* apply write/composite masks before committing the region */
  if (image->mask_trait != UpdatePixelTrait)
    {
      if (((image->channels & WriteMaskChannel) != 0) &&
          (ClipPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
        return(MagickFalse);
      if (((image->channels & CompositeMaskChannel) != 0) &&
          (MaskPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
        return(MagickFalse);
    }
  /*
    When the nexus points directly into the authentic cache, the pixels are
    already in place: just mark the image tainted.
  */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    {
      if (image->taint == MagickFalse)
        image->taint=MagickTrue;
      return(MagickTrue);
    }
  assert(cache_info->signature == MagickCoreSignature);
  status=WritePixelCachePixels(cache_info,nexus_info,exception);
  if ((cache_info->metacontent_extent != 0) &&
      (WritePixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  if ((status != MagickFalse) && (image->taint == MagickFalse))
    image->taint=MagickTrue;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelsCache() saves the authentic image pixels to the in-memory
% or disk cache. The method returns MagickTrue if the pixel region is synced,
% otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelsCache() method is:
%
% MagickBooleanType SyncAuthenticPixelsCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SyncAuthenticPixelsCache() flushes the calling thread's cache nexus back
  to the in-memory or disk pixel cache; returns MagickTrue when the region
  synced cleanly.
*/
static MagickBooleanType SyncAuthenticPixelsCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* each OpenMP thread owns its own nexus slot */
  assert(id < (int) cache_info->number_threads);
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncAuthenticPixels() method is:
%
% MagickBooleanType SyncAuthenticPixels(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixels(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* honor a custom sync handler if one was registered via SetPixelCacheMethods */
  if (cache_info->methods.sync_authentic_pixels_handler != (SyncAuthenticPixelsHandler) NULL)
    {
      status=cache_info->methods.sync_authentic_pixels_handler(image,
        exception);
      return(status);
    }
  /* default: sync this thread's nexus directly */
  assert(id < (int) cache_info->number_threads);
  status=SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImagePixelCache() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncImagePixelCache() method is:
%
% MagickBooleanType SyncImagePixelCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SyncImagePixelCache() forces the image pixel cache to be (re)acquired in
  IOMode via GetImagePixelCache(); MagickTrue means the cache is usable.
*/
MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (Image *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,MagickTrue,exception);
  if (cache_info == (CacheInfo *) NULL)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e P i x e l C a c h e M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCacheMetacontent() writes the meta-content to the specified region
% of the pixel cache.
%
% The format of the WritePixelCacheMetacontent() method is:
%
% MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the meta-content.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  WritePixelCacheMetacontent() writes the nexus's meta-content rows to the
  backing store (memory, disk, or distributed cache).  Returns MagickTrue
  on success; MagickFalse if the cache has no metacontent or a row fails to
  write.

  Fix: the DistributedCache path passed the mojibake token `®ion' (a
  mangled HTML entity) where `&region' — the address of the local
  RectangleInfo — is required, matching the pixel variant below.
*/
static MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const unsigned char
    *magick_restrict p;

  register ssize_t
    y;

  size_t
    rows;

  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  /* nexus already aliases the authentic cache: nothing to copy */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;
  extent=(MagickSizeType) length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register unsigned char
        *magick_restrict q;

      /*
        Write associated pixels to memory.  A full-width region that fits
        in size_t collapses to a single contiguous copy.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=nexus_info->region.width*cache_info->metacontent_extent;
        q+=cache_info->columns*cache_info->metacontent_extent;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write associated pixels to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      /*
        On disk the metacontent plane follows the pixel plane; `extent' is
        reused here as the pixel-plane size (columns*rows) to skip past it.
      */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write metacontent to distributed cache, one row at a time unless the
        whole region is contiguous and fits the transfer buffer.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* a short write leaves y < rows: report the failure */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCachePixels() writes image pixels to the specified region of the
% pixel cache.
%
% The format of the WritePixelCachePixels() method is:
%
% MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  WritePixelCachePixels() writes the nexus's staged pixel rows to the
  backing store (memory, disk, or distributed cache).  Returns MagickTrue
  on success; MagickFalse if any row fails to write.

  Fix: the DistributedCache path passed the mojibake token `®ion' (a
  mangled HTML entity) where `&region' — the address of the local
  RectangleInfo — is required.
*/
static MagickBooleanType WritePixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const Quantum
    *magick_restrict p;

  register ssize_t
    y;

  size_t
    rows;

  /* nexus already aliases the authentic cache: nothing to copy */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) cache_info->number_channels*nexus_info->region.width*
    sizeof(Quantum);
  extent=length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register Quantum
        *magick_restrict q;

      /*
        Write pixels to memory.  A full-width region that fits in size_t
        collapses to a single contiguous copy.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=cache_info->pixels+cache_info->number_channels*offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*nexus_info->region.width;
        q+=cache_info->number_channels*cache_info->columns;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write pixels to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*p),length,(const unsigned char *)
          p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->number_channels*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write pixels to distributed cache, one row at a time unless the
        whole region is contiguous and fits the transfer buffer.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* a short write leaves y < rows: report the failure */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
|
GxB_UnaryOp_ztype_name.c | //------------------------------------------------------------------------------
// GxB_UnaryOp_ztype_name: return the type_name of z for z=f(x)
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#include "GB.h"
GrB_Info GxB_UnaryOp_ztype_name // return the name of the type of z
(
    char *type_name,            // name of the type (char array of size at least
                                // GxB_MAX_NAME_LEN, owned by the user application).
    const GrB_UnaryOp unaryop
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GB_WHERE1 ("GxB_UnaryOp_ztype_name (type_name, op)") ;
    GB_RETURN_IF_NULL (type_name) ;
    GB_RETURN_IF_NULL_OR_FAULTY (unaryop) ;
    ASSERT_UNARYOP_OK (unaryop, "unaryop for ztype_name", GB0) ;

    //--------------------------------------------------------------------------
    // get the type_name
    //--------------------------------------------------------------------------

    // fixed-size copy of the whole name field; assumes ztype->name is a
    // GxB_MAX_NAME_LEN buffer per the GxB naming API -- confirm in GB.h
    memcpy (type_name, unaryop->ztype->name, GxB_MAX_NAME_LEN) ;
    // presumably makes the copied name visible across threads before return
    #pragma omp flush
    return (GrB_SUCCESS) ;
}
|
revert_kernel_c.c | /*Crown Copyright 2012 AWE.
*
* This file is part of CloverLeaf.
*
* CloverLeaf is free software: you can redistribute it and/or modify it under
* the terms of the GNU General Public License as published by the
* Free Software Foundation, either version 3 of the License, or (at your option)
* any later version.
*
* CloverLeaf is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License along with
* CloverLeaf. If not, see http://www.gnu.org/licenses/. */
/**
* @brief C revert kernel.
* @author Wayne Gaudin
* @details Takes the half step field data used in the predictor and reverts
* it to the start of step data, ready for the corrector.
* Note that this does not seem necessary in this proxy-app but should be
* left in to remain relevant to the full method.
*/
#include <stdio.h>
#include <stdlib.h>
#include "ftocmacros.h"
#include <math.h>
/*
 * Revert kernel: copies the saved start-of-step density and energy fields
 * (density0/energy0) back over the half-step fields (density1/energy1),
 * ready for the corrector.  Fortran-callable, hence the pointer scalars
 * and the trailing underscore.
 */
void revert_kernel_c_(int *xmin,int *xmax,int *ymin,int *ymax,
                      double *density0,
                      double *density1,
                      double *energy0,
                      double *energy1)
{
  int x_min=*xmin;
  int x_max=*xmax;
  int y_min=*ymin;
  int y_max=*ymax;
  int j,k;

#pragma omp parallel
 {
  /* Restore the start-of-step density field. */
#pragma omp for private(j)
  for (k=y_min;k<=y_max;k++) {
#pragma ivdep
    for (j=x_min;j<=x_max;j++) {
      /* block-scope index is private to each iteration; computed once */
      int index=FTNREF2D(j,k,x_max+4,x_min-2,y_min-2);
      density1[index]=density0[index];
    }
  }

  /* Restore the start-of-step energy field. */
#pragma omp for private(j)
  for (k=y_min;k<=y_max;k++) {
#pragma ivdep
    for (j=x_min;j<=x_max;j++) {
      int index=FTNREF2D(j,k,x_max+4,x_min-2,y_min-2);
      energy1[index]=energy0[index];
    }
  }
 }
}
|
knn_utils.h | /*
*
* Copyright (c) 2019, BIOVAULT (Leiden University Medical Center, Delft University of Technology)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the Delft University of Technology.
* 4. Neither the name of the Delft University of Technology nor the names of
* its contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY BIOVAULT ''AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL NICOLA PEZZOTTI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
*/
#ifndef KNN_H
#define KNN_H
#include <atomic>
#include <map>
#include <mutex>
#include <string>
#include <thread>
#include <vector>
namespace hdi{
  namespace dr{
    //! Supported (approximate) k-nearest-neighbour backend libraries.
    enum knn_library
    {
      KNN_FLANN = -1,
      KNN_HNSW = 0,
      KNN_ANNOY = 1
    };

    //! Distance metrics; which ones are usable depends on the chosen
    //! backend (see supported_knn_library_distance_metrics).
    enum knn_distance_metric
    {
      KNN_METRIC_EUCLIDEAN = 0,
      KNN_METRIC_COSINE = 1,
      KNN_METRIC_INNER_PRODUCT = 2,
      KNN_METRIC_MANHATTAN = 3,
      KNN_METRIC_HAMMING = 4,
      KNN_METRIC_DOT = 5
    };

    //! Returns the number of supported KNN libraries.
    //! This function should be considered deprecated and kept for backward compatibility. New code should use the supported_knn_libraries function.
    int HierarchicalSNE_NrOfKnnAlgorithms();

    //! Returns both the name/label and index of the supported KNN libraries since this can depend on compiler support.
    //! This function is especially useful for building user-interfaces where the user can select which KNN library to use for a specific task (e.g. t-SNE or HSNE).
    //! Alternatively it can be used to offer a look-up table to translate the currently set KNN Library index back to human readable text.
    std::map<std::string, int> supported_knn_libraries();

    //! Returns the name/label and index of distance metrics supported by a specific KNN library.
    //! This function is especially useful for building user-interfaces where the user can select both a KNN library and a distance metric since not every KNN library supports the same distance metric.
    //! Alternatively it can be used to offer a look-up table to translate the currently set KNN distance metric index back to human readable text.
    std::map<std::string, int> supported_knn_library_distance_metrics(int knn_lib);
  }
}
namespace hnswlib {
  /*
   * Replacement for the OpenMP '#pragma omp parallel for' directive.
   * Only handles a subset of functionality (no reductions etc).
   * Processes ids from start (inclusive) to end (EXCLUSIVE), invoking
   * fn(id, threadId).  If any invocation throws, remaining work is
   * cancelled and the last exception is rethrown on the calling thread
   * after all workers have joined.
   *
   * The method is borrowed from nmslib https://github.com/nmslib/nmslib/blob/v2.1.1/similarity_search/include/thread_pool.h#L62
   * and used in the hnswlib examples as well https://github.com/nmslib/hnswlib/blob/v0.5.0/examples/updates_test.cpp
   */
  template<class Function>
  inline void ParallelFor(size_t start, size_t end, size_t numThreads, Function fn) {
    // numThreads is unsigned, so the old "<= 0" test could only ever mean
    // "== 0" (0 requests auto-detection).
    if (numThreads == 0) {
      numThreads = std::thread::hardware_concurrency();
      // Bug fix: hardware_concurrency() is allowed to return 0 when the
      // core count is not detectable; the original then spawned zero
      // worker threads and silently did no work.  Fall back to serial.
      if (numThreads == 0) {
        numThreads = 1;
      }
    }

    if (numThreads == 1) {
      // Serial fast path: no thread-creation overhead.
      for (size_t id = start; id < end; id++) {
        fn(id, 0);
      }
    }
    else {
      std::vector<std::thread> threads;
      std::atomic<size_t> current(start);

      // keep track of exceptions in threads
      // https://stackoverflow.com/a/32428427/1713196
      std::exception_ptr lastException = nullptr;
      std::mutex lastExceptMutex;

      for (size_t threadId = 0; threadId < numThreads; ++threadId) {
        threads.push_back(std::thread([&, threadId] {
          while (true) {
            size_t id = current.fetch_add(1);

            if (id >= end) {
              break;
            }

            try {
              fn(id, threadId);
            }
            catch (...) {
              std::unique_lock<std::mutex> lastExcepLock(lastExceptMutex);
              lastException = std::current_exception();
              /*
               * This will work even when current is the largest value that
               * size_t can fit, because fetch_add returns the previous value
               * before the increment (what will result in overflow
               * and produce 0 instead of current + 1).
               */
              current = end;
              break;
            }
          }
        }));
      }
      for (auto &thread : threads) {
        thread.join();
      }
      if (lastException) {
        std::rethrow_exception(lastException);
      }
    }
  }
}
#endif // KNN_H
|
optimizer.c | /*
*
*/
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include "cint.h"
#include "cvhf.h"
#include "optimizer.h"
#define MAX(I,J) ((I) > (J) ? (I) : (J))
int int2e_sph();
int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter,
int *atm, int natm, int *bas, int nbas, double *env);
/*
 * Allocate a CVHFOpt and initialize it with no-op screening: q_cond and
 * dm_cond are unset and the prescreen callbacks default to CVHFnoscreen /
 * CVHFr_vknoscreen.  The caller owns *opt and releases it with
 * CVHFdel_optimizer.  atm/natm/bas/env are accepted for interface
 * uniformity but not used here.
 * NOTE(review): malloc's return value is not checked — on allocation
 * failure the stores below dereference NULL.
 */
void CVHFinit_optimizer(CVHFOpt **opt, int *atm, int natm,
                        int *bas, int nbas, double *env)
{
        CVHFOpt *opt0 = (CVHFOpt *)malloc(sizeof(CVHFOpt));
        opt0->nbas = nbas;
        opt0->direct_scf_cutoff = 1e-14;
        opt0->q_cond = NULL;
        opt0->dm_cond = NULL;
        opt0->fprescreen = &CVHFnoscreen;
        opt0->r_vkscreen = &CVHFr_vknoscreen;
        *opt = opt0;
}
/*
 * Free an optimizer created by CVHFinit_optimizer and reset the caller's
 * handle to NULL.  Safe to call when *opt is already NULL.
 *
 * Bug fix: the NULL checks were inverted (`if (!opt0->q_cond) free(...)`),
 * so free() was only ever called on NULL pointers and the real q_cond /
 * dm_cond buffers leaked on every teardown — which explains the valgrind
 * leak mentioned in CVHFsetnr_direct_scf.
 */
void CVHFdel_optimizer(CVHFOpt **opt)
{
        CVHFOpt *opt0 = *opt;
        if (!opt0) {
                return;
        }

        if (opt0->q_cond) {
                free(opt0->q_cond);
                opt0->q_cond = NULL;
        }
        if (opt0->dm_cond) {
                free(opt0->dm_cond);
                opt0->dm_cond = NULL;
        }

        free(opt0);
        *opt = NULL;
}
/* Prescreening stub: never screens anything out.  Always reports the
 * shell quartet as significant (nonzero return). */
int CVHFnoscreen(int *shls, CVHFOpt *opt,
                 int *atm, int *bas, double *env)
{
        /* All arguments are intentionally unused. */
        (void)shls;
        (void)opt;
        (void)atm;
        (void)bas;
        (void)env;
        return 1;
}
/*
 * Schwarz-inequality screening for the shell quartet (i,j|k,l): the
 * quartet is kept only when the bound q_cond[i,j] * q_cond[k,l] exceeds
 * the direct-SCF cutoff.  A NULL opt disables screening (keep all).
 * Returns nonzero when the quartet must be computed.
 */
int CVHFnr_schwarz_cond(int *shls, CVHFOpt *opt,
                        int *atm, int *bas, double *env)
{
        if (!opt) {
                return 1;
        }
        int i = shls[0];
        int j = shls[1];
        int k = shls[2];
        int l = shls[3];
        int n = opt->nbas;
        assert(opt->q_cond);
        assert(i < n);
        assert(j < n);
        assert(k < n);
        assert(l < n);
        /* Cauchy-Schwarz bound: |(ij|kl)| <= sqrt((ij|ij)) * sqrt((kl|kl)) */
        double qijkl = opt->q_cond[i*n+j] * opt->q_cond[k*n+l];
        return qijkl > opt->direct_scf_cutoff;
}
/*
 * Density-weighted Schwarz prescreening for 8-fold permutation symmetry.
 * A quartet survives only if the Schwarz bound qijkl passes the cutoff
 * AND at least one density-matrix block it touches is large enough that
 * its contribution (qijkl * dm) can exceed the cutoff.
 * NOTE(review): the factors of 4 on dm_cond[j,i] and dm_cond[l,k]
 * presumably account for the J-contraction multiplicity under the 8-fold
 * symmetry — confirm against the contraction code.
 * A NULL opt disables screening.
 */
int CVHFnrs8_prescreen(int *shls, CVHFOpt *opt,
                       int *atm, int *bas, double *env)
{
        if (!opt) {
                return 1; // no screen
        }
        int i = shls[0];
        int j = shls[1];
        int k = shls[2];
        int l = shls[3];
        int n = opt->nbas;
        assert(opt->q_cond);
        assert(opt->dm_cond);
        assert(i < n);
        assert(j < n);
        assert(k < n);
        assert(l < n);
        double qijkl = opt->q_cond[i*n+j] * opt->q_cond[k*n+l];
        /* smallest density element that can still contribute above cutoff
         * (qijkl >= 1e-100 by construction in CVHFsetnr_direct_scf, so no
         * division by zero) */
        double dmin = opt->direct_scf_cutoff / qijkl;
        return qijkl > opt->direct_scf_cutoff
            &&((4*opt->dm_cond[j*n+i] > dmin)
            || (4*opt->dm_cond[l*n+k] > dmin)
            || (  opt->dm_cond[j*n+k] > dmin)
            || (  opt->dm_cond[j*n+l] > dmin)
            || (  opt->dm_cond[i*n+k] > dmin)
            || (  opt->dm_cond[i*n+l] > dmin));
}
/* No-op screening for the relativistic K build.  Marks every density as
 * "no condensed info available" (NULL), zeroes the lower bound, and
 * returns the flag that decides whether to transpose01324. */
int CVHFr_vknoscreen(int *shls, CVHFOpt *opt,
                     double **dms_cond, int n_dm, double *dm_atleast,
                     int *atm, int *bas, double *env)
{
        for (int k = 0; k < n_dm; k++) {
                dms_cond[k] = NULL;
        }
        *dm_atleast = 0;
        return 1;
}
/* Set the screening threshold used by the prescreen predicates above. */
void CVHFset_direct_scf_cutoff(CVHFOpt *opt, double cutoff)
{
        opt->direct_scf_cutoff = cutoff;
}

/* Return the current screening threshold. */
double CVHFget_direct_scf_cutoff(CVHFOpt *opt)
{
        return opt->direct_scf_cutoff;
}
/*
 * Precompute the Schwarz bound matrix
 *     q_cond[ish,jsh] = sqrt( max_{p in ish, q in jsh} |(pq|pq)| )
 * for all shell pairs, symmetrized over (ish,jsh).  The diagonal
 * integrals are obtained by evaluating the quartet (ish,jsh|ish,jsh)
 * and reading the buf[p,q,p,q] elements.
 * NOTE(review): the malloc results (q_cond, cache, buf) are not checked.
 */
void CVHFsetnr_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
                          int *ao_loc, int *atm, int natm,
                          int *bas, int nbas, double *env)
{
        /* This memory is released in void CVHFdel_optimizer, Don't know
         * why valgrind raises memory leak here */
        if (opt->q_cond) {
                free(opt->q_cond);
        }
        opt->q_cond = (double *)malloc(sizeof(double) * nbas*nbas);

        int shls_slice[] = {0, nbas};
        /* const-qualified, hence predetermined shared under default(none) */
        const int cache_size = GTOmax_cache_size(intor, shls_slice, 1,
                                                 atm, natm, bas, nbas, env);
#pragma omp parallel default(none) \
        shared(opt, intor, cintopt, ao_loc, atm, natm, bas, nbas, env)
{
        double qtmp, tmp;
        int ij, i, j, di, dj, ish, jsh;
        int shls[4];
        double *cache = malloc(sizeof(double) * cache_size);
        /* find the largest shell dimension to size the integral buffer */
        di = 0;
        for (ish = 0; ish < nbas; ish++) {
                dj = ao_loc[ish+1] - ao_loc[ish];
                di = MAX(di, dj);
        }
        double *buf = malloc(sizeof(double) * di*di*di*di);
#pragma omp for schedule(dynamic, 4)
        /* iterate the lower triangle ij = ish*(ish+1)/2 + jsh */
        for (ij = 0; ij < nbas*(nbas+1)/2; ij++) {
                /* invert the triangular index (1e-7 guards fp rounding) */
                ish = (int)(sqrt(2*ij+.25) - .5 + 1e-7);
                jsh = ij - ish*(ish+1)/2;
                di = ao_loc[ish+1] - ao_loc[ish];
                dj = ao_loc[jsh+1] - ao_loc[jsh];
                shls[0] = ish;
                shls[1] = jsh;
                shls[2] = ish;
                shls[3] = jsh;
                /* floor value keeps later divisions by q_cond finite */
                qtmp = 1e-100;
                if (0 != (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env,
                                  cintopt, cache)) {
                        /* scan the (pq|pq) diagonal of the quartet block */
                        for (i = 0; i < di; i++) {
                        for (j = 0; j < dj; j++) {
                                tmp = fabs(buf[i+di*j+di*dj*i+di*dj*di*j]);
                                qtmp = MAX(qtmp, tmp);
                        } }
                        qtmp = sqrt(qtmp);
                }
                opt->q_cond[ish*nbas+jsh] = qtmp;
                opt->q_cond[jsh*nbas+ish] = qtmp;
        }
        free(buf);
        free(cache);
}
}
/*
 * Fill opt->dm_cond with the shell-pair-blocked max-abs of the density
 * matrices:
 *     dm_cond[ish,jsh] = max over the nset matrices and over AO indices
 *                        (i in ish, j in jsh) of |dm[i,j]|.
 * Used by the density-weighted prescreen (CVHFnrs8_prescreen).
 */
void CVHFsetnr_direct_scf_dm(CVHFOpt *opt, double *dm, int nset, int *ao_loc,
                             int *atm, int natm, int *bas, int nbas, double *env)
{
        if (opt->dm_cond) { // NOT reuse opt->dm_cond because nset may be diff in different call
                free(opt->dm_cond);
        }
        opt->dm_cond = (double *)malloc(sizeof(double) * nbas*nbas);
        memset(opt->dm_cond, 0, sizeof(double)*nbas*nbas);

        const int nao = ao_loc[nbas];
        double dmax, tmp;
        int i, j, ish, jsh;
        int iset;
        double *pdm;
        for (ish = 0; ish < nbas; ish++) {
        for (jsh = 0; jsh < nbas; jsh++) {
                dmax = 0;
                /* take the max across all nset density matrices */
                for (iset = 0; iset < nset; iset++) {
                        pdm = dm + nao*nao*iset;
                        for (i = ao_loc[ish]; i < ao_loc[ish+1]; i++) {
                        for (j = ao_loc[jsh]; j < ao_loc[jsh+1]; j++) {
                                tmp = fabs(pdm[i*nao+j]);
                                dmax = MAX(dmax, tmp);
                        } }
                }
                opt->dm_cond[ish*nbas+jsh] = dmax;
        } }
}
/*
*************************************************
*/
/*
 * Convenience constructor: create an optimizer, install the 8-fold
 * symmetric density-weighted prescreen, and precompute the Schwarz
 * bounds (q_cond).  dm_cond must still be provided separately via
 * CVHFsetnr_direct_scf_dm before density screening is meaningful.
 */
void CVHFnr_optimizer(CVHFOpt **vhfopt, int (*intor)(), CINTOpt *cintopt,
                      int *ao_loc, int *atm, int natm,
                      int *bas, int nbas, double *env)
{
        CVHFinit_optimizer(vhfopt, atm, natm, bas, nbas, env);
        (*vhfopt)->fprescreen = &CVHFnrs8_prescreen;
        CVHFsetnr_direct_scf(*vhfopt, intor, cintopt, ao_loc,
                             atm, natm, bas, nbas, env);
}
|
run_sign_verify.c | /*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
*/
/*
Sign a message and verify the signature
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <amcl/utils.h>
#include <amcl/randapi.h>
#include <amcl/bls_BLS381.h>
#include <oqs/oqs.h>
#include <pqnist/pqnist.h>
#define NTHREADS 8
#define MAXSIZE 256
#define G2LEN 4*BFS_BLS381
#define SIGLEN BFS_BLS381+1
/*
 * Demo driver: generate NTHREADS BLS/SIKE key pairs, BLS-sign one message
 * per key pair and verify every signature, parallelising the crypto
 * operations with OpenMP.  Exits EXIT_FAILURE on the first crypto error,
 * EXIT_SUCCESS otherwise.
 *
 * Fixes relative to the previous revision:
 *  - `rc` was a single variable shared by all iterations of the OpenMP
 *    parallel loops — a data race; it is now local to each iteration.
 *  - BLSsk[i] (BGS_BLS381 bytes) was cleansed with the unrelated, larger
 *    OQS_SIG_picnic_L5_FS_length_secret_key, writing past the buffer.
 *  - bzero() (declared in <strings.h>, which is not included) replaced
 *    with standard memset().
 */
int main()
{
    int i;

    // Seed value for CSPRNG (deterministic per slot: every byte equals i)
    char seed[NTHREADS][PQNIST_SEED_LENGTH];

    // Message to be sent to Bob
    char p[NTHREADS][MAXSIZE];
    octet P[NTHREADS];

    // BLS signature
    char s[NTHREADS][SIGLEN];
    octet S[NTHREADS];

    // Initialise seed
    for(i=0; i<NTHREADS; i++)
    {
        for(int j=0; j<PQNIST_SEED_LENGTH; j++)
        {
            seed[i][j] = i;
        }
    }

    // Generate SIKE and BLS keys

    // Bob's SIKE keys (not used)
    uint8_t SIKEpk[NTHREADS][OQS_KEM_sike_p751_length_public_key];
    uint8_t SIKEsk[NTHREADS][OQS_KEM_sike_p751_length_secret_key];

    // Alice's BLS keys
    char BLSsk[NTHREADS][BGS_BLS381];
    char BLSpk[NTHREADS][G2LEN];

    #pragma omp parallel for
    for(i=0; i<NTHREADS; i++)
    {
        // rc is declared inside the loop so each thread has its own copy
        int rc = pqnist_keys(seed[i], SIKEpk[i], SIKEsk[i], BLSpk[i], BLSsk[i]);
        if (rc)
        {
            fprintf(stderr, "ERROR pqnist_keys rc: %d\n", rc);
            exit(EXIT_FAILURE);
        }

        printf("BLS pklen %d pk: ", G2LEN);
        amcl_print_hex(BLSpk[i], G2LEN);
        printf("BLS sklen %d BLS sk: ", BGS_BLS381);
        amcl_print_hex(BLSsk[i], BGS_BLS381);
        printf("\n");
    }

    // Alice prepares the plaintext messages
    for(i=0; i<NTHREADS; i++)
    {
        memset(p[i], 0, sizeof(p[i]));
        P[i].max = MAXSIZE;
        P[i].len = sprintf(p[i], "Hello Bob! This is a message from Alice %d", i);
        P[i].val = p[i];
        printf("Alice Plaintext: ");
        OCT_output_string(&P[i]);
        printf("\n");
    }

    for(i=0; i<NTHREADS; i++)
    {
        memset(s[i], 0, sizeof(s[i]));
        S[i].max = SIGLEN;
        S[i].len = SIGLEN;
        S[i].val = s[i];
    }

    #pragma omp parallel for
    for(i=0; i<NTHREADS; i++)
    {
        // Alice signs message (rc local: avoids the shared-rc race)
        int rc = pqnist_sign(P[i].val, BLSsk[i], S[i].val);
        if(rc)
        {
            fprintf(stderr, "ERROR pqnist_sign rc: %d\n", rc);
            printf("FAILURE\n");
            exit(EXIT_FAILURE);
        }
        printf("Alice SIGlen %d SIG", S[i].len);
        OCT_output(&S[i]);
        printf("\n");
    }

    #pragma omp parallel for
    for(i=0; i<NTHREADS; i++)
    {
        // Bob verifies message (rc local: avoids the shared-rc race)
        int rc = pqnist_verify(P[i].val, BLSpk[i], S[i].val);
        if (rc)
        {
            fprintf(stderr, "ERROR pqnist_verify rc: %d\n", rc);
            exit(EXIT_FAILURE);
        }
        else
        {
            printf("SUCCESS Test %d pqnist_verify rc: %d\n", i, rc);
            OCT_output_string(&P[i]);
            printf("\n");
        }
    }

    // Clear sensitive key material
    for(i=0; i<NTHREADS; i++)
    {
        OQS_MEM_cleanse(SIKEsk[i], OQS_KEM_sike_p751_length_secret_key);
        // Bug fix: cleanse with the buffer's own length, not the Picnic
        // secret-key length (which overran the BGS_BLS381-byte buffer)
        OQS_MEM_cleanse(BLSsk[i], BGS_BLS381);
        OCT_clear(&P[i]);
        OCT_clear(&S[i]);
    }

    printf("TEST PASSED\n");
    exit(EXIT_SUCCESS);
}
|
displacement_op_cuda.h | // -----------------------------------------------------------------------------
//
// Copyright (C) The BioDynaMo Project.
// All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------
#ifndef DISPLACEMENT_OP_CUDA_H_
#define DISPLACEMENT_OP_CUDA_H_
#include <vector>
#include "bound_space_op.h"
#include "gpu/displacement_op_cuda_kernel.h"
#include "log.h"
#include "resource_manager.h"
#include "shape.h"
#include "type_util.h"
namespace bdm {
using std::array;
/// Defines the 3D physical interactions between physical objects
/// Defines the 3D physical interactions between physical objects,
/// offloading the displacement computation to the GPU via CUDA.
template <typename TGrid = Grid<>>
class DisplacementOpCuda {
 public:
  DisplacementOpCuda() {}
  // NOTE(review): cdo_ is never deleted here; freeing it in the destructor
  // would require disabling copies first (implicit copy would double-free).
  ~DisplacementOpCuda() {}

  /// Computes cell displacements on the GPU, then applies the resulting
  /// movements on the host after all updates have been calculated.
  ///
  /// @param cells     SOA container of spherical simulation objects
  /// @param type_idx  container type index (unused in this overload)
  template <typename TContainer>
  typename std::enable_if<is_soa_sphere<TContainer>::value>::type operator()(
      TContainer* cells, uint16_t type_idx) {
    auto& grid = TGrid::GetInstance();
    std::vector<std::array<double, 3>> cell_movements(cells->size());
    std::vector<double> mass(cells->size());
    std::vector<uint32_t> starts;
    std::vector<uint16_t> lengths;
    std::vector<uint32_t> successors(cells->size());
    uint32_t box_length;
    uint32_t num_objects = cells->size();
    std::array<uint32_t, 3> num_boxes_axis;
    std::array<int32_t, 3> grid_dimensions;
    double squared_radius =
        grid.GetLargestObjectSize() * grid.GetLargestObjectSize();

    // We need to create a mass vector, because it is not stored by default in
    // a cell container
    cells->FillMassVector(&mass);
    grid.GetSuccessors(&successors);
    grid.GetBoxInfo(&starts, &lengths);
    grid.GetGridInfo(&box_length, &num_boxes_axis, &grid_dimensions);

    // If this is the first time we perform physics on GPU using CUDA
    if (cdo_ == nullptr) {
      // Allocate 25% more memory so we don't need to reallocate GPU memory
      // for every (small) change
      uint32_t new_num_objects = static_cast<uint32_t>(1.25 * num_objects);
      uint32_t new_num_boxes = static_cast<uint32_t>(1.25 * starts.size());

      // Store these extended buffer sizes for future reference
      num_objects_ = new_num_objects;
      num_boxes_ = new_num_boxes;

      // Allocate required GPU memory
      cdo_ = new DisplacementOpCudaKernel(new_num_objects, new_num_boxes);
    } else {
      // If the number of simulation objects increased
      if (num_objects >= num_objects_) {
        Log::Info("DisplacementOpCuda",
                  "\nThe number of cells increased signficantly (from ",
                  num_objects_, " to ", num_objects,
                  "), so we allocate bigger GPU buffers\n");
        uint32_t new_num_objects = static_cast<uint32_t>(1.25 * num_objects);
        num_objects_ = new_num_objects;
        cdo_->ResizeCellBuffers(new_num_objects);
      }

      // If the neighbor grid size increased
      if (starts.size() >= num_boxes_) {
        // Bug fix: the new size (starts.size()) was missing from the log
        // arguments, producing messages like "(from 100 to )".
        Log::Info("DisplacementOpCuda",
                  "\nThe number of boxes increased signficantly (from ",
                  num_boxes_, " to ", starts.size(),
                  "), so we allocate bigger GPU buffers\n");
        uint32_t new_num_boxes = static_cast<uint32_t>(1.25 * starts.size());
        num_boxes_ = new_num_boxes;
        cdo_->ResizeGridBuffers(new_num_boxes);
      }
    }

    cdo_->LaunchDisplacementKernel(
        cells->GetPositionPtr(), cells->GetDiameterPtr(),
        cells->GetTractorForcePtr(), cells->GetAdherencePtr(),
        cells->GetBoxIdPtr(), mass.data(), &(Param::simulation_time_step_),
        &(Param::simulation_max_displacement_), &squared_radius, &num_objects,
        starts.data(), lengths.data(), successors.data(), &box_length,
        num_boxes_axis.data(), grid_dimensions.data(),
        cell_movements.data()->data());

    // set new positions after all updates have been calculated
    // otherwise some cells would see neighbors with already updated positions
    // which would lead to inconsistencies
#pragma omp parallel for
    for (size_t i = 0; i < cells->size(); i++) {
      auto&& cell = (*cells)[i];
      cell.UpdatePosition(cell_movements[i]);
      if (Param::bound_space_) {
        ApplyBoundingBox(&cell, Param::min_bound_, Param::max_bound_);
      }
      cell.SetPosition(cell.GetPosition());

      // Reset biological movement to 0.
      cell.SetTractorForce({0, 0, 0});
    }
  }

  /// Fallback overload: reaching this at runtime is a usage error, since
  /// the CUDA path supports only SOA containers of spherical objects.
  template <typename TContainer>
  typename std::enable_if<!is_soa_sphere<TContainer>::value>::type operator()(
      TContainer* cells, uint16_t type_idx) {
    Fatal("DisplacementOpCuda",
          "You tried to compile GPU-specific function calls for a non-SOA data "
          "structure or non-spherical simulation object.");
  }

 private:
  DisplacementOpCudaKernel* cdo_ = nullptr;  ///< GPU kernel wrapper (owned)
  uint32_t num_boxes_ = 0;    ///< currently allocated GPU grid-box capacity
  uint32_t num_objects_ = 0;  ///< currently allocated GPU cell capacity
};
} // namespace bdm
#endif // DISPLACEMENT_OP_CUDA_H_
|
pfmg2_setup_rap.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision: 2.20 $
***********************************************************************EHEADER*/
#include "_hypre_struct_ls.h"
#include "pfmg.h"
/*--------------------------------------------------------------------------
* Macro to "change coordinates". This routine is written as though
* coarsening is being done in the y-direction. This macro is used to
* allow for coarsening to be done in the x-direction also.
*--------------------------------------------------------------------------*/
#define MapIndex(in_index, cdir, out_index) \
hypre_IndexD(out_index, 2) = hypre_IndexD(in_index, 2); \
hypre_IndexD(out_index, cdir) = hypre_IndexD(in_index, 1); \
cdir = (cdir + 1) % 2; \
hypre_IndexD(out_index, cdir) = hypre_IndexD(in_index, 0); \
cdir = (cdir + 1) % 2;
/*--------------------------------------------------------------------------
* Sets up new coarse grid operator stucture.
*--------------------------------------------------------------------------*/
hypre_StructMatrix *
hypre_PFMG2CreateRAPOp( hypre_StructMatrix *R,
                        hypre_StructMatrix *A,
                        hypre_StructMatrix *P,
                        hypre_StructGrid   *coarse_grid,
                        HYPRE_Int           cdir )
{
   hypre_StructMatrix    *RAP;
   hypre_Index           *RAP_stencil_shape;
   hypre_StructStencil   *RAP_stencil;
   HYPRE_Int              RAP_stencil_size;
   HYPRE_Int              RAP_stencil_dim;
   HYPRE_Int              RAP_num_ghost[] = {1, 1, 1, 1, 1, 1};
   hypre_Index            index_temp;
   HYPRE_Int              j, i;
   HYPRE_Int              stencil_rank;

   RAP_stencil_dim = 2;

   /*-----------------------------------------------------------------------
    * Define RAP_stencil
    *-----------------------------------------------------------------------*/

   stencil_rank = 0;

   /*-----------------------------------------------------------------------
    * non-symmetric case
    *-----------------------------------------------------------------------*/

   if (!hypre_StructMatrixSymmetric(A))
   {
      /*--------------------------------------------------------------------
       * 5 or 9 point fine grid stencil produces 9 point RAP
       *--------------------------------------------------------------------*/
      RAP_stencil_size = 9;
      RAP_stencil_shape = hypre_CTAlloc(hypre_Index, RAP_stencil_size);
      for (j = -1; j < 2; j++)
      {
         for (i = -1; i < 2; i++)
         {
            /*--------------------------------------------------------------
             * Storage for 9 elements (c,w,e,n,s,sw,se,nw,ne)
             *--------------------------------------------------------------*/
            hypre_SetIndex(index_temp,i,j,0);
            MapIndex(index_temp, cdir, RAP_stencil_shape[stencil_rank]);
            stencil_rank++;
         }
      }
   }

   /*-----------------------------------------------------------------------
    * symmetric case
    *-----------------------------------------------------------------------*/

   else
   {
      /*--------------------------------------------------------------------
       * 5 or 9 point fine grid stencil produces 9 point RAP
       * Only store the lower triangular part + diagonal = 5 entries,
       * lower triangular means the lower triangular part on the matrix
       * in the standard lexicographic ordering.
       *--------------------------------------------------------------------*/
      RAP_stencil_size = 5;
      RAP_stencil_shape = hypre_CTAlloc(hypre_Index, RAP_stencil_size);
      for (j = -1; j < 1; j++)
      {
         for (i = -1; i < 2; i++)
         {
            /*--------------------------------------------------------------
             * Store 5 elements in (c,w,s,sw,se)
             *--------------------------------------------------------------*/
            if( i+j <=0 )
            {
               hypre_SetIndex(index_temp,i,j,0);
               MapIndex(index_temp, cdir, RAP_stencil_shape[stencil_rank]);
               stencil_rank++;
            }
         }
      }
   }

   RAP_stencil = hypre_StructStencilCreate(RAP_stencil_dim, RAP_stencil_size,
                                           RAP_stencil_shape);
   RAP = hypre_StructMatrixCreate(hypre_StructMatrixComm(A),
                                  coarse_grid, RAP_stencil);
   hypre_StructStencilDestroy(RAP_stencil);

   /*-----------------------------------------------------------------------
    * Coarse operator is symmetric iff fine operator is
    *-----------------------------------------------------------------------*/
   hypre_StructMatrixSymmetric(RAP) = hypre_StructMatrixSymmetric(A);

   /*-----------------------------------------------------------------------
    * Set number of ghost points - one on each boundary
    *-----------------------------------------------------------------------*/
   hypre_StructMatrixSetNumGhost(RAP, RAP_num_ghost);

   return RAP;
}
/*--------------------------------------------------------------------------
* Routines to build RAP. These routines are fairly general
* 1) No assumptions about symmetry of A
* 2) No assumption that R = transpose(P)
* 3) 5 or 9-point fine grid A
*
* I am, however, assuming that the c-to-c interpolation is the identity.
*
* I've written two routines - hypre_PFMG2BuildRAPSym to build the
* lower triangular part of RAP (including the diagonal) and
* hypre_PFMG2BuildRAPNoSym to build the upper triangular part of RAP
* (excluding the diagonal). So using symmetric storage, only the
* first routine would be called. With full storage both would need to
* be called.
*
*--------------------------------------------------------------------------*/
/*
 * Build the lower triangular part (including the diagonal) of the coarse
 * operator RAP for 2D PFMG.  Dispatches each coarse grid box to a one-box
 * kernel, specialized on the fine stencil size (5 vs 9 point) and on
 * whether the matrices use constant coefficients.
 */
HYPRE_Int
hypre_PFMG2BuildRAPSym( hypre_StructMatrix *A,
                        hypre_StructMatrix *P,
                        hypre_StructMatrix *R,
                        HYPRE_Int           cdir,
                        hypre_Index         cindex,
                        hypre_Index         cstride,
                        hypre_StructMatrix *RAP )
{
   hypre_StructStencil  *fine_stencil;
   HYPRE_Int             fine_stencil_size;
   hypre_StructGrid     *fgrid;
   HYPRE_Int            *fgrid_ids;
   hypre_StructGrid     *cgrid;
   hypre_BoxArray       *cgrid_boxes;
   HYPRE_Int            *cgrid_ids;
   HYPRE_Int             constant_coefficient;
   HYPRE_Int             constant_coefficient_A;
   HYPRE_Int             fi, ci;

   fine_stencil = hypre_StructMatrixStencil(A);
   fine_stencil_size = hypre_StructStencilSize(fine_stencil);

   fgrid = hypre_StructMatrixGrid(A);
   fgrid_ids = hypre_StructGridIDs(fgrid);

   cgrid = hypre_StructMatrixGrid(RAP);
   cgrid_boxes = hypre_StructGridBoxes(cgrid);
   cgrid_ids = hypre_StructGridIDs(cgrid);

   /* R, P and RAP must agree on the constant-coefficient mode; A may be
    * variable (0) or mixed (2) when the others are variable. */
   constant_coefficient = hypre_StructMatrixConstantCoefficient(RAP);
   constant_coefficient_A = hypre_StructMatrixConstantCoefficient(A);
   hypre_assert( constant_coefficient==0 || constant_coefficient==1 );
   hypre_assert( hypre_StructMatrixConstantCoefficient(R) == constant_coefficient );
   hypre_assert( hypre_StructMatrixConstantCoefficient(P) == constant_coefficient );
   if (constant_coefficient==1 )
   {
      hypre_assert( constant_coefficient_A==1 );
   }
   else
   {
      hypre_assert( constant_coefficient_A==0 || constant_coefficient_A==2 );
   }

   fi = 0;
   hypre_ForBoxI(ci, cgrid_boxes)
   {
      /* advance fi until the fine box id matches this coarse box id */
      while (fgrid_ids[fi] != cgrid_ids[ci])
      {
         fi++;
      }

      /*-----------------------------------------------------------------
       * Switch statement to direct control to appropriate BoxLoop depending
       * on stencil size. Default is full 9-point.
       *-----------------------------------------------------------------*/

      switch (fine_stencil_size)
      {

         /*--------------------------------------------------------------
          * Loop for symmetric 5-point fine grid operator; produces a
          * symmetric 9-point coarse grid operator. We calculate only the
          * lower triangular stencil entries: (southwest, south, southeast,
          * west, and center).
          *--------------------------------------------------------------*/

         case 5:

            if ( constant_coefficient==1 )
            {
               hypre_PFMG2BuildRAPSym_onebox_FSS5_CC1(
                  ci, fi, A, P, R, cdir, cindex, cstride, RAP );
            }
            else
            {
               hypre_PFMG2BuildRAPSym_onebox_FSS5_CC0(
                  ci, fi, A, P, R, cdir, cindex, cstride, RAP );
            }

            break;

         /*--------------------------------------------------------------
          * Loop for symmetric 9-point fine grid operator; produces a
          * symmetric 9-point coarse grid operator. We calculate only the
          * lower triangular stencil entries: (southwest, south, southeast,
          * west, and center).
          *--------------------------------------------------------------*/

         default:

            if ( constant_coefficient==1 )
            {
               hypre_PFMG2BuildRAPSym_onebox_FSS9_CC1(
                  ci, fi, A, P, R, cdir, cindex, cstride, RAP );
            }
            else
            {
               hypre_PFMG2BuildRAPSym_onebox_FSS9_CC0(
                  ci, fi, A, P, R, cdir, cindex, cstride, RAP );
            }

            break;

      } /* end switch statement */

   } /* end ForBoxI */

   return hypre_error_flag;
}
/* for fine stencil size 5, constant coefficient 0 */
HYPRE_Int
hypre_PFMG2BuildRAPSym_onebox_FSS5_CC0(
HYPRE_Int ci,
HYPRE_Int fi,
hypre_StructMatrix *A,
hypre_StructMatrix *P,
hypre_StructMatrix *R,
HYPRE_Int cdir,
hypre_Index cindex,
hypre_Index cstride,
hypre_StructMatrix *RAP )
{
hypre_Index index;
hypre_Index index_temp;
hypre_StructGrid *cgrid;
hypre_BoxArray *cgrid_boxes;
HYPRE_Int *cgrid_ids;
hypre_IndexRef cstart;
hypre_Index stridec;
hypre_Index fstart;
hypre_IndexRef stridef;
hypre_Index loop_size;
HYPRE_Int constant_coefficient_A;
hypre_Box *A_dbox;
hypre_Box *P_dbox;
hypre_Box *R_dbox;
hypre_Box *RAP_dbox;
hypre_Box *cgrid_box;
double *pa, *pb;
double *ra, *rb;
double *a_cc, *a_cw, *a_ce, *a_cs, *a_cn;
double a_cw_offd, a_cw_offdm1, a_cw_offdp1, a_ce_offdm1;
double a_cs_offd, a_cs_offdm1, a_cs_offdp1, a_cn_offd, a_cn_offdm1;
double *rap_cc, *rap_cw, *rap_cs;
double *rap_csw, *rap_cse;
HYPRE_Int iA, iAm1, iAp1;
HYPRE_Int iA_offd, iA_offdm1, iA_offdp1;
HYPRE_Int iAc;
HYPRE_Int iP, iP1;
HYPRE_Int iR;
HYPRE_Int yOffsetA, yOffsetA_diag, yOffsetA_offd;
HYPRE_Int xOffsetP;
HYPRE_Int yOffsetP;
stridef = cstride;
hypre_SetIndex(stridec, 1, 1, 1);
cgrid = hypre_StructMatrixGrid(RAP);
cgrid_boxes = hypre_StructGridBoxes(cgrid);
cgrid_ids = hypre_StructGridIDs(cgrid);
constant_coefficient_A = hypre_StructMatrixConstantCoefficient(A);
/*
fi = 0;
hypre_ForBoxI(ci, cgrid_boxes)
{
while (fgrid_ids[fi] != cgrid_ids[ci])
{
fi++;
}
*/
cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);
cstart = hypre_BoxIMin(cgrid_box);
hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart);
A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi);
P_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(P), fi);
R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi);
RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);
/*-----------------------------------------------------------------
* Extract pointers for interpolation operator:
* pa is pointer for weight for f-point above c-point
* pb is pointer for weight for f-point below c-point
*-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index) -
hypre_BoxOffsetDistance(P_dbox, index);
/*-----------------------------------------------------------------
* Extract pointers for restriction operator:
* ra is pointer for weight for f-point above c-point
* rb is pointer for weight for f-point below c-point
*-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index) -
hypre_BoxOffsetDistance(R_dbox, index);
/*-----------------------------------------------------------------
* Extract pointers for 5-point fine grid operator:
*
* a_cc is pointer for center coefficient
* a_cw is pointer for west coefficient
* a_ce is pointer for east coefficient
* a_cs is pointer for south coefficient
* a_cn is pointer for north coefficient
*-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,0);
MapIndex(index_temp, cdir, index);
a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,0,0);
MapIndex(index_temp, cdir, index);
a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
a_cs = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
/*-----------------------------------------------------------------
* Extract pointers for coarse grid operator - always 9-point:
*
* We build only the lower triangular part (plus diagonal).
*
* rap_cc is pointer for center coefficient (etc.)
*-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,0);
MapIndex(index_temp, cdir, index);
rap_cc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,0,0);
MapIndex(index_temp, cdir, index);
rap_cw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
rap_cs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,-1,0);
MapIndex(index_temp, cdir, index);
rap_csw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,-1,0);
MapIndex(index_temp, cdir, index);
rap_cse = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
/*-----------------------------------------------------------------
* Define offsets for fine grid stencil and interpolation
*
* In the BoxLoop below I assume iA and iP refer to data associated
* with the point which we are building the stencil for. The below
* Offsets are used in refering to data associated with other points.
*-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
yOffsetP = hypre_BoxOffsetDistance(P_dbox,index);
if ( constant_coefficient_A == 0 )
{
yOffsetA = hypre_BoxOffsetDistance(A_dbox,index);
}
else
{
yOffsetA_offd = hypre_CCBoxOffsetDistance(A_dbox,index);
yOffsetA_diag = hypre_BoxOffsetDistance(A_dbox,index);
}
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
xOffsetP = hypre_BoxOffsetDistance(P_dbox,index);
/*--------------------------------------------------------------
* Loop for symmetric 5-point fine grid operator; produces a
* symmetric 9-point coarse grid operator. We calculate only the
* lower triangular stencil entries: (southwest, south, southeast,
* west, and center).
*--------------------------------------------------------------*/
hypre_BoxGetSize(cgrid_box, loop_size);
if ( constant_coefficient_A == 0 )
{
hypre_BoxLoop4Begin(hypre_StructMatrixDim(A), loop_size,
P_dbox, cstart, stridec, iP,
R_dbox, cstart, stridec, iR,
A_dbox, fstart, stridef, iA,
RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop4For(iP, iR, iA, iAc)
{
iAm1 = iA - yOffsetA;
iAp1 = iA + yOffsetA;
iP1 = iP - yOffsetP - xOffsetP;
rap_csw[iAc] = rb[iR] * a_cw[iAm1] * pa[iP1];
iP1 = iP - yOffsetP;
rap_cs[iAc] = rb[iR] * a_cc[iAm1] * pa[iP1]
+ rb[iR] * a_cs[iAm1]
+ a_cs[iA] * pa[iP1];
iP1 = iP - yOffsetP + xOffsetP;
rap_cse[iAc] = rb[iR] * a_ce[iAm1] * pa[iP1];
iP1 = iP - xOffsetP;
rap_cw[iAc] = a_cw[iA]
+ rb[iR] * a_cw[iAm1] * pb[iP1]
+ ra[iR] * a_cw[iAp1] * pa[iP1];
rap_cc[iAc] = a_cc[iA]
+ rb[iR] * a_cc[iAm1] * pb[iP]
+ ra[iR] * a_cc[iAp1] * pa[iP]
+ rb[iR] * a_cn[iAm1]
+ ra[iR] * a_cs[iAp1]
+ a_cs[iA] * pb[iP]
+ a_cn[iA] * pa[iP];
}
hypre_BoxLoop4End(iP, iR, iA, iAc);
}
else
{
iA_offd = hypre_CCBoxIndexRank(A_dbox,fstart);
iA_offdm1 = iA_offd - yOffsetA_offd;
iA_offdp1 = iA_offd + yOffsetA_offd;
a_cn_offd = a_cn[iA_offd];
a_cn_offdm1 = a_cn[iA_offdm1];
a_cs_offd = a_cs[iA_offd];
a_cs_offdm1 = a_cs[iA_offdm1];
a_cs_offdp1 = a_cs[iA_offdp1];
a_cw_offd = a_cw[iA_offd];
a_cw_offdp1 = a_cw[iA_offdp1];
a_cw_offdm1 = a_cw[iA_offdm1];
a_ce_offdm1 = a_ce[iA_offdm1];
hypre_BoxLoop4Begin(hypre_StructMatrixDim(A), loop_size,
P_dbox, cstart, stridec, iP,
R_dbox, cstart, stridec, iR,
A_dbox, fstart, stridef, iA,
RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop4For(iP, iR, iA, iAc)
{
iAm1 = iA - yOffsetA_diag;
iAp1 = iA + yOffsetA_diag;
iP1 = iP - yOffsetP - xOffsetP;
rap_csw[iAc] = rb[iR] * a_cw_offdm1 * pa[iP1];
iP1 = iP - yOffsetP;
rap_cs[iAc] = rb[iR] * a_cc[iAm1] * pa[iP1]
+ rb[iR] * a_cs_offdm1
+ a_cs_offd * pa[iP1];
iP1 = iP - yOffsetP + xOffsetP;
rap_cse[iAc] = rb[iR] * a_ce_offdm1 * pa[iP1];
iP1 = iP - xOffsetP;
rap_cw[iAc] = a_cw_offd
+ rb[iR] * a_cw_offdm1 * pb[iP1]
+ ra[iR] * a_cw_offdp1 * pa[iP1];
rap_cc[iAc] = a_cc[iA]
+ rb[iR] * a_cc[iAm1] * pb[iP]
+ ra[iR] * a_cc[iAp1] * pa[iP]
+ rb[iR] * a_cn_offdm1
+ ra[iR] * a_cs_offdp1
+ a_cs_offd * pb[iP]
+ a_cn_offd * pa[iP];
}
hypre_BoxLoop4End(iP, iR, iA, iAc);
}
/* } *//* end ForBoxI */
return hypre_error_flag;
}
/* for fine stencil size 5, constant coefficient 1 */
/*--------------------------------------------------------------------------
 * hypre_PFMG2BuildRAPSym_onebox_FSS5_CC1
 *
 * Builds the symmetric (lower-triangular plus diagonal) part of the
 * coarse operator RAP for ONE coarse box (ci) and its matching fine
 * box (fi), for a symmetric 5-point fine-grid stencil in the fully
 * constant-coefficient case ("CC1").  Because every stencil entry of
 * A, P, R and RAP holds a single constant value, each coarse stencil
 * entry is computed exactly once (no BoxLoop over grid points).
 *
 *   cdir            - coarsening direction; MapIndex permutes the
 *                     stencil indices so the "y" direction below is
 *                     the coarsening direction
 *   cindex, cstride - coarse-to-fine index map parameters
 *
 * Returns hypre_error_flag.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_PFMG2BuildRAPSym_onebox_FSS5_CC1(
HYPRE_Int ci,
HYPRE_Int fi,
hypre_StructMatrix *A,
hypre_StructMatrix *P,
hypre_StructMatrix *R,
HYPRE_Int cdir,
hypre_Index cindex,
hypre_Index cstride,
hypre_StructMatrix *RAP )
{
hypre_Index index;
hypre_Index index_temp;
hypre_StructGrid *cgrid;
hypre_BoxArray *cgrid_boxes;
HYPRE_Int *cgrid_ids;
hypre_Box *cgrid_box;
hypre_IndexRef cstart;
hypre_Index stridec;
hypre_Index fstart;
hypre_IndexRef stridef;
hypre_Box *A_dbox;
hypre_Box *P_dbox;
hypre_Box *R_dbox;
hypre_Box *RAP_dbox;
double *pa, *pb;
double *ra, *rb;
double *a_cc, *a_cw, *a_ce, *a_cs, *a_cn;
double *rap_cc, *rap_cw, *rap_cs;
double *rap_csw, *rap_cse;
HYPRE_Int iA, iAm1, iAp1;
HYPRE_Int iAc;
HYPRE_Int iP, iP1;
HYPRE_Int iR;
HYPRE_Int yOffsetA;
HYPRE_Int xOffsetP;
HYPRE_Int yOffsetP;
stridef = cstride;
hypre_SetIndex(stridec, 1, 1, 1);
cgrid = hypre_StructMatrixGrid(RAP);
cgrid_boxes = hypre_StructGridBoxes(cgrid);
cgrid_ids = hypre_StructGridIDs(cgrid);
/*
fi = 0;
hypre_ForBoxI(ci, cgrid_boxes)
{
while (fgrid_ids[fi] != cgrid_ids[ci])
{
fi++;
}
*/
cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);
cstart = hypre_BoxIMin(cgrid_box);
hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart);
A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi);
P_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(P), fi);
R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi);
RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);
/*-----------------------------------------------------------------
 * Extract pointers for interpolation operator:
 * pa is pointer for weight for f-point above c-point
 * pb is pointer for weight for f-point below c-point
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index);
/* pb is pre-shifted by the constant-coefficient offset so that the
   same pb[iP]/pb[iP1] indexing as the CC0 variant can be used */
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index) -
hypre_CCBoxOffsetDistance(P_dbox, index);
/*-----------------------------------------------------------------
 * Extract pointers for restriction operator:
 * ra is pointer for weight for f-point above c-point
 * rb is pointer for weight for f-point below c-point
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index) -
hypre_CCBoxOffsetDistance(R_dbox, index);
/*-----------------------------------------------------------------
 * Extract pointers for 5-point fine grid operator:
 *
 * a_cc is pointer for center coefficient
 * a_cw is pointer for west coefficient
 * a_ce is pointer for east coefficient
 * a_cs is pointer for south coefficient
 * a_cn is pointer for north coefficient
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,0);
MapIndex(index_temp, cdir, index);
a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,0,0);
MapIndex(index_temp, cdir, index);
a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
a_cs = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
/*-----------------------------------------------------------------
 * Extract pointers for coarse grid operator - always 9-point:
 *
 * We build only the lower triangular part (plus diagonal).
 *
 * rap_cc is pointer for center coefficient (etc.)
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,0);
MapIndex(index_temp, cdir, index);
rap_cc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,0,0);
MapIndex(index_temp, cdir, index);
rap_cw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
rap_cs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,-1,0);
MapIndex(index_temp, cdir, index);
rap_csw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,-1,0);
MapIndex(index_temp, cdir, index);
rap_cse = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
/*-----------------------------------------------------------------
 * Define offsets for fine grid stencil and interpolation
 *
 * In the BoxLoop below I assume iA and iP refer to data associated
 * with the point which we are building the stencil for. The below
 * Offsets are used in referring to data associated with other points.
 *
 * All data here is constant coefficient, so the CC variants of the
 * offset macros are used throughout.
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
yOffsetA = hypre_CCBoxOffsetDistance(A_dbox,index);
yOffsetP = hypre_CCBoxOffsetDistance(P_dbox,index);
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
xOffsetP = hypre_CCBoxOffsetDistance(P_dbox,index);
/*-----------------------------------------------------------------
 * (Note: no switch/BoxLoop here - this routine is specialized to the
 * 5-point, fully constant-coefficient case, so each coarse stencil
 * entry is evaluated once at the box-origin data ranks below.)
 *-----------------------------------------------------------------*/
/*--------------------------------------------------------------
 * Loop for symmetric 5-point fine grid operator; produces a
 * symmetric 9-point coarse grid operator. We calculate only the
 * lower triangular stencil entries: (southwest, south, southeast,
 * west, and center).
 *--------------------------------------------------------------*/
iP = hypre_CCBoxIndexRank(P_dbox,cstart);
iR = hypre_CCBoxIndexRank(R_dbox,cstart);
iA = hypre_CCBoxIndexRank(A_dbox,fstart);
iAc = hypre_CCBoxIndexRank(RAP_dbox,cstart);
iAm1 = iA - yOffsetA;
iAp1 = iA + yOffsetA;
iP1 = iP - yOffsetP - xOffsetP;
rap_csw[iAc] = rb[iR] * a_cw[iAm1] * pa[iP1];
iP1 = iP - yOffsetP;
rap_cs[iAc] = rb[iR] * a_cc[iAm1] * pa[iP1]
+ rb[iR] * a_cs[iAm1]
+ a_cs[iA] * pa[iP1];
iP1 = iP - yOffsetP + xOffsetP;
rap_cse[iAc] = rb[iR] * a_ce[iAm1] * pa[iP1];
iP1 = iP - xOffsetP;
rap_cw[iAc] = a_cw[iA]
+ rb[iR] * a_cw[iAm1] * pb[iP1]
+ ra[iR] * a_cw[iAp1] * pa[iP1];
rap_cc[iAc] = a_cc[iA]
+ rb[iR] * a_cc[iAm1] * pb[iP]
+ ra[iR] * a_cc[iAp1] * pa[iP]
+ rb[iR] * a_cn[iAm1]
+ ra[iR] * a_cs[iAp1]
+ a_cs[iA] * pb[iP]
+ a_cn[iA] * pa[iP];
/* } *//* end ForBoxI */
return hypre_error_flag;
}
/* for fine stencil size 9, constant coefficient 0 */
/*--------------------------------------------------------------------------
 * hypre_PFMG2BuildRAPSym_onebox_FSS9_CC0
 *
 * Builds the symmetric (lower-triangular plus diagonal) part of the
 * coarse operator RAP for ONE coarse box (ci) and its matching fine
 * box (fi), for a symmetric 9-point fine-grid stencil with a
 * variable-coefficient coarse operator ("CC0").  Two loop bodies are
 * provided, chosen by hypre_StructMatrixConstantCoefficient(A):
 *   == 0 : A fully variable; every A value is indexed inside the loop.
 *   != 0 : A's off-diagonal entries are constant and are hoisted out
 *          of the loop as scalars (a_*_offd*), while a_cc remains
 *          indexed - apparently the constant-coefficient-with-
 *          variable-diagonal case; TODO confirm against the
 *          hypre_StructMatrixConstantCoefficient convention.
 *
 * Returns hypre_error_flag.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_PFMG2BuildRAPSym_onebox_FSS9_CC0(
HYPRE_Int ci,
HYPRE_Int fi,
hypre_StructMatrix *A,
hypre_StructMatrix *P,
hypre_StructMatrix *R,
HYPRE_Int cdir,
hypre_Index cindex,
hypre_Index cstride,
hypre_StructMatrix *RAP )
{
hypre_Index index;
hypre_Index index_temp;
hypre_StructGrid *cgrid;
hypre_BoxArray *cgrid_boxes;
HYPRE_Int *cgrid_ids;
hypre_Box *cgrid_box;
hypre_IndexRef cstart;
hypre_Index stridec;
hypre_Index fstart;
hypre_IndexRef stridef;
hypre_Index loop_size;
HYPRE_Int constant_coefficient_A;
hypre_Box *A_dbox;
hypre_Box *P_dbox;
hypre_Box *R_dbox;
hypre_Box *RAP_dbox;
double *pa, *pb;
double *ra, *rb;
double *a_cc, *a_cw, *a_ce, *a_cs, *a_cn;
double *a_csw, *a_cse, *a_cnw;
double a_cw_offd, a_cw_offdm1, a_cw_offdp1, a_ce_offdm1;
double a_cs_offd, a_cs_offdm1, a_cs_offdp1, a_cn_offd, a_cn_offdm1;
double a_csw_offd, a_csw_offdm1, a_csw_offdp1, a_cse_offd, a_cse_offdm1;
double a_cnw_offd, a_cnw_offdm1;
double *rap_cc, *rap_cw, *rap_cs;
double *rap_csw, *rap_cse;
HYPRE_Int iA, iAm1, iAp1;
HYPRE_Int iA_offd, iA_offdm1, iA_offdp1;
HYPRE_Int iAc;
HYPRE_Int iP, iP1;
HYPRE_Int iR;
HYPRE_Int yOffsetA, yOffsetA_diag, yOffsetA_offd;
HYPRE_Int xOffsetP;
HYPRE_Int yOffsetP;
stridef = cstride;
hypre_SetIndex(stridec, 1, 1, 1);
cgrid = hypre_StructMatrixGrid(RAP);
cgrid_boxes = hypre_StructGridBoxes(cgrid);
cgrid_ids = hypre_StructGridIDs(cgrid);
constant_coefficient_A = hypre_StructMatrixConstantCoefficient(A);
/* fi = 0;
hypre_ForBoxI(ci, cgrid_boxes)
{
while (fgrid_ids[fi] != cgrid_ids[ci])
{
fi++;
}
*/
cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);
cstart = hypre_BoxIMin(cgrid_box);
hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart);
A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi);
P_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(P), fi);
R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi);
RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);
/*-----------------------------------------------------------------
 * Extract pointers for interpolation operator:
 * pa is pointer for weight for f-point above c-point
 * pb is pointer for weight for f-point below c-point
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index);
/* pb is pre-shifted by one y-offset so pb[iP] addresses the weight
   for the f-point below the c-point at iP */
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index) -
hypre_BoxOffsetDistance(P_dbox, index);
/*-----------------------------------------------------------------
 * Extract pointers for restriction operator:
 * ra is pointer for weight for f-point above c-point
 * rb is pointer for weight for f-point below c-point
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index) -
hypre_BoxOffsetDistance(R_dbox, index);
/*-----------------------------------------------------------------
 * Extract pointers for 5-point fine grid operator:
 *
 * a_cc is pointer for center coefficient
 * a_cw is pointer for west coefficient
 * a_ce is pointer for east coefficient
 * a_cs is pointer for south coefficient
 * a_cn is pointer for north coefficient
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,0);
MapIndex(index_temp, cdir, index);
a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,0,0);
MapIndex(index_temp, cdir, index);
a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
a_cs = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
/*-----------------------------------------------------------------
 * Extract additional pointers for 9-point fine grid operator:
 *
 * a_csw is pointer for southwest coefficient
 * a_cse is pointer for southeast coefficient
 * a_cnw is pointer for northwest coefficient
 * (a_cne is not needed: only the lower-triangular coarse entries
 * are built here, which never reference the northeast coefficient)
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,-1,-1,0);
MapIndex(index_temp, cdir, index);
a_csw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,-1,0);
MapIndex(index_temp, cdir, index);
a_cse = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,1,0);
MapIndex(index_temp, cdir, index);
a_cnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
/*-----------------------------------------------------------------
 * Extract pointers for coarse grid operator - always 9-point:
 *
 * We build only the lower triangular part (plus diagonal).
 *
 * rap_cc is pointer for center coefficient (etc.)
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,0);
MapIndex(index_temp, cdir, index);
rap_cc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,0,0);
MapIndex(index_temp, cdir, index);
rap_cw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
rap_cs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,-1,0);
MapIndex(index_temp, cdir, index);
rap_csw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,-1,0);
MapIndex(index_temp, cdir, index);
rap_cse = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
/*-----------------------------------------------------------------
 * Define offsets for fine grid stencil and interpolation
 *
 * In the BoxLoop below I assume iA and iP refer to data associated
 * with the point which we are building the stencil for. The below
 * Offsets are used in referring to data associated with other points.
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
yOffsetP = hypre_BoxOffsetDistance(P_dbox,index);
/* Separate offsets for A: the variable-coefficient branch strides with
   yOffsetA; the partially-constant branch needs both the diagonal
   (variable) stride and the constant-data stride. */
if ( constant_coefficient_A == 0 )
{
yOffsetA = hypre_BoxOffsetDistance(A_dbox,index);
}
else
{
yOffsetA_offd = hypre_CCBoxOffsetDistance(A_dbox,index);
yOffsetA_diag = hypre_BoxOffsetDistance(A_dbox,index);
}
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
xOffsetP = hypre_BoxOffsetDistance(P_dbox,index);
/*--------------------------------------------------------------
 * Loop for symmetric 9-point fine grid operator; produces a
 * symmetric 9-point coarse grid operator. We calculate only the
 * lower triangular stencil entries: (southwest, south, southeast,
 * west, and center).
 *--------------------------------------------------------------*/
hypre_BoxGetSize(cgrid_box, loop_size);
if ( constant_coefficient_A == 0 )
{
hypre_BoxLoop4Begin(hypre_StructMatrixDim(A), loop_size,
P_dbox, cstart, stridec, iP,
R_dbox, cstart, stridec, iR,
A_dbox, fstart, stridef, iA,
RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop4For(iP, iR, iA, iAc)
{
iAm1 = iA - yOffsetA;
iAp1 = iA + yOffsetA;
iP1 = iP - yOffsetP - xOffsetP;
rap_csw[iAc] = rb[iR] * a_cw[iAm1] * pa[iP1]
+ rb[iR] * a_csw[iAm1]
+ a_csw[iA] * pa[iP1];
iP1 = iP - yOffsetP;
rap_cs[iAc] = rb[iR] * a_cc[iAm1] * pa[iP1]
+ rb[iR] * a_cs[iAm1]
+ a_cs[iA] * pa[iP1];
iP1 = iP - yOffsetP + xOffsetP;
rap_cse[iAc] = rb[iR] * a_ce[iAm1] * pa[iP1]
+ rb[iR] * a_cse[iAm1]
+ a_cse[iA] * pa[iP1];
iP1 = iP - xOffsetP;
rap_cw[iAc] = a_cw[iA]
+ rb[iR] * a_cw[iAm1] * pb[iP1]
+ ra[iR] * a_cw[iAp1] * pa[iP1]
+ rb[iR] * a_cnw[iAm1]
+ ra[iR] * a_csw[iAp1]
+ a_csw[iA] * pb[iP1]
+ a_cnw[iA] * pa[iP1];
rap_cc[iAc] = a_cc[iA]
+ rb[iR] * a_cc[iAm1] * pb[iP]
+ ra[iR] * a_cc[iAp1] * pa[iP]
+ rb[iR] * a_cn[iAm1]
+ ra[iR] * a_cs[iAp1]
+ a_cs[iA] * pb[iP]
+ a_cn[iA] * pa[iP];
}
hypre_BoxLoop4End(iP, iR, iA, iAc);
}
else
{
/* Off-diagonal entries of A are constant: read them once at the
   constant-coefficient rank of fstart and hoist them out of the
   loop as scalars.  Only a_cc remains indexed below. */
iA_offd = hypre_CCBoxIndexRank(A_dbox,fstart);
iA_offdm1 = iA_offd - yOffsetA_offd;
iA_offdp1 = iA_offd + yOffsetA_offd;
a_cn_offd = a_cn[iA_offd];
a_cn_offdm1 = a_cn[iA_offdm1];
a_cs_offd = a_cs[iA_offd];
a_cs_offdm1 = a_cs[iA_offdm1];
a_cs_offdp1 = a_cs[iA_offdp1];
a_cw_offd = a_cw[iA_offd];
a_cw_offdp1 = a_cw[iA_offdp1];
a_cw_offdm1 = a_cw[iA_offdm1];
a_ce_offdm1 = a_ce[iA_offdm1];
a_csw_offd = a_csw[iA_offd];
a_csw_offdm1 = a_csw[iA_offdm1];
a_csw_offdp1 = a_csw[iA_offdp1];
a_cse_offd = a_cse[iA_offd];
a_cse_offdm1 = a_cse[iA_offdm1];
a_cnw_offd = a_cnw[iA_offd];
a_cnw_offdm1 = a_cnw[iA_offdm1];
hypre_BoxLoop4Begin(hypre_StructMatrixDim(A), loop_size,
P_dbox, cstart, stridec, iP,
R_dbox, cstart, stridec, iR,
A_dbox, fstart, stridef, iA,
RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop4For(iP, iR, iA, iAc)
{
iAm1 = iA - yOffsetA_diag;
iAp1 = iA + yOffsetA_diag;
iP1 = iP - yOffsetP - xOffsetP;
rap_csw[iAc] = rb[iR] * a_cw_offdm1 * pa[iP1]
+ rb[iR] * a_csw_offdm1
+ a_csw_offd * pa[iP1];
iP1 = iP - yOffsetP;
rap_cs[iAc] = rb[iR] * a_cc[iAm1] * pa[iP1]
+ rb[iR] * a_cs_offdm1
+ a_cs_offd * pa[iP1];
iP1 = iP - yOffsetP + xOffsetP;
rap_cse[iAc] = rb[iR] * a_ce_offdm1 * pa[iP1]
+ rb[iR] * a_cse_offdm1
+ a_cse_offd * pa[iP1];
iP1 = iP - xOffsetP;
rap_cw[iAc] = a_cw_offd
+ rb[iR] * a_cw_offdm1 * pb[iP1]
+ ra[iR] * a_cw_offdp1 * pa[iP1]
+ rb[iR] * a_cnw_offdm1
+ ra[iR] * a_csw_offdp1
+ a_csw_offd * pb[iP1]
+ a_cnw_offd * pa[iP1];
rap_cc[iAc] = a_cc[iA]
+ rb[iR] * a_cc[iAm1] * pb[iP]
+ ra[iR] * a_cc[iAp1] * pa[iP]
+ rb[iR] * a_cn_offdm1
+ ra[iR] * a_cs_offdp1
+ a_cs_offd * pb[iP]
+ a_cn_offd * pa[iP];
}
hypre_BoxLoop4End(iP, iR, iA, iAc);
}
/* }*/ /* end ForBoxI */
return hypre_error_flag;
}
/* for fine stencil size 9, constant coefficient 1 */
/*--------------------------------------------------------------------------
 * hypre_PFMG2BuildRAPSym_onebox_FSS9_CC1
 *
 * Builds the symmetric (lower-triangular plus diagonal) part of the
 * coarse operator RAP for ONE coarse box (ci) and its matching fine
 * box (fi), for a symmetric 9-point fine-grid stencil in the fully
 * constant-coefficient case ("CC1").  Because every stencil entry of
 * A, P, R and RAP holds a single constant value, each coarse stencil
 * entry is computed exactly once (no BoxLoop over grid points).
 *
 * Returns hypre_error_flag.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_PFMG2BuildRAPSym_onebox_FSS9_CC1(
HYPRE_Int ci,
HYPRE_Int fi,
hypre_StructMatrix *A,
hypre_StructMatrix *P,
hypre_StructMatrix *R,
HYPRE_Int cdir,
hypre_Index cindex,
hypre_Index cstride,
hypre_StructMatrix *RAP )
{
hypre_Index index;
hypre_Index index_temp;
hypre_StructGrid *cgrid;
hypre_BoxArray *cgrid_boxes;
HYPRE_Int *cgrid_ids;
hypre_Box *cgrid_box;
hypre_IndexRef cstart;
hypre_Index stridec;
hypre_Index fstart;
hypre_IndexRef stridef;
hypre_Box *A_dbox;
hypre_Box *P_dbox;
hypre_Box *R_dbox;
hypre_Box *RAP_dbox;
double *pa, *pb;
double *ra, *rb;
double *a_cc, *a_cw, *a_ce, *a_cs, *a_cn;
double *a_csw, *a_cse, *a_cnw;
double *rap_cc, *rap_cw, *rap_cs;
double *rap_csw, *rap_cse;
HYPRE_Int iA, iAm1, iAp1;
HYPRE_Int iAc;
HYPRE_Int iP, iP1;
HYPRE_Int iR;
HYPRE_Int yOffsetA;
HYPRE_Int xOffsetP;
HYPRE_Int yOffsetP;
stridef = cstride;
hypre_SetIndex(stridec, 1, 1, 1);
cgrid = hypre_StructMatrixGrid(RAP);
cgrid_boxes = hypre_StructGridBoxes(cgrid);
cgrid_ids = hypre_StructGridIDs(cgrid);
/* fi = 0;
hypre_ForBoxI(ci, cgrid_boxes)
{
while (fgrid_ids[fi] != cgrid_ids[ci])
{
fi++;
}
*/
cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);
cstart = hypre_BoxIMin(cgrid_box);
hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart);
A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi);
P_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(P), fi);
R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi);
RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);
/*-----------------------------------------------------------------
 * Extract pointers for interpolation operator:
 * pa is pointer for weight for f-point above c-point
 * pb is pointer for weight for f-point below c-point
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index);
/* pb is pre-shifted by the constant-coefficient offset so that the
   same pb[iP]/pb[iP1] indexing as the CC0 variant can be used */
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index) -
hypre_CCBoxOffsetDistance(P_dbox, index);
/*-----------------------------------------------------------------
 * Extract pointers for restriction operator:
 * ra is pointer for weight for f-point above c-point
 * rb is pointer for weight for f-point below c-point
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index) -
hypre_CCBoxOffsetDistance(R_dbox, index);
/*-----------------------------------------------------------------
 * Extract pointers for 5-point fine grid operator:
 *
 * a_cc is pointer for center coefficient
 * a_cw is pointer for west coefficient
 * a_ce is pointer for east coefficient
 * a_cs is pointer for south coefficient
 * a_cn is pointer for north coefficient
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,0);
MapIndex(index_temp, cdir, index);
a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,0,0);
MapIndex(index_temp, cdir, index);
a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
a_cs = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
/*-----------------------------------------------------------------
 * Extract additional pointers for 9-point fine grid operator:
 *
 * a_csw is pointer for southwest coefficient
 * a_cse is pointer for southeast coefficient
 * a_cnw is pointer for northwest coefficient
 * (a_cne is not needed: only the lower-triangular coarse entries
 * are built here, which never reference the northeast coefficient)
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,-1,-1,0);
MapIndex(index_temp, cdir, index);
a_csw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,-1,0);
MapIndex(index_temp, cdir, index);
a_cse = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,1,0);
MapIndex(index_temp, cdir, index);
a_cnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
/*-----------------------------------------------------------------
 * Extract pointers for coarse grid operator - always 9-point:
 *
 * We build only the lower triangular part (plus diagonal).
 *
 * rap_cc is pointer for center coefficient (etc.)
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,0);
MapIndex(index_temp, cdir, index);
rap_cc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,0,0);
MapIndex(index_temp, cdir, index);
rap_cw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
rap_cs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,-1,0);
MapIndex(index_temp, cdir, index);
rap_csw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,-1,0);
MapIndex(index_temp, cdir, index);
rap_cse = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
/*-----------------------------------------------------------------
 * Define offsets for fine grid stencil and interpolation
 *
 * In the BoxLoop below I assume iA and iP refer to data associated
 * with the point which we are building the stencil for. The below
 * Offsets are used in referring to data associated with other points.
 *
 * All data here is constant coefficient, so the CC variants of the
 * offset macros are used throughout.
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
yOffsetA = hypre_CCBoxOffsetDistance(A_dbox,index);
yOffsetP = hypre_CCBoxOffsetDistance(P_dbox,index);
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
xOffsetP = hypre_CCBoxOffsetDistance(P_dbox,index);
/*-----------------------------------------------------------------
 * (Note: no switch/BoxLoop here - this routine is specialized to the
 * 9-point, fully constant-coefficient case, so each coarse stencil
 * entry is evaluated once at the box-origin data ranks below.)
 *-----------------------------------------------------------------*/
/*--------------------------------------------------------------
 * Loop for symmetric 9-point fine grid operator; produces a
 * symmetric 9-point coarse grid operator. We calculate only the
 * lower triangular stencil entries: (southwest, south, southeast,
 * west, and center).
 *--------------------------------------------------------------*/
iP = hypre_CCBoxIndexRank(P_dbox,cstart);
iR = hypre_CCBoxIndexRank(R_dbox,cstart);
iA = hypre_CCBoxIndexRank(A_dbox,fstart);
iAc = hypre_CCBoxIndexRank(RAP_dbox,cstart);
iAm1 = iA - yOffsetA;
iAp1 = iA + yOffsetA;
iP1 = iP - yOffsetP - xOffsetP;
rap_csw[iAc] = rb[iR] * a_cw[iAm1] * pa[iP1]
+ rb[iR] * a_csw[iAm1]
+ a_csw[iA] * pa[iP1];
iP1 = iP - yOffsetP;
rap_cs[iAc] = rb[iR] * a_cc[iAm1] * pa[iP1]
+ rb[iR] * a_cs[iAm1]
+ a_cs[iA] * pa[iP1];
iP1 = iP - yOffsetP + xOffsetP;
rap_cse[iAc] = rb[iR] * a_ce[iAm1] * pa[iP1]
+ rb[iR] * a_cse[iAm1]
+ a_cse[iA] * pa[iP1];
iP1 = iP - xOffsetP;
rap_cw[iAc] = a_cw[iA]
+ rb[iR] * a_cw[iAm1] * pb[iP1]
+ ra[iR] * a_cw[iAp1] * pa[iP1]
+ rb[iR] * a_cnw[iAm1]
+ ra[iR] * a_csw[iAp1]
+ a_csw[iA] * pb[iP1]
+ a_cnw[iA] * pa[iP1];
rap_cc[iAc] = a_cc[iA]
+ rb[iR] * a_cc[iAm1] * pb[iP]
+ ra[iR] * a_cc[iAp1] * pa[iP]
+ rb[iR] * a_cn[iAm1]
+ ra[iR] * a_cs[iAp1]
+ a_cs[iA] * pb[iP]
+ a_cn[iA] * pa[iP];
/* }*/ /* end ForBoxI */
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_PFMG2BuildRAPNoSym
 *
 * Dispatcher that builds the non-symmetric (strictly upper-triangular)
 * part of the 2D PFMG coarse operator RAP.  For each coarse box it
 * locates the fine box with the matching grid id, then dispatches to
 * the one-box kernel selected by the fine stencil size (5-point, or
 * the 9-point default) and by whether RAP is stored constant-
 * coefficient (CC1) or variable-coefficient (CC0).
 *
 *   A        - fine-grid operator
 *   P, R     - interpolation and restriction operators
 *   cdir     - coarsening direction
 *   cindex, cstride - coarse-to-fine index map parameters
 *   RAP      - coarse-grid operator to be filled
 *
 * Returns hypre_error_flag.
 *
 * Change from generated original: removed the local
 * constant_coefficient_A, which was assigned from the pure accessor
 * hypre_StructMatrixConstantCoefficient(A) but never read.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_PFMG2BuildRAPNoSym( hypre_StructMatrix *A,
hypre_StructMatrix *P,
hypre_StructMatrix *R,
HYPRE_Int cdir,
hypre_Index cindex,
hypre_Index cstride,
hypre_StructMatrix *RAP )
{
hypre_StructStencil *fine_stencil;
HYPRE_Int fine_stencil_size;
hypre_StructGrid *fgrid;
HYPRE_Int *fgrid_ids;
hypre_StructGrid *cgrid;
hypre_BoxArray *cgrid_boxes;
HYPRE_Int *cgrid_ids;
HYPRE_Int fi, ci;
HYPRE_Int constant_coefficient;
fine_stencil = hypre_StructMatrixStencil(A);
fine_stencil_size = hypre_StructStencilSize(fine_stencil);
fgrid = hypre_StructMatrixGrid(A);
fgrid_ids = hypre_StructGridIDs(fgrid);
cgrid = hypre_StructMatrixGrid(RAP);
cgrid_boxes = hypre_StructGridBoxes(cgrid);
cgrid_ids = hypre_StructGridIDs(cgrid);
constant_coefficient = hypre_StructMatrixConstantCoefficient(RAP);
if (constant_coefficient)
{
/* a constant-coefficient RAP is only valid when R, A, P are all
   constant-coefficient as well */
hypre_assert( hypre_StructMatrixConstantCoefficient(R) );
hypre_assert( hypre_StructMatrixConstantCoefficient(A) );
hypre_assert( hypre_StructMatrixConstantCoefficient(P) );
}
else
{
/* hypre_assert( hypre_StructMatrixConstantCoefficient(R)==0 );
hypre_assert( hypre_StructMatrixConstantCoefficient(A)==0 );
hypre_assert( hypre_StructMatrixConstantCoefficient(P)==0 );
*/
}
fi = 0;
hypre_ForBoxI(ci, cgrid_boxes)
{
/* advance fi to the fine box whose grid id matches this coarse box;
   both id lists are traversed in increasing order */
while (fgrid_ids[fi] != cgrid_ids[ci])
{
fi++;
}
/*-----------------------------------------------------------------
 * Switch statement to direct control to appropriate BoxLoop depending
 * on stencil size. Default is full 27-point.
 *-----------------------------------------------------------------*/
switch (fine_stencil_size)
{
/*--------------------------------------------------------------
 * Loop for 5-point fine grid operator; produces upper triangular
 * part of 9-point coarse grid operator - excludes diagonal.
 * stencil entries: (northeast, north, northwest, and east)
 *--------------------------------------------------------------*/
case 5:
if ( constant_coefficient==1 )
{
hypre_PFMG2BuildRAPNoSym_onebox_FSS5_CC1(
ci, fi, A, P, R, cdir, cindex, cstride, RAP );
}
else
{
hypre_PFMG2BuildRAPNoSym_onebox_FSS5_CC0(
ci, fi, A, P, R, cdir, cindex, cstride, RAP );
}
break;
/*--------------------------------------------------------------
 * Loop for 9-point fine grid operator; produces upper triangular
 * part of 9-point coarse grid operator - excludes diagonal.
 * stencil entries: (northeast, north, northwest, and east)
 *--------------------------------------------------------------*/
default:
if ( constant_coefficient==1 )
{
hypre_PFMG2BuildRAPNoSym_onebox_FSS9_CC1(
ci, fi, A, P, R, cdir, cindex, cstride, RAP );
}
else
{
hypre_PFMG2BuildRAPNoSym_onebox_FSS9_CC0(
ci, fi, A, P, R, cdir, cindex, cstride, RAP );
}
break;
} /* end switch statement */
} /* end ForBoxI */
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_PFMG2BuildRAPNoSym_onebox_FSS5_CC0
 *
 * For one coarse-grid box (index ci) and its matching fine-grid box
 * (index fi), build the upper-triangular ("NoSym") part of the 9-point
 * coarse-grid operator RAP = R*A*P from a 5-point fine-grid operator A,
 * interpolation P and restriction R.  Only the east, north, northwest and
 * northeast coarse stencil entries are produced; the diagonal is excluded.
 *
 * "CC0" = A does not have fully constant coefficients: this routine
 * handles hypre_StructMatrixConstantCoefficient(A) == 0 (all variable)
 * and == 2 (constant off-diagonal, variable diagonal; see the hypre_assert
 * in the else-branch below).
 *
 * Parameters: ci/fi coarse/fine box indices; A fine operator; P/R
 * interpolation/restriction; cdir coarsening direction; cindex/cstride
 * coarse-to-fine index mapping; RAP output coarse operator.
 * Returns hypre_error_flag.
 *
 * for fine stencil size 5, constant coefficient 0 */
HYPRE_Int
hypre_PFMG2BuildRAPNoSym_onebox_FSS5_CC0(
HYPRE_Int ci,
HYPRE_Int fi,
hypre_StructMatrix *A,
hypre_StructMatrix *P,
hypre_StructMatrix *R,
HYPRE_Int cdir,
hypre_Index cindex,
hypre_Index cstride,
hypre_StructMatrix *RAP )
{
hypre_Index index;
hypre_Index index_temp;
hypre_StructGrid *cgrid;
hypre_BoxArray *cgrid_boxes;
hypre_Box *cgrid_box;
hypre_IndexRef cstart;
hypre_Index stridec;
hypre_Index fstart;
hypre_IndexRef stridef;
hypre_Index loop_size;
HYPRE_Int constant_coefficient_A;
hypre_Box *A_dbox;
hypre_Box *P_dbox;
hypre_Box *R_dbox;
hypre_Box *RAP_dbox;
double *pa, *pb;
double *ra, *rb;
double *a_cc, *a_cw, *a_ce, *a_cn;
double a_cn_offd, a_cn_offdp1, a_cw_offdp1;
double a_ce_offd, a_ce_offdm1, a_ce_offdp1;
double *rap_ce, *rap_cn;
double *rap_cnw, *rap_cne;
HYPRE_Int iA, iAm1, iAp1, iA_offd, iA_offdm1, iA_offdp1;
HYPRE_Int iAc;
HYPRE_Int iP, iP1;
HYPRE_Int iR;
HYPRE_Int yOffsetA, yOffsetA_diag, yOffsetA_offd;
HYPRE_Int xOffsetP;
HYPRE_Int yOffsetP;
/*hypre_printf("nosym 5.0\n");*/
stridef = cstride;
hypre_SetIndex(stridec, 1, 1, 1);
cgrid = hypre_StructMatrixGrid(RAP);
cgrid_boxes = hypre_StructGridBoxes(cgrid);
constant_coefficient_A = hypre_StructMatrixConstantCoefficient(A);
/* Remnant of the original multi-box loop; this "onebox" variant is called
   with ci/fi already matched by the caller. */
/* fi = 0;
hypre_ForBoxI(ci, cgrid_boxes)
{
while (fgrid_ids[fi] != cgrid_ids[ci])
{
fi++;
}
*/
cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);
cstart = hypre_BoxIMin(cgrid_box);
hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart);
A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi);
P_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(P), fi);
R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi);
RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);
/*-----------------------------------------------------------------
 * Extract pointers for interpolation operator:
 * pa is pointer for weight for f-point above c-point
 * pb is pointer for weight for f-point below c-point
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
/* pb is shifted back by one y-offset so that pb[iP1] reads the weight
   associated with the point below. */
pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index) -
hypre_BoxOffsetDistance(P_dbox, index);
/*-----------------------------------------------------------------
 * Extract pointers for restriction operator:
 * ra is pointer for weight for f-point above c-point
 * rb is pointer for weight for f-point below c-point
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index) -
hypre_BoxOffsetDistance(R_dbox, index);
/*-----------------------------------------------------------------
 * Extract pointers for 5-point fine grid operator:
 *
 * a_cc is pointer for center coefficient
 * a_cw is pointer for west coefficient
 * a_ce is pointer for east coefficient
 * a_cs is pointer for south coefficient
 * a_cn is pointer for north coefficient
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,0);
MapIndex(index_temp, cdir, index);
a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,0,0);
MapIndex(index_temp, cdir, index);
a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
/*-----------------------------------------------------------------
 * Extract pointers for coarse grid operator - always 9-point:
 *
 * We build only the upper triangular part.
 *
 * rap_ce is pointer for east coefficient (etc.)
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
rap_ce = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
rap_cn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,1,0);
MapIndex(index_temp, cdir, index);
rap_cne = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,1,0);
MapIndex(index_temp, cdir, index);
rap_cnw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
/*-----------------------------------------------------------------
 * Define offsets for fine grid stencil and interpolation
 *
 * In the BoxLoop below I assume iA and iP refer to data associated
 * with the point which we are building the stencil for. The below
 * Offsets are used in referring to data associated with other points.
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
yOffsetP = hypre_BoxOffsetDistance(P_dbox,index);
if ( constant_coefficient_A == 0 )
{
yOffsetA = hypre_BoxOffsetDistance(A_dbox,index);
}
else
{
/* CC==2: diagonal entries are stored variably (box offset), while
   off-diagonal entries are constant (CC offset). */
hypre_assert( constant_coefficient_A==2 );
yOffsetA_diag = hypre_BoxOffsetDistance(A_dbox,index);
yOffsetA_offd = hypre_CCBoxOffsetDistance(A_dbox,index);
}
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
xOffsetP = hypre_BoxOffsetDistance(P_dbox,index);
/*--------------------------------------------------------------
 * Loop for 5-point fine grid operator; produces upper triangular
 * part of 9-point coarse grid operator - excludes diagonal.
 * stencil entries: (northeast, north, northwest, and east)
 *--------------------------------------------------------------*/
hypre_BoxGetSize(cgrid_box, loop_size);
if ( constant_coefficient_A == 0 )
{
/* Fully variable coefficients: all of A is indexed per point. */
/*hypre_printf("nosym 5.0.0\n");*/
hypre_BoxLoop4Begin(hypre_StructMatrixDim(A), loop_size,
P_dbox, cstart, stridec, iP,
R_dbox, cstart, stridec, iR,
A_dbox, fstart, stridef, iA,
RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop4For(iP, iR, iA, iAc)
{
iAm1 = iA - yOffsetA;
iAp1 = iA + yOffsetA;
iP1 = iP + yOffsetP + xOffsetP;
rap_cne[iAc] = ra[iR] * a_ce[iAp1] * pb[iP1];
iP1 = iP + yOffsetP;
rap_cn[iAc] = ra[iR] * a_cc[iAp1] * pb[iP1]
+ ra[iR] * a_cn[iAp1]
+ a_cn[iA] * pb[iP1];
iP1 = iP + yOffsetP - xOffsetP;
rap_cnw[iAc] = ra[iR] * a_cw[iAp1] * pb[iP1];
iP1 = iP + xOffsetP;
rap_ce[iAc] = a_ce[iA]
+ rb[iR] * a_ce[iAm1] * pb[iP1]
+ ra[iR] * a_ce[iAp1] * pa[iP1];
}
hypre_BoxLoop4End(iP, iR, iA, iAc);
}
else
{
/* CC==2: off-diagonal coefficients of A are constant, so they are
   read once here (hoisted out of the loop); only a_cc is indexed
   inside the loop. */
hypre_assert( constant_coefficient_A==2 );
/*hypre_printf("nosym 5.0.2\n"); */
iA_offd = hypre_CCBoxIndexRank(A_dbox,fstart);
iA_offdm1 = iA_offd - yOffsetA_offd;
iA_offdp1 = iA_offd + yOffsetA_offd;
a_cn_offd = a_cn[iA_offd];
a_cn_offdp1 = a_cn[iA_offdp1];
a_cw_offdp1 = a_cw[iA_offdp1];
a_ce_offd = a_ce[iA_offd];
a_ce_offdm1 = a_ce[iA_offdm1];
a_ce_offdp1 = a_ce[iA_offdp1];
hypre_BoxLoop4Begin(hypre_StructMatrixDim(A), loop_size,
P_dbox, cstart, stridec, iP,
R_dbox, cstart, stridec, iR,
A_dbox, fstart, stridef, iA,
RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop4For(iP, iR, iA, iAc)
{
iAp1 = iA + yOffsetA_diag;
iP1 = iP + yOffsetP + xOffsetP;
rap_cne[iAc] = ra[iR] * a_ce_offdp1 * pb[iP1];
iP1 = iP + yOffsetP;
rap_cn[iAc] = ra[iR] * a_cc[iAp1] * pb[iP1]
+ ra[iR] * a_cn_offdp1
+ a_cn_offd * pb[iP1];
iP1 = iP + yOffsetP - xOffsetP;
rap_cnw[iAc] = ra[iR] * a_cw_offdp1 * pb[iP1];
iP1 = iP + xOffsetP;
rap_ce[iAc] = a_ce_offd
+ rb[iR] * a_ce_offdm1 * pb[iP1]
+ ra[iR] * a_ce_offdp1 * pa[iP1];
}
hypre_BoxLoop4End(iP, iR, iA, iAc);
}
/* }*/ /* end ForBoxI */
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_PFMG2BuildRAPNoSym_onebox_FSS5_CC1
 *
 * Same computation as the FSS5_CC0 variant (upper-triangular part of the
 * 9-point coarse operator RAP from a 5-point fine operator), but for a
 * fully constant-coefficient A ("CC1").  All operators are spatially
 * constant, so each coarse stencil entry is computed exactly once at a
 * single representative index (hypre_CCBoxIndexRank) instead of in a
 * BoxLoop; all offsets use hypre_CCBoxOffsetDistance.
 * Returns hypre_error_flag.
 *
 * for fine stencil size 5, constant coefficient 1 */
HYPRE_Int
hypre_PFMG2BuildRAPNoSym_onebox_FSS5_CC1(
HYPRE_Int ci,
HYPRE_Int fi,
hypre_StructMatrix *A,
hypre_StructMatrix *P,
hypre_StructMatrix *R,
HYPRE_Int cdir,
hypre_Index cindex,
hypre_Index cstride,
hypre_StructMatrix *RAP )
{
hypre_Index index;
hypre_Index index_temp;
hypre_StructGrid *cgrid;
hypre_BoxArray *cgrid_boxes;
hypre_Box *cgrid_box;
hypre_IndexRef cstart;
hypre_Index stridec;
hypre_Index fstart;
hypre_IndexRef stridef;
hypre_Box *A_dbox;
hypre_Box *P_dbox;
hypre_Box *R_dbox;
hypre_Box *RAP_dbox;
double *pa, *pb;
double *ra, *rb;
double *a_cc, *a_cw, *a_ce, *a_cn;
double *rap_ce, *rap_cn;
double *rap_cnw, *rap_cne;
HYPRE_Int iA, iAm1, iAp1;
HYPRE_Int iAc;
HYPRE_Int iP, iP1;
HYPRE_Int iR;
HYPRE_Int yOffsetA;
HYPRE_Int xOffsetP;
HYPRE_Int yOffsetP;
/* hypre_printf("nosym 5.1\n");*/
stridef = cstride;
hypre_SetIndex(stridec, 1, 1, 1);
cgrid = hypre_StructMatrixGrid(RAP);
cgrid_boxes = hypre_StructGridBoxes(cgrid);
/* Remnant of the original multi-box loop; this "onebox" variant is called
   with ci/fi already matched by the caller. */
/* fi = 0;
hypre_ForBoxI(ci, cgrid_boxes)
{
while (fgrid_ids[fi] != cgrid_ids[ci])
{
fi++;
}
*/
cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);
cstart = hypre_BoxIMin(cgrid_box);
hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart);
A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi);
P_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(P), fi);
R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi);
RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);
/*-----------------------------------------------------------------
 * Extract pointers for interpolation operator:
 * pa is pointer for weight for f-point above c-point
 * pb is pointer for weight for f-point below c-point
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
/* pb is shifted back by one (constant-coefficient) y-offset. */
pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index) -
hypre_CCBoxOffsetDistance(P_dbox, index);
/*-----------------------------------------------------------------
 * Extract pointers for restriction operator:
 * ra is pointer for weight for f-point above c-point
 * rb is pointer for weight for f-point below c-point
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index) -
hypre_CCBoxOffsetDistance(R_dbox, index);
/*-----------------------------------------------------------------
 * Extract pointers for 5-point fine grid operator:
 *
 * a_cc is pointer for center coefficient
 * a_cw is pointer for west coefficient
 * a_ce is pointer for east coefficient
 * a_cs is pointer for south coefficient
 * a_cn is pointer for north coefficient
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,0);
MapIndex(index_temp, cdir, index);
a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,0,0);
MapIndex(index_temp, cdir, index);
a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
/*-----------------------------------------------------------------
 * Extract pointers for coarse grid operator - always 9-point:
 *
 * We build only the upper triangular part.
 *
 * rap_ce is pointer for east coefficient (etc.)
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
rap_ce = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
rap_cn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,1,0);
MapIndex(index_temp, cdir, index);
rap_cne = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,1,0);
MapIndex(index_temp, cdir, index);
rap_cnw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
/*-----------------------------------------------------------------
 * Define offsets for fine grid stencil and interpolation
 *
 * In the BoxLoop below I assume iA and iP refer to data associated
 * with the point which we are building the stencil for. The below
 * Offsets are used in referring to data associated with other points.
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
yOffsetA = hypre_CCBoxOffsetDistance(A_dbox,index);
yOffsetP = hypre_CCBoxOffsetDistance(P_dbox,index);
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
xOffsetP = hypre_CCBoxOffsetDistance(P_dbox,index);
/*-----------------------------------------------------------------
 * Switch statement to direct control to appropriate BoxLoop depending
 * on stencil size. Default is full 27-point.
 *-----------------------------------------------------------------*/
/*--------------------------------------------------------------
 * Loop for 5-point fine grid operator; produces upper triangular
 * part of 9-point coarse grid operator - excludes diagonal.
 * stencil entries: (northeast, north, northwest, and east)
 *--------------------------------------------------------------*/
/* Constant coefficients: one evaluation at a representative point
   replaces the whole BoxLoop. */
iP = hypre_CCBoxIndexRank(P_dbox,cstart);
iR = hypre_CCBoxIndexRank(R_dbox,cstart);
iA = hypre_CCBoxIndexRank(A_dbox,fstart);
iAc = hypre_CCBoxIndexRank(RAP_dbox,cstart);
iAm1 = iA - yOffsetA;
iAp1 = iA + yOffsetA;
iP1 = iP + yOffsetP + xOffsetP;
rap_cne[iAc] = ra[iR] * a_ce[iAp1] * pb[iP1];
iP1 = iP + yOffsetP;
rap_cn[iAc] = ra[iR] * a_cc[iAp1] * pb[iP1]
+ ra[iR] * a_cn[iAp1]
+ a_cn[iA] * pb[iP1];
iP1 = iP + yOffsetP - xOffsetP;
rap_cnw[iAc] = ra[iR] * a_cw[iAp1] * pb[iP1];
iP1 = iP + xOffsetP;
rap_ce[iAc] = a_ce[iA]
+ rb[iR] * a_ce[iAm1] * pb[iP1]
+ ra[iR] * a_ce[iAp1] * pa[iP1];
/* }*/ /* end ForBoxI */
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_PFMG2BuildRAPNoSym_onebox_FSS9_CC0
 *
 * For one coarse-grid box (ci) and its matching fine-grid box (fi), build
 * the upper-triangular ("NoSym") part of the 9-point coarse operator
 * RAP = R*A*P from a 9-point fine-grid operator A (5-point entries plus
 * the cse/cnw/cne corner entries), interpolation P and restriction R.
 * Produces the east, north, northwest and northeast coarse entries;
 * excludes the diagonal.
 *
 * "CC0" = A does not have fully constant coefficients: handles
 * hypre_StructMatrixConstantCoefficient(A) == 0 (all variable) and == 2
 * (constant off-diagonal, variable diagonal).
 * Returns hypre_error_flag.
 *
 * for fine stencil size 9, constant coefficient 0 */
HYPRE_Int
hypre_PFMG2BuildRAPNoSym_onebox_FSS9_CC0(
HYPRE_Int ci,
HYPRE_Int fi,
hypre_StructMatrix *A,
hypre_StructMatrix *P,
hypre_StructMatrix *R,
HYPRE_Int cdir,
hypre_Index cindex,
hypre_Index cstride,
hypre_StructMatrix *RAP )
{
hypre_Index index;
hypre_Index index_temp;
hypre_StructGrid *cgrid;
hypre_BoxArray *cgrid_boxes;
hypre_Box *cgrid_box;
hypre_IndexRef cstart;
hypre_Index stridec;
hypre_Index fstart;
hypre_IndexRef stridef;
hypre_Index loop_size;
HYPRE_Int constant_coefficient_A;
hypre_Box *A_dbox;
hypre_Box *P_dbox;
hypre_Box *R_dbox;
hypre_Box *RAP_dbox;
double *pa, *pb;
double *ra, *rb;
double *a_cc, *a_cw, *a_ce, *a_cn;
double *a_cse, *a_cnw, *a_cne;
double a_cn_offd, a_cn_offdp1, a_cw_offdp1;
double a_ce_offd, a_ce_offdm1, a_ce_offdp1;
double a_cne_offd, a_cne_offdm1, a_cne_offdp1;
double a_cse_offd, a_cse_offdp1, a_cnw_offd, a_cnw_offdp1;
double *rap_ce, *rap_cn;
double *rap_cnw, *rap_cne;
HYPRE_Int iA, iAm1, iAp1, iA_offd, iA_offdm1, iA_offdp1;
HYPRE_Int iAc;
HYPRE_Int iP, iP1;
HYPRE_Int iR;
HYPRE_Int yOffsetA, yOffsetA_diag, yOffsetA_offd;
HYPRE_Int xOffsetP;
HYPRE_Int yOffsetP;
/*hypre_printf("nosym 9.0\n");*/
stridef = cstride;
hypre_SetIndex(stridec, 1, 1, 1);
cgrid = hypre_StructMatrixGrid(RAP);
cgrid_boxes = hypre_StructGridBoxes(cgrid);
constant_coefficient_A = hypre_StructMatrixConstantCoefficient(A);
/* Remnant of the original multi-box loop; this "onebox" variant is called
   with ci/fi already matched by the caller. */
/* fi = 0;
hypre_ForBoxI(ci, cgrid_boxes)
{
while (fgrid_ids[fi] != cgrid_ids[ci])
{
fi++;
}
*/
cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);
cstart = hypre_BoxIMin(cgrid_box);
hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart);
A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi);
P_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(P), fi);
R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi);
RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);
/*-----------------------------------------------------------------
 * Extract pointers for interpolation operator:
 * pa is pointer for weight for f-point above c-point
 * pb is pointer for weight for f-point below c-point
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
/* pb is shifted back by one y-offset so that pb[iP1] reads the weight
   associated with the point below. */
pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index) -
hypre_BoxOffsetDistance(P_dbox, index);
/*-----------------------------------------------------------------
 * Extract pointers for restriction operator:
 * ra is pointer for weight for f-point above c-point
 * rb is pointer for weight for f-point below c-point
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index) -
hypre_BoxOffsetDistance(R_dbox, index);
/*-----------------------------------------------------------------
 * Extract pointers for 5-point fine grid operator:
 *
 * a_cc is pointer for center coefficient
 * a_cw is pointer for west coefficient
 * a_ce is pointer for east coefficient
 * a_cs is pointer for south coefficient
 * a_cn is pointer for north coefficient
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,0);
MapIndex(index_temp, cdir, index);
a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,0,0);
MapIndex(index_temp, cdir, index);
a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
/*-----------------------------------------------------------------
 * Extract additional pointers for 9-point fine grid operator:
 *
 * a_csw is pointer for southwest coefficient
 * a_cse is pointer for southeast coefficient
 * a_cnw is pointer for northwest coefficient
 * a_cne is pointer for northeast coefficient
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,1,-1,0);
MapIndex(index_temp, cdir, index);
a_cse = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,1,0);
MapIndex(index_temp, cdir, index);
a_cnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,1,0);
MapIndex(index_temp, cdir, index);
a_cne = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
/*-----------------------------------------------------------------
 * Extract pointers for coarse grid operator - always 9-point:
 *
 * We build only the upper triangular part.
 *
 * rap_ce is pointer for east coefficient (etc.)
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
rap_ce = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
rap_cn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,1,0);
MapIndex(index_temp, cdir, index);
rap_cne = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,1,0);
MapIndex(index_temp, cdir, index);
rap_cnw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
/*-----------------------------------------------------------------
 * Define offsets for fine grid stencil and interpolation
 *
 * In the BoxLoop below I assume iA and iP refer to data associated
 * with the point which we are building the stencil for. The below
 * Offsets are used in referring to data associated with other points.
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
yOffsetP = hypre_BoxOffsetDistance(P_dbox,index);
if ( constant_coefficient_A == 0 )
{
yOffsetA = hypre_BoxOffsetDistance(A_dbox,index);
}
else
{
/* CC==2: diagonal entries are stored variably (box offset), while
   off-diagonal entries are constant (CC offset). */
hypre_assert( constant_coefficient_A==2 );
yOffsetA_diag = hypre_BoxOffsetDistance(A_dbox,index);
yOffsetA_offd = hypre_CCBoxOffsetDistance(A_dbox,index);
}
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
xOffsetP = hypre_BoxOffsetDistance(P_dbox,index);
/*-----------------------------------------------------------------
 * Switch statement to direct control to appropriate BoxLoop depending
 * on stencil size. Default is full 27-point.
 *-----------------------------------------------------------------*/
/*--------------------------------------------------------------
 * Loop for 9-point fine grid operator; produces upper triangular
 * part of 9-point coarse grid operator - excludes diagonal.
 * stencil entries: (northeast, north, northwest, and east)
 *--------------------------------------------------------------*/
hypre_BoxGetSize(cgrid_box, loop_size);
if ( constant_coefficient_A==0 )
{
/* Fully variable coefficients: all of A is indexed per point. */
/*hypre_printf("nosym 9.0.0\n");*/
hypre_BoxLoop4Begin(hypre_StructMatrixDim(A), loop_size,
P_dbox, cstart, stridec, iP,
R_dbox, cstart, stridec, iR,
A_dbox, fstart, stridef, iA,
RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop4For(iP, iR, iA, iAc)
{
iAm1 = iA - yOffsetA;
iAp1 = iA + yOffsetA;
iP1 = iP + yOffsetP + xOffsetP;
rap_cne[iAc] = ra[iR] * a_ce[iAp1] * pb[iP1]
+ ra[iR] * a_cne[iAp1]
+ a_cne[iA] * pb[iP1];
iP1 = iP + yOffsetP;
rap_cn[iAc] = ra[iR] * a_cc[iAp1] * pb[iP1]
+ ra[iR] * a_cn[iAp1]
+ a_cn[iA] * pb[iP1];
iP1 = iP + yOffsetP - xOffsetP;
rap_cnw[iAc] = ra[iR] * a_cw[iAp1] * pb[iP1]
+ ra[iR] * a_cnw[iAp1]
+ a_cnw[iA] * pb[iP1];
iP1 = iP + xOffsetP;
rap_ce[iAc] = a_ce[iA]
+ rb[iR] * a_ce[iAm1] * pb[iP1]
+ ra[iR] * a_ce[iAp1] * pa[iP1]
+ rb[iR] * a_cne[iAm1]
+ ra[iR] * a_cse[iAp1]
+ a_cse[iA] * pb[iP1]
+ a_cne[iA] * pa[iP1];
}
hypre_BoxLoop4End(iP, iR, iA, iAc);
}
else
{
/* CC==2: off-diagonal coefficients of A are constant, so they are
   read once here (hoisted out of the loop); only a_cc is indexed
   inside the loop. */
/*hypre_printf("nosym 9.0.2\n");*/
hypre_assert( constant_coefficient_A==2 );
iA_offd = hypre_CCBoxIndexRank(A_dbox,fstart);
iA_offdm1 = iA_offd - yOffsetA_offd;
iA_offdp1 = iA_offd + yOffsetA_offd;
a_cn_offd = a_cn[iA_offd];
a_cn_offdp1 = a_cn[iA_offdp1];
a_cw_offdp1 = a_cw[iA_offdp1];
a_ce_offd = a_ce[iA_offd];
a_ce_offdm1 = a_ce[iA_offdm1];
a_ce_offdp1 = a_ce[iA_offdp1];
a_cne_offd = a_cne[iA_offd];
a_cne_offdm1 = a_cne[iA_offdm1];
a_cne_offdp1 = a_cne[iA_offdp1];
a_cse_offd = a_cse[iA_offd];
a_cse_offdp1 = a_cse[iA_offdp1];
a_cnw_offd = a_cnw[iA_offd];
a_cnw_offdp1 = a_cnw[iA_offdp1];
hypre_BoxLoop4Begin(hypre_StructMatrixDim(A), loop_size,
P_dbox, cstart, stridec, iP,
R_dbox, cstart, stridec, iR,
A_dbox, fstart, stridef, iA,
RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop4For(iP, iR, iA, iAc)
{
/* NOTE(review): iAm1 is computed here but never read in this
   constant-coefficient loop (only iAp1 is used, via a_cc[iAp1]);
   dead computation kept byte-identical to the generated source. */
iAm1 = iA - yOffsetA_diag;
iAp1 = iA + yOffsetA_diag;
iP1 = iP + yOffsetP + xOffsetP;
rap_cne[iAc] = ra[iR] * a_ce_offdp1 * pb[iP1]
+ ra[iR] * a_cne_offdp1
+ a_cne_offd * pb[iP1];
iP1 = iP + yOffsetP;
rap_cn[iAc] = ra[iR] * a_cc[iAp1] * pb[iP1]
+ ra[iR] * a_cn_offdp1
+ a_cn_offd * pb[iP1];
iP1 = iP + yOffsetP - xOffsetP;
rap_cnw[iAc] = ra[iR] * a_cw_offdp1 * pb[iP1]
+ ra[iR] * a_cnw_offdp1
+ a_cnw_offd * pb[iP1];
iP1 = iP + xOffsetP;
rap_ce[iAc] = a_ce_offd
+ rb[iR] * a_ce_offdm1 * pb[iP1]
+ ra[iR] * a_ce_offdp1 * pa[iP1]
+ rb[iR] * a_cne_offdm1
+ ra[iR] * a_cse_offdp1
+ a_cse_offd * pb[iP1]
+ a_cne_offd * pa[iP1];
}
hypre_BoxLoop4End(iP, iR, iA, iAc);
}
/* }*/ /* end ForBoxI */
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_PFMG2BuildRAPNoSym_onebox_FSS9_CC1
 *
 * Same computation as the FSS9_CC0 variant (upper-triangular part of the
 * 9-point coarse operator RAP from a 9-point fine operator), but for a
 * fully constant-coefficient A ("CC1").  All operators are spatially
 * constant, so each coarse stencil entry is computed exactly once at a
 * single representative index (hypre_CCBoxIndexRank) instead of in a
 * BoxLoop; all offsets use hypre_CCBoxOffsetDistance.
 * Returns hypre_error_flag.
 *
 * for fine stencil size 9, constant coefficient 1 */
HYPRE_Int
hypre_PFMG2BuildRAPNoSym_onebox_FSS9_CC1(
HYPRE_Int ci,
HYPRE_Int fi,
hypre_StructMatrix *A,
hypre_StructMatrix *P,
hypre_StructMatrix *R,
HYPRE_Int cdir,
hypre_Index cindex,
hypre_Index cstride,
hypre_StructMatrix *RAP )
{
hypre_Index index;
hypre_Index index_temp;
hypre_StructGrid *cgrid;
hypre_BoxArray *cgrid_boxes;
hypre_Box *cgrid_box;
hypre_IndexRef cstart;
hypre_Index stridec;
hypre_Index fstart;
hypre_IndexRef stridef;
hypre_Box *A_dbox;
hypre_Box *P_dbox;
hypre_Box *R_dbox;
hypre_Box *RAP_dbox;
double *pa, *pb;
double *ra, *rb;
double *a_cc, *a_cw, *a_ce, *a_cn;
double *a_cse, *a_cnw, *a_cne;
double *rap_ce, *rap_cn;
double *rap_cnw, *rap_cne;
HYPRE_Int iA, iAm1, iAp1;
HYPRE_Int iAc;
HYPRE_Int iP, iP1;
HYPRE_Int iR;
HYPRE_Int yOffsetA;
HYPRE_Int xOffsetP;
HYPRE_Int yOffsetP;
/*hypre_printf("nosym 9.1\n");*/
stridef = cstride;
hypre_SetIndex(stridec, 1, 1, 1);
cgrid = hypre_StructMatrixGrid(RAP);
cgrid_boxes = hypre_StructGridBoxes(cgrid);
/* Remnant of the original multi-box loop; this "onebox" variant is called
   with ci/fi already matched by the caller. */
/* fi = 0;
hypre_ForBoxI(ci, cgrid_boxes)
{
while (fgrid_ids[fi] != cgrid_ids[ci])
{
fi++;
}
*/
cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);
cstart = hypre_BoxIMin(cgrid_box);
hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart);
A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi);
P_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(P), fi);
R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi);
RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);
/*-----------------------------------------------------------------
 * Extract pointers for interpolation operator:
 * pa is pointer for weight for f-point above c-point
 * pb is pointer for weight for f-point below c-point
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
/* pb is shifted back by one (constant-coefficient) y-offset. */
pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index) -
hypre_CCBoxOffsetDistance(P_dbox, index);
/*-----------------------------------------------------------------
 * Extract pointers for restriction operator:
 * ra is pointer for weight for f-point above c-point
 * rb is pointer for weight for f-point below c-point
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index) -
hypre_CCBoxOffsetDistance(R_dbox, index);
/*-----------------------------------------------------------------
 * Extract pointers for 5-point fine grid operator:
 *
 * a_cc is pointer for center coefficient
 * a_cw is pointer for west coefficient
 * a_ce is pointer for east coefficient
 * a_cs is pointer for south coefficient
 * a_cn is pointer for north coefficient
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,0,0);
MapIndex(index_temp, cdir, index);
a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,0,0);
MapIndex(index_temp, cdir, index);
a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
/*-----------------------------------------------------------------
 * Extract additional pointers for 9-point fine grid operator:
 *
 * a_csw is pointer for southwest coefficient
 * a_cse is pointer for southeast coefficient
 * a_cnw is pointer for northwest coefficient
 * a_cne is pointer for northeast coefficient
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,1,-1,0);
MapIndex(index_temp, cdir, index);
a_cse = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,-1,1,0);
MapIndex(index_temp, cdir, index);
a_cnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex(index_temp,1,1,0);
MapIndex(index_temp, cdir, index);
a_cne = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
/*-----------------------------------------------------------------
 * Extract pointers for coarse grid operator - always 9-point:
 *
 * We build only the upper triangular part.
 *
 * rap_ce is pointer for east coefficient (etc.)
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
rap_ce = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
rap_cn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,1,1,0);
MapIndex(index_temp, cdir, index);
rap_cne = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex(index_temp,-1,1,0);
MapIndex(index_temp, cdir, index);
rap_cnw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
/*-----------------------------------------------------------------
 * Define offsets for fine grid stencil and interpolation
 *
 * In the BoxLoop below I assume iA and iP refer to data associated
 * with the point which we are building the stencil for. The below
 * Offsets are used in referring to data associated with other points.
 *-----------------------------------------------------------------*/
hypre_SetIndex(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
yOffsetA = hypre_CCBoxOffsetDistance(A_dbox,index);
yOffsetP = hypre_CCBoxOffsetDistance(P_dbox,index);
hypre_SetIndex(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
xOffsetP = hypre_CCBoxOffsetDistance(P_dbox,index);
/*-----------------------------------------------------------------
 * Switch statement to direct control to appropriate BoxLoop depending
 * on stencil size. Default is full 27-point.
 *-----------------------------------------------------------------*/
/*--------------------------------------------------------------
 * Loop for 9-point fine grid operator; produces upper triangular
 * part of 9-point coarse grid operator - excludes diagonal.
 * stencil entries: (northeast, north, northwest, and east)
 *--------------------------------------------------------------*/
/* Constant coefficients: one evaluation at a representative point
   replaces the whole BoxLoop. */
iP = hypre_CCBoxIndexRank(P_dbox,cstart);
iR = hypre_CCBoxIndexRank(R_dbox,cstart);
iA = hypre_CCBoxIndexRank(A_dbox,fstart);
iAc = hypre_CCBoxIndexRank(RAP_dbox,cstart);
iAm1 = iA - yOffsetA;
iAp1 = iA + yOffsetA;
iP1 = iP + yOffsetP + xOffsetP;
rap_cne[iAc] = ra[iR] * a_ce[iAp1] * pb[iP1]
+ ra[iR] * a_cne[iAp1]
+ a_cne[iA] * pb[iP1];
iP1 = iP + yOffsetP;
rap_cn[iAc] = ra[iR] * a_cc[iAp1] * pb[iP1]
+ ra[iR] * a_cn[iAp1]
+ a_cn[iA] * pb[iP1];
iP1 = iP + yOffsetP - xOffsetP;
rap_cnw[iAc] = ra[iR] * a_cw[iAp1] * pb[iP1]
+ ra[iR] * a_cnw[iAp1]
+ a_cnw[iA] * pb[iP1];
iP1 = iP + xOffsetP;
rap_ce[iAc] = a_ce[iA]
+ rb[iR] * a_ce[iAm1] * pb[iP1]
+ ra[iR] * a_ce[iAp1] * pa[iP1]
+ rb[iR] * a_cne[iAm1]
+ ra[iR] * a_cse[iAp1]
+ a_cse[iA] * pb[iP1]
+ a_cne[iA] * pa[iP1];
/* }*/ /* end ForBoxI */
return hypre_error_flag;
}
|
vel_pr_criteria.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Jordi Cotela
//
#ifndef KRATOS_VEL_PR_CRITERIA_H
#define KRATOS_VEL_PR_CRITERIA_H
/* Project includes */
#include "utilities/openmp_utils.h"
#include "includes/model_part.h"
#include "includes/define.h"
#include "solving_strategies/convergencecriterias/convergence_criteria.h"
namespace Kratos
{
///@addtogroup IncompressibleFluidApplication
///@{
///@name Kratos Classes
///@{
/// Convergence criteria for fluid problems.
/**
This class implements a convergence control based on nodal velocity and
pressure values. The error is evaluated separately for each of them, and
relative and absolute tolerances for both must be specified.
*/
template< class TSparseSpace,
          class TDenseSpace >
class VelPrCriteria : public ConvergenceCriteria< TSparseSpace, TDenseSpace >
{
public:
    ///@name Type Definitions
    ///@{

    KRATOS_CLASS_POINTER_DEFINITION( VelPrCriteria );

    typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType;

    typedef TSparseSpace SparseSpaceType;

    typedef typename BaseType::TDataType TDataType;

    typedef typename BaseType::DofsArrayType DofsArrayType;

    typedef typename BaseType::TSystemMatrixType TSystemMatrixType;

    typedef typename BaseType::TSystemVectorType TSystemVectorType;

    typedef OpenMPUtils::PartitionVector PartitionVector;

    typedef std::size_t KeyType;

    ///@}
    ///@name Life Cycle
    ///@{

    /// Constructor.
    /**
     * @param VelRatioTolerance Relative tolerance for velocity error
     * @param VelAbsTolerance Absolute tolerance for velocity error
     * @param PrsRatioTolerance Relative tolerance for pressure error
     * @param PrsAbsTolerance Absolute tolerance for pressure error
     */
    VelPrCriteria( TDataType VelRatioTolerance,
                   TDataType VelAbsTolerance,
                   TDataType PrsRatioTolerance,
                   TDataType PrsAbsTolerance)
        : ConvergenceCriteria< TSparseSpace, TDenseSpace >(),
          mVelRatioTolerance(VelRatioTolerance),
          mVelAbsTolerance(VelAbsTolerance),
          mPrRatioTolerance(PrsRatioTolerance),
          mPrAbsTolerance(PrsAbsTolerance)
    {}

    /// Destructor.
    ~VelPrCriteria() override {}

    ///@}
    ///@name Operators
    ///@{

    /// Compute relative and absolute error.
    /**
     * Velocity and non-velocity ("pressure") Dofs are measured separately;
     * convergence requires each field to satisfy either its relative or its
     * absolute tolerance.
     * @param rModelPart Reference to the ModelPart containing the fluid problem.
     * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
     * @param A System matrix (unused)
     * @param Dx Vector of results (variations on nodal variables)
     * @param b RHS vector (residual, unused)
     * @return true if convergence is achieved, false otherwise
     */
    bool PostCriteria( ModelPart& rModelPart,
                       DofsArrayType& rDofSet,
                       const TSystemMatrixType& A,
                       const TSystemVectorType& Dx,
                       const TSystemVectorType& b ) override
    {
        if (SparseSpaceType::Size(Dx) != 0) //if we are solving for something
        {
            // Initialize norm accumulators and Dof counters for both fields
            TDataType VelSolutionNorm = 0.0;
            TDataType PrSolutionNorm = 0.0;
            TDataType VelIncreaseNorm = 0.0;
            TDataType PrIncreaseNorm = 0.0;
            unsigned int VelDofNum(0),PrDofNum(0);

            // Set a partition for OpenMP
            int NumDofs = rDofSet.size();
            PartitionVector DofPartition;
            int NumThreads = OpenMPUtils::GetNumThreads();
            OpenMPUtils::DivideInPartitions(NumDofs,NumThreads,DofPartition);

            // Loop over Dofs: each thread accumulates its partition, the
            // reduction clause combines the partial sums.
            #pragma omp parallel reduction(+:VelSolutionNorm,PrSolutionNorm,VelIncreaseNorm,PrIncreaseNorm,VelDofNum,PrDofNum)
            {
                int k = OpenMPUtils::ThisThread();
                typename DofsArrayType::iterator DofBegin = rDofSet.begin() + DofPartition[k];
                typename DofsArrayType::iterator DofEnd = rDofSet.begin() + DofPartition[k+1];

                std::size_t DofId;
                TDataType DofValue;
                TDataType DofIncr;

                for (typename DofsArrayType::iterator itDof = DofBegin; itDof != DofEnd; ++itDof)
                {
                    if (itDof->IsFree())
                    {
                        DofId = itDof->EquationId();
                        DofValue = itDof->GetSolutionStepValue(0);
                        DofIncr = Dx[DofId];

                        KeyType CurrVar = itDof->GetVariable().Key();
                        if ((CurrVar == VELOCITY_X) || (CurrVar == VELOCITY_Y) || (CurrVar == VELOCITY_Z))
                        {
                            VelSolutionNorm += DofValue * DofValue;
                            VelIncreaseNorm += DofIncr * DofIncr;
                            ++VelDofNum;
                        }
                        else // any non-velocity Dof is counted as "pressure"
                        {
                            PrSolutionNorm += DofValue * DofValue;
                            PrIncreaseNorm += DofIncr * DofIncr;
                            ++PrDofNum;
                        }
                    }
                }
            }

            // Avoid 0/0 in the relative error for identically-zero solutions
            if(VelSolutionNorm == 0.0)
                VelSolutionNorm = 1.0;
            if(PrSolutionNorm == 0.0)
                PrSolutionNorm = 1.0;

            TDataType VelRatio = std::sqrt(VelIncreaseNorm/VelSolutionNorm);
            TDataType PrRatio = std::sqrt(PrIncreaseNorm/PrSolutionNorm);
            // Guard against a division by zero when one of the fields has no
            // free Dofs (e.g. a fractional-step velocity solve): an empty
            // field is trivially converged, so its absolute error is zero.
            TDataType VelAbs = (VelDofNum > 0) ? std::sqrt(VelIncreaseNorm)/static_cast<TDataType>(VelDofNum) : 0.0;
            TDataType PrAbs = (PrDofNum > 0) ? std::sqrt(PrIncreaseNorm)/static_cast<TDataType>(PrDofNum) : 0.0;

            // Only the rank 0 process reports, and only if verbosity is enabled
            if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0)
            {
                std::cout << "CONVERGENCE CHECK:" << std::endl;
                std::cout << " VELOC.: ratio = " << VelRatio <<"; exp.ratio = " << mVelRatioTolerance << " abs = " << VelAbs << " exp.abs = " << mVelAbsTolerance << std::endl;
                std::cout << " PRESS.: ratio = " << PrRatio <<"; exp.ratio = " << mPrRatioTolerance << " abs = " << PrAbs << " exp.abs = " << mPrAbsTolerance << std::endl;
            }

            if ( (VelRatio <= mVelRatioTolerance || VelAbs <= mVelAbsTolerance) &&
                 (PrRatio <= mPrRatioTolerance || PrAbs <= mPrAbsTolerance) )
            {
                if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0)
                {
                    std::cout << "*** CONVERGENCE IS ACHIEVED ***" << std::endl;
                }
                return true;
            }
            else
            {
                return false;
            }
        }
        else //in this case all the displacements are imposed!
        {
            return true;
        }
    }

    /// Initialize this class before using it
    /**
     * @param rModelPart Reference to the ModelPart containing the fluid problem. (unused)
     */
    void Initialize( ModelPart& rModelPart ) override
    {
        BaseType::mConvergenceCriteriaIsInitialized = true;
    }

    /// Required override; this criterion needs no per-step setup.
    void InitializeSolutionStep( ModelPart& rModelPart,
                                 DofsArrayType& rDofSet,
                                 const TSystemMatrixType& A,
                                 const TSystemVectorType& Dx,
                                 const TSystemVectorType& b ) override
    {}

    /// Required override; this criterion needs no per-step cleanup.
    void FinalizeSolutionStep( ModelPart& rModelPart,
                               DofsArrayType& rDofSet,
                               const TSystemMatrixType& A,
                               const TSystemVectorType& Dx,
                               const TSystemVectorType& b ) override
    {}

    ///@} // Operations

private:

    TDataType mVelRatioTolerance; ///< Relative velocity tolerance
    TDataType mVelAbsTolerance;   ///< Absolute velocity tolerance
    TDataType mPrRatioTolerance;  ///< Relative pressure tolerance
    TDataType mPrAbsTolerance;    ///< Absolute pressure tolerance
};
///@} // Kratos classes
///@} // Application group
}
#endif /* KRATOS_VEL_PR_CRITERIA_H */
|
Parallelizer.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_PARALLELIZER_H
#define EIGEN_PARALLELIZER_H
namespace Eigen {
namespace internal {
/** \internal
  * Stores (SetAction) or retrieves (GetAction) the user-selected maximum
  * thread count. On GetAction, *v receives the stored value if positive,
  * otherwise the OpenMP default — or 1 when OpenMP is disabled. */
inline void manage_multi_threading(Action action, int* v)
{
  static EIGEN_UNUSED int m_maxThreads = -1;
  switch(action)
  {
    case SetAction:
      eigen_internal_assert(v!=0);
      m_maxThreads = *v;
      break;
    case GetAction:
      eigen_internal_assert(v!=0);
#ifdef EIGEN_HAS_OPENMP
      *v = (m_maxThreads>0) ? m_maxThreads : omp_get_max_threads();
#else
      *v = 1;
#endif
      break;
    default:
      eigen_internal_assert(false);
  }
}
}
/** Must be called first when using Eigen from multiple threads: it touches
  * the lazily-initialized global settings (thread count and cache sizes) so
  * that their one-time setup happens before any concurrent use. */
inline void initParallel()
{
  int ignored;
  internal::manage_multi_threading(GetAction, &ignored);
  std::ptrdiff_t l1, l2, l3;
  internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);
}
/** \returns the max number of threads reserved for Eigen
  * \sa setNbThreads */
inline int nbThreads()
{
  int result;
  internal::manage_multi_threading(GetAction, &result);
  return result;
}
/** Sets the max number of threads reserved for Eigen
  * \sa nbThreads */
inline void setNbThreads(int v)
{
  int requested = v; // manage_multi_threading takes the value by pointer
  internal::manage_multi_threading(SetAction, &requested);
}
namespace internal {
/** \internal Per-thread bookkeeping for the parallel GEMM driver. */
template<typename Index> struct GemmParallelInfo
{
  int volatile sync = -1;  // coordination token shared with the product kernel (written elsewhere)
  int volatile users = 0;  // coordination counter shared with the product kernel (written elsewhere)
  Index lhs_start = 0;     // first lhs row assigned to this thread
  Index lhs_length = 0;    // number of lhs rows assigned to this thread
};
// Run the matrix-product functor `func` either sequentially or split across
// OpenMP threads. `Condition` statically enables/disables threading for this
// product kind; rows/cols/depth size the product; `transpose` indicates a
// row-major destination (rows/cols are swapped before splitting).
template<bool Condition, typename Functor, typename Index>
void parallelize_gemm(const Functor& func, Index rows, Index cols, Index depth, bool transpose)
{
  // TODO when EIGEN_USE_BLAS is defined,
  // we should still enable OMP for other scalar types
#if !(defined (EIGEN_HAS_OPENMP)) || defined (EIGEN_USE_BLAS)
  // FIXME the transpose variable is only needed to properly split
  // the matrix product when multithreading is enabled. This is a temporary
  // fix to support row-major destination matrices. This whole
  // parallelizer mechanism has to be redisigned anyway.
  EIGEN_UNUSED_VARIABLE(depth);
  EIGEN_UNUSED_VARIABLE(transpose);
  // No OpenMP (or BLAS backend in use): single sequential call over the whole product.
  func(0,rows, 0,cols);
#else
  // Dynamically check whether we should enable or disable OpenMP.
  // The conditions are:
  // - the max number of threads we can create is greater than 1
  // - we are not already in a parallel code
  // - the sizes are large enough
  // compute the maximal number of threads from the size of the product:
  // FIXME this has to be fine tuned
  Index size = transpose ? rows : cols;
  Index pb_max_threads = std::max<Index>(1,size / 32);
  // compute the maximal number of threads from the total amount of work:
  double work = static_cast<double>(rows) * static_cast<double>(cols) *
      static_cast<double>(depth);
  double kMinTaskSize = 50000; // Heuristic.
  pb_max_threads = std::max<Index>(1, std::min<Index>(pb_max_threads, work / kMinTaskSize));
  // compute the number of threads we are going to use
  Index threads = std::min<Index>(nbThreads(), pb_max_threads);
  // if multi-threading is explicitely disabled, not useful, or if we already are in a parallel session,
  // then abort multi-threading
  // FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp?
  if((!Condition) || (threads==1) || (omp_get_num_threads()>1))
    return func(0,rows, 0,cols);
  Eigen::initParallel();
  func.initParallelSession(threads);
  if(transpose)
    std::swap(rows,cols);
  // One GemmParallelInfo per thread, stack-allocated (aligned) when small.
  ei_declare_aligned_stack_constructed_variable(GemmParallelInfo<Index>,info,threads,0);
  #pragma omp parallel num_threads(threads)
  {
    Index i = omp_get_thread_num();
    // Note that the actual number of threads might be lower than the number of request ones.
    Index actual_threads = omp_get_num_threads();
    // Column blocks are truncated to a multiple of 4; row blocks to a
    // multiple of the kernel's register blocking Traits::mr.
    Index blockCols = (cols / actual_threads) & ~Index(0x3);
    Index blockRows = (rows / actual_threads);
    blockRows = (blockRows/Functor::Traits::mr)*Functor::Traits::mr;
    Index r0 = i*blockRows;
    // The last thread absorbs the remainder rows/columns.
    Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows;
    Index c0 = i*blockCols;
    Index actualBlockCols = (i+1==actual_threads) ? cols-c0 : blockCols;
    info[i].lhs_start = r0;
    info[i].lhs_length = actualBlockRows;
    if(transpose) func(c0, actualBlockCols, 0, rows, info);
    else func(0, rows, c0, actualBlockCols, info);
  }
#endif
}
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_PARALLELIZER_H
|
multigrid.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
//#include "multigrid.h"
#include "utils.h"
// This is a readaptation of Martin J. White's code, available at https://github.com/martinjameswhite/recon_code
// C++ dependencies have been removed, solver parameters e.g. niterations exposed
// Grid can be non-cubic, with a cell size different along each direction (but why would we want that?)
// los can be global (to test the algorithm in the plane-parallel limit)
// The multigrid code for solving our modified Poisson-like equation.
// See the notes for details.
// The only place that the equations appear explicitly is in
// gauss_seidel, jacobi and residual
//
//
// The finite difference equation we are solving is written schematically
// as A.v=f where A is the matrix operator formed from the discretized
// partial derivatives and f is the source term (density in our case).
// The matrix, A, is never explicitly constructed.
// The solution, phi, is in v.
//
//
// Author: Martin White (UCB/LBNL)
// Written: 20-Apr-2015
// Modified: 20-Apr-2015
//
/*
int get_num_threads()
{
//Calculate number of threads
int num_threads=0;
#pragma omp parallel
{
#pragma omp atomic
num_threads++;
}
return num_threads;
}
*/
// Damped (weighted) Jacobi smoother for the discretized modified
// Poisson-like equation A.v = f on a periodic grid.
// v: solution estimate, updated in place; f: source term.
// nmesh/boxsize/boxcenter: grid dimensions and physical extent;
// beta: distortion parameter entering the anisotropic term;
// damping_factor: mixing weight between old iterate and Jacobi update;
// niterations: number of full sweeps; los: if non-NULL, a fixed
// (plane-parallel) line of sight, otherwise the radial direction per cell.
void jacobi(FLOAT *v, const FLOAT *f, const int* nmesh, const FLOAT* boxsize, const FLOAT* boxcenter, const FLOAT beta, const FLOAT damping_factor, const int niterations, const FLOAT* los) {
  // Does an update using damped Jacobi. This, and in residual below,
  // is where the explicit equation we are solving appears.
  // See notes for more details.
  const size_t size = nmesh[0]*nmesh[1]*nmesh[2];
  const size_t nmeshz = nmesh[2];
  const size_t nmeshyz = nmesh[2]*nmesh[1];
  // Scratch array for the new iterate (Jacobi needs the full old iterate).
  // NOTE(review): malloc result is not checked for NULL.
  FLOAT* jac = (FLOAT *) malloc(size*sizeof(FLOAT));
  FLOAT cell, cell2[NDIM], icell2[NDIM], offset[NDIM], losn[NDIM];
  // Per-dimension cell size (squared and inverse-squared) and the offset of
  // the box corner in cell units; losn is the line of sight in cell units.
  for (int idim=0; idim<NDIM; idim++) {
    cell = boxsize[idim]/nmesh[idim];
    cell2[idim] = cell*cell;
    icell2[idim] = 1./cell2[idim];
    offset[idim] = (boxcenter[idim] - boxsize[idim]/2.)/cell;
    if (los != NULL) losn[idim] = los[idim]/cell;
  }
  for (int iter=0; iter<niterations; iter++) {
    #pragma omp parallel for shared(v,f,jac)
    for (int ix=0; ix<nmesh[0]; ix++) {
      FLOAT px = (los == NULL) ? ix + offset[0] : losn[0];
      // Flattened index offsets of this x-plane and its periodic neighbours.
      size_t ix0 = nmeshyz*ix;
      size_t ixp = nmeshyz*((ix+1) % nmesh[0]);
      size_t ixm = nmeshyz*((ix-1+nmesh[0]) % nmesh[0]);
      for (int iy=0; iy<nmesh[1]; iy++) {
        FLOAT py = (los == NULL) ? iy + offset[1] : losn[1];
        size_t iy0 = nmeshz*iy;
        size_t iyp = nmeshz*((iy+1) % nmesh[1]);
        size_t iym = nmeshz*((iy-1+nmesh[1]) % nmesh[1]);
        for (int iz0=0; iz0<nmesh[2]; iz0++) {
          FLOAT pz = (los == NULL) ? iz0 + offset[2] : losn[2];
          // g carries the beta-dependent anisotropic coupling; gp?2 are the
          // diagonal stencil coefficients along each axis.
          FLOAT g = beta/(cell2[0]*px*px+cell2[1]*py*py+cell2[2]*pz*pz);
          FLOAT gpx2 = icell2[0] + g*px*px;
          FLOAT gpy2 = icell2[1] + g*py*py;
          FLOAT gpz2 = icell2[2] + g*pz*pz;
          size_t izp = (iz0+1) % nmesh[2];
          size_t izm = (iz0-1+nmesh[2]) % nmesh[2];
          size_t ii = ix0 + iy0 + iz0;
          // Off-diagonal part of the stencil: axis neighbours plus the mixed
          // second-derivative (cross) terms.
          jac[ii] = f[ii]+
            gpx2*(v[ixp+iy0+iz0]+v[ixm+iy0+iz0])+
            gpy2*(v[ix0+iyp+iz0]+v[ix0+iym+iz0])+
            gpz2*(v[ix0+iy0+izp]+v[ix0+iy0+izm])+
            g/2*(px*py*(v[ixp+iyp+iz0]+v[ixm+iym+iz0]
                       -v[ixm+iyp+iz0]-v[ixp+iym+iz0])+
                 px*pz*(v[ixp+iy0+izp]+v[ixm+iy0+izm]
                       -v[ixm+iy0+izp]-v[ixp+iy0+izm])+
                 py*pz*(v[ix0+iyp+izp]+v[ix0+iym+izm]
                       -v[ix0+iym+izp]-v[ix0+iyp+izm]));
          // Radial case only: first-derivative terms along the varying los.
          if (los == NULL) {
            jac[ii] += g*(px*(v[ixp+iy0+iz0]-v[ixm+iy0+iz0])+
                          py*(v[ix0+iyp+iz0]-v[ix0+iym+iz0])+
                          pz*(v[ix0+iy0+izp]-v[ix0+iy0+izm]));
          }
          // Divide by the diagonal element of A.
          jac[ii] /= 2*(gpx2 + gpy2 + gpz2);
        }
      }
    }
    // Damped update: blend the old iterate with the Jacobi step.
    #pragma omp parallel for shared(v,jac)
    for (size_t ii=0; ii<size; ii++) v[ii] = (1-damping_factor)*v[ii] + damping_factor*jac[ii];
  }
  free(jac);
}
// Compute the residual r = f - A.v of the discretized equation, with the
// same stencil (and sign conventions) as jacobi above.
// v: current solution; f: source; r: output residual (same grid).
void residual(const FLOAT* v, const FLOAT* f, FLOAT* r, const int* nmesh, const FLOAT* boxsize, const FLOAT* boxcenter, const FLOAT beta, const FLOAT* los) {
  // Returns the residual, r=f-Av, keeping track of factors of h = boxsize/nmesh
  // First compute the operator A on v, keeping track of periodic
  // boundary conditions and ignoring the 1/h terms.
  // Note the relative signs here and in jacobi (or gauss_seidel).
  const size_t size = nmesh[0]*nmesh[1]*nmesh[2];
  const size_t nmeshz = nmesh[2];
  const size_t nmeshyz = nmesh[2]*nmesh[1];
  FLOAT cell, cell2[NDIM], icell2[NDIM], offset[NDIM], losn[NDIM];
  // Same geometric precomputation as in jacobi (cell sizes, box offset,
  // line of sight in cell units).
  for (int idim=0; idim<NDIM; idim++) {
    cell = boxsize[idim]/nmesh[idim];
    cell2[idim] = cell*cell;
    icell2[idim] = 1./cell2[idim];
    offset[idim] = (boxcenter[idim] - boxsize[idim]/2.)/cell;
    if (los != NULL) losn[idim] = los[idim]/cell;
  }
  #pragma omp parallel for shared(v,r)
  for (int ix=0; ix<nmesh[0]; ix++) {
    FLOAT px = (los == NULL) ? ix + offset[0] : losn[0];
    size_t ix0 = nmeshyz*ix;
    size_t ixp = nmeshyz*((ix+1) % nmesh[0]);
    size_t ixm = nmeshyz*((ix-1+nmesh[0]) % nmesh[0]);
    for (int iy=0; iy<nmesh[1]; iy++) {
      FLOAT py = (los == NULL) ? iy + offset[1] : losn[1];
      size_t iy0 = nmeshz*iy;
      size_t iyp = nmeshz*((iy+1) % nmesh[1]);
      size_t iym = nmeshz*((iy-1+nmesh[1]) % nmesh[1]);
      for (int iz0=0; iz0<nmesh[2]; iz0++) {
        FLOAT pz = (los == NULL) ? iz0 + offset[2] : losn[2];
        FLOAT g = beta/(cell2[0]*px*px+cell2[1]*py*py+cell2[2]*pz*pz);
        FLOAT gpx2 = icell2[0] + g*px*px;
        FLOAT gpy2 = icell2[1] + g*py*py;
        FLOAT gpz2 = icell2[2] + g*pz*pz;
        size_t izp = (iz0+1) % nmesh[2];
        size_t izm = (iz0-1+nmesh[2]) % nmesh[2];
        size_t ii = ix0 + iy0 + iz0;
        // (A.v)[ii]: diagonal term minus the neighbour/cross-term sum.
        r[ii] = 2*(gpx2 + gpy2 + gpz2)*v[ii] -
          (gpx2*(v[ixp+iy0+iz0]+v[ixm+iy0+iz0])+
           gpy2*(v[ix0+iyp+iz0]+v[ix0+iym+iz0])+
           gpz2*(v[ix0+iy0+izp]+v[ix0+iy0+izm])+
           g/2*(px*py*(v[ixp+iyp+iz0]+v[ixm+iym+iz0]
                      -v[ixm+iyp+iz0]-v[ixp+iym+iz0])+
                px*pz*(v[ixp+iy0+izp]+v[ixm+iy0+izm]
                      -v[ixm+iy0+izp]-v[ixp+iy0+izm])+
                py*pz*(v[ix0+iyp+izp]+v[ix0+iym+izm]
                      -v[ix0+iym+izp]-v[ix0+iyp+izm])));
        // Radial case only: first-derivative terms (sign opposite to jacobi).
        if (los == NULL) {
          r[ii] -= g*(px*(v[ixp+iy0+iz0]-v[ixm+iy0+iz0])+
                      py*(v[ix0+iyp+iz0]-v[ix0+iym+iz0])+
                      pz*(v[ix0+iy0+izp]-v[ix0+iy0+izm]));
        }
      }
    }
  }
  // Now subtract it from f
  #pragma omp parallel for shared(r,f)
  for (size_t ii=0; ii<size; ii++) r[ii] = f[ii] - r[ii];
}
// Prolongation: interpolate coarse-grid vector v2h (spacing 2h) onto the
// fine grid v1h (spacing h) with trilinear interpolation and periodic BC.
// nmesh is the COARSE grid dimensions; the fine grid is 2*nmesh.
void prolong(const FLOAT* v2h, FLOAT* v1h, const int* nmesh) {
  // Transfer a vector, v2h, from the coarse grid with spacing 2h to a
  // fine grid with spacing 1h using linear interpolation and periodic BC.
  // The length, N, is of the coarse-grid vector, v2h.
  // This is simple, linear interpolation in a cube.
  const size_t nmeshz = nmesh[2];
  const size_t nmeshyz = nmesh[2]*nmesh[1];
  // Strides of the (twice larger) fine grid.
  const size_t nmesh2z = 2*nmesh[2];
  const size_t nmesh2yz = 4*nmesh[2]*nmesh[1];
  #pragma omp parallel for shared(v2h,v1h)
  for (int ix=0; ix<nmesh[0]; ix++) {
    size_t ix0 = nmeshyz*ix;
    size_t ixp = nmeshyz*((ix+1) % nmesh[0]);
    size_t i2x0 = nmesh2yz*2*ix;
    size_t i2xp = i2x0 + nmesh2yz;
    for (int iy=0; iy<nmesh[1]; iy++) {
      size_t iy0 = nmeshz*iy;
      size_t iyp = nmeshz*((iy+1) % nmesh[1]);
      size_t i2y0 = nmesh2z*2*iy;
      size_t i2yp = i2y0 + nmesh2z;
      for (int iz0=0; iz0<nmesh[2]; iz0++) {
        size_t izp = (iz0+1) % nmesh[2];
        size_t i2z0 = 2*iz0;
        size_t i2zp = i2z0 + 1;
        size_t ii0 = ix0+iy0+iz0;
        // Coincident point: copy; edge midpoints: 2-point average;
        // face centers: 4-point average; cube center: 8-point average.
        v1h[i2x0+i2y0+i2z0] = v2h[ii0];
        v1h[i2xp+i2y0+i2z0] = (v2h[ii0] + v2h[ixp+iy0+iz0])/2;
        v1h[i2x0+i2yp+i2z0] = (v2h[ii0] + v2h[ix0+iyp+iz0])/2;
        v1h[i2x0+i2y0+i2zp] = (v2h[ii0] + v2h[ix0+iy0+izp])/2;
        v1h[i2xp+i2yp+i2z0] = (v2h[ii0] + v2h[ixp+iy0+iz0]
                             + v2h[ix0+iyp+iz0] + v2h[ixp+iyp+iz0])/4;
        v1h[i2x0+i2yp+i2zp] = (v2h[ii0] + v2h[ix0+iyp+iz0]
                             + v2h[ix0+iy0+izp] + v2h[ix0+iyp+izp])/4;
        v1h[i2xp+i2y0+i2zp] = (v2h[ii0] + v2h[ixp+iy0+iz0]
                             + v2h[ix0+iy0+izp] + v2h[ixp+iy0+izp])/4;
        v1h[i2xp+i2yp+i2zp] = (v2h[ii0] + v2h[ixp+iy0+iz0]
                             + v2h[ix0+iyp+iz0] + v2h[ix0+iy0+izp]
                             + v2h[ixp+iyp+iz0] + v2h[ixp+iy0+izp]
                             + v2h[ix0+iyp+izp] + v2h[ixp+iyp+izp])/8;
      }
    }
  }
}
// Restriction: transfer fine-grid vector v1h (spacing h) to the coarse grid
// v2h (spacing 2h) by full weighting (27-point stencil, weights 8/4/2/1
// summing to 64) with periodic BC. nmesh is the FINE grid dimensions and is
// assumed even along every axis.
void reduce(const FLOAT* v1h, FLOAT* v2h, const int* nmesh) {
  // Transfer a vector, v1h, from the fine grid with spacing 1h to a coarse
  // grid with spacing 2h using full weighting and periodic BC.
  // The length, N, is of the fine-grid vector (v1h) and is assumed even,
  // the code doesn't check.
  const size_t nmeshz = nmesh[2];
  const size_t nmeshyz = nmesh[2]*nmesh[1];
  int nmesh2[NDIM];
  for (int idim=0; idim<NDIM; idim++) nmesh2[idim] = nmesh[idim]/2;
  const size_t nmesh2z = nmesh2[2];
  const size_t nmesh2yz = nmesh2[2]*nmesh2[1];
  #pragma omp parallel for shared(v2h,v1h)
  for (int ix=0; ix<nmesh2[0]; ix++) {
    // Fine-grid indices of the coarse point (2*ix,...) and its neighbours.
    size_t ix0 = nmeshyz*2*ix;
    size_t ixp = nmeshyz*((2*ix+1) % nmesh[0]);
    size_t ixm = nmeshyz*((2*ix-1 + nmesh[0]) % nmesh[0]);
    for (int iy=0; iy<nmesh2[1]; iy++) {
      size_t iy0 = nmeshz*2*iy;
      size_t iyp = nmeshz*((2*iy+1) % nmesh[1]);
      size_t iym = nmeshz*((2*iy-1 + nmesh[1]) % nmesh[1]);
      for (int iz=0; iz<nmesh2[2]; iz++) {
        size_t iz0 = 2*iz;
        size_t izp = (iz0+1) % nmesh[2];
        size_t izm = (iz0-1 + nmesh[2]) % nmesh[2];
        // Weight 8 for the center, 4 for faces, 2 for edges, 1 for corners.
        v2h[nmesh2yz*ix+nmesh2z*iy+iz] = (8*v1h[ix0+iy0+iz0]+
                                          4*(v1h[ixp+iy0+iz0]+
                                             v1h[ixm+iy0+iz0]+
                                             v1h[ix0+iyp+iz0]+
                                             v1h[ix0+iym+iz0]+
                                             v1h[ix0+iy0+izp]+
                                             v1h[ix0+iy0+izm])+
                                          2*(v1h[ixp+iyp+iz0]+
                                             v1h[ixm+iyp+iz0]+
                                             v1h[ixp+iym+iz0]+
                                             v1h[ixm+iym+iz0]+
                                             v1h[ixp+iy0+izp]+
                                             v1h[ixm+iy0+izp]+
                                             v1h[ixp+iy0+izm]+
                                             v1h[ixm+iy0+izm]+
                                             v1h[ix0+iyp+izp]+
                                             v1h[ix0+iym+izp]+
                                             v1h[ix0+iyp+izm]+
                                             v1h[ix0+iym+izm])+
                                          v1h[ixp+iyp+izp]+
                                          v1h[ixm+iyp+izp]+
                                          v1h[ixp+iym+izp]+
                                          v1h[ixm+iym+izp]+
                                          v1h[ixp+iyp+izm]+
                                          v1h[ixm+iyp+izm]+
                                          v1h[ixp+iym+izm]+
                                          v1h[ixm+iym+izm])/64.0;
      }
    }
  }
}
/* One recursive multigrid V-cycle: pre-smooth, restrict the residual to a
 * half-resolution grid, solve there recursively, prolong the coarse
 * correction back, add it to v, then post-smooth. Recursion stops once any
 * dimension is <= 4 cells or odd. v is updated in place. */
void vcycle(FLOAT* v, const FLOAT* f, const int* nmesh, const FLOAT* boxsize, const FLOAT* boxcenter, const FLOAT beta, const FLOAT damping_factor, const int niterations, const FLOAT* los) {
  /* Pre-smoothing. */
  jacobi(v, f, nmesh, boxsize, boxcenter, beta, damping_factor, niterations, los);
  const size_t npts = nmesh[0]*nmesh[1]*nmesh[2];
  /* Can we build a coarser level? Every dimension must be even and > 4. */
  _Bool can_coarsen = 1;
  for (int idim=0; idim<NDIM; idim++)
    can_coarsen &= (nmesh[idim] > 4 && (nmesh[idim] % 2 == 0));
  if (can_coarsen) {
    int cmesh[NDIM];
    for (int idim=0; idim<NDIM; idim++) cmesh[idim] = nmesh[idim]/2;
    /* Fine-grid residual ... */
    FLOAT* res = (FLOAT *) malloc(npts*sizeof(FLOAT));
    residual(v, f, res, nmesh, boxsize, boxcenter, beta, los);
    /* ... restricted to the coarse grid becomes the coarse source term. */
    FLOAT* coarse_f = (FLOAT *) malloc(npts/8*sizeof(FLOAT));
    reduce(res, coarse_f, nmesh);
    free(res);
    /* Coarse solve, starting from a zero initial guess. */
    FLOAT* coarse_v = (FLOAT *) calloc(npts/8, sizeof(FLOAT));
    vcycle(coarse_v, coarse_f, cmesh, boxsize, boxcenter, beta, damping_factor, niterations, los);
    free(coarse_f);
    /* Prolong the coarse correction and apply it to the fine solution. */
    FLOAT* correction = (FLOAT *) malloc(npts*sizeof(FLOAT));
    prolong(coarse_v, correction, cmesh);
    free(coarse_v);
    for (size_t ii=0; ii<npts; ii++) v[ii] += correction[ii];
    free(correction);
  }
  /* Post-smoothing. */
  jacobi(v, f, nmesh, boxsize, boxcenter, beta, damping_factor, niterations, los);
}
/* Full multigrid driver, recursive: obtain a good initial guess by solving
 * the problem on a coarser grid first, prolong it, then run
 * vcycle_niterations V-cycles at this resolution. If v1h is NULL a
 * zero-initialized solution array is allocated; the (possibly new) solution
 * pointer is returned and owned by the caller. */
FLOAT* fmg(FLOAT* f1h, FLOAT* v1h, const int* nmesh, const FLOAT* boxsize, const FLOAT* boxcenter, const FLOAT beta,
           const FLOAT jacobi_damping_factor, const int jacobi_niterations, const int vcycle_niterations, const FLOAT* los) {
  const size_t npts = nmesh[0]*nmesh[1]*nmesh[2];
  /* A coarser level exists only if every dimension is even and > 4. */
  _Bool can_coarsen = 1;
  for (int idim=0; idim<NDIM; idim++)
    can_coarsen &= (nmesh[idim] > 4 && (nmesh[idim] % 2 == 0));
  if (can_coarsen) {
    int cmesh[NDIM];
    for (int idim=0; idim<NDIM; idim++) cmesh[idim] = nmesh[idim]/2;
    /* Restrict the source and solve the coarse problem recursively. */
    FLOAT* coarse_f = (FLOAT *) malloc(npts/8*sizeof(FLOAT));
    reduce(f1h, coarse_f, nmesh);
    FLOAT* coarse_v = fmg(coarse_f, NULL, cmesh, boxsize, boxcenter, beta, jacobi_damping_factor, jacobi_niterations, vcycle_niterations, los);
    free(coarse_f);
    /* Interpolate the coarse solution up as our initial guess. */
    if (v1h == NULL) v1h = (FLOAT *) calloc(npts, sizeof(FLOAT));
    prolong(coarse_v, v1h, cmesh);
    free(coarse_v);
  }
  else {
    /* Coarsest level: start from zero. */
    if (v1h == NULL) v1h = (FLOAT *) calloc(npts, sizeof(FLOAT));
  }
  for (int iter=0; iter<vcycle_niterations; iter++)
    vcycle(v1h, f1h, nmesh, boxsize, boxcenter, beta, jacobi_damping_factor, jacobi_niterations, los);
  return v1h;
}
|
phantom_g5.c | #include <stdio.h>
#include <math.h>
#include <assert.h>
#include <immintrin.h>
#include <xmmintrin.h>
#include <emmintrin.h>
#include "avx_type.h"
#include "gp5util.h"
#define NUM_PIPE (4)
#ifndef MAXDEV
#define MAXDEV (128)
#endif /* MAXDEV */
// Global softening length, shared by all i-particles (set by g5_set_eps_to_all).
static double Eps;
// Per-"device" particle memory: i-particles, force output, and two packed
// j-particle layouts (shared vs per-particle softening). pad keeps the
// struct a multiple of 64 bytes together with ALIGN64.
static struct Ptcl_Mem {
    Ipdata iptcl;
    Fodata fout;
    Jpdata jptcl[JMEMSIZE];
    Jpdata0 jptcl0[JMEMSIZE/2];
    int nbody, pad[15];
} ptcl_mem[MAXDEV] ALIGN64;
// Correction factors for the approximate rsqrt bias, set in g5_open
// (scalar and 4-lane vector forms).
static float Acc_correct = 1.0;
static float Pot_correct = -1.0;
static __m128 Acc_correctV;
static __m128 Pot_correctV;
/* Number of force pipelines emulated per call, i.e. how many i-particles
 * are processed at once. */
int g5_get_number_of_pipelines(void)
{
    return NUM_PIPE;
}
/* Capacity, in particles, of the per-device j-particle memory. */
int g5_get_jmemsize(void)
{
    return JMEMSIZE;
}
/* One-time library initialization: measures the bias of the approximate
 * rsqrt used by the force kernels and derives the scalar and vector
 * correction factors for accelerations and potentials. Subsequent calls
 * are no-ops. */
void g5_open(void)
{
    static int init_call = 1;
    if(!init_call) return;
    init_call = 0;
    double rsqrt_bias();
    const double bias = rsqrt_bias();
    const float acc_corr = 1.0 - 3.0*bias;
    const float pot_corr = -(1.0-bias);
    Acc_correct = acc_corr;
    Pot_correct = pot_corr;
    Acc_correctV = _mm_set1_ps(acc_corr);
    Pot_correctV = _mm_set1_ps(pot_corr);
}
/* Shut down the emulated board; this software implementation holds no
 * resources, so there is nothing to release. */
void g5_close()
{
}
/* Set the common gravitational softening length applied to every
 * i-particle loaded afterwards. */
void g5_set_eps_to_all(double eps)
{
    Eps = eps;
}
/* Kept for GRAPE-5 API compatibility: this floating-point software
 * implementation needs no fixed-point coordinate/mass scaling, so the
 * range setting is a no-op. */
void g5_set_range(double xmin, double xmax, double mmin)
{
    (void)xmin;
    (void)xmax;
    (void)mmin;
}
void g5_set_nMC(int devid, int n)
{
assert(devid < MAXDEV);
struct Ptcl_Mem *pm = ptcl_mem + devid;
pm->nbody = n;
}
/* Single-device shorthand: set the j-particle count on device 0. */
void g5_set_n(int n)
{
    g5_set_nMC(0, n);
}
/* Load up to NUM_PIPE i-particle positions into device devid, attaching the
 * globally configured softening (squared) to every particle. */
void g5_set_xiMC(int devid, int ni, double (*xi)[3])
{
    assert(devid < MAXDEV);
    struct Ptcl_Mem *pm = ptcl_mem + devid;
    assert(ni <= NUM_PIPE);
    const float eps2 = Eps*Eps;
    for(int i=0; i<ni; i++) {
        pm->iptcl.x[i]    = (float)xi[i][0];
        pm->iptcl.y[i]    = (float)xi[i][1];
        pm->iptcl.z[i]    = (float)xi[i][2];
        pm->iptcl.eps2[i] = eps2;
    }
}
/* Like g5_set_xiMC, but each i-particle carries its own softening
 * (squared), taken from eps2[]. */
void g5_set_xiMC0(int devid, int ni, double (*xi)[3],
                  double *eps2)
{
    assert(devid < MAXDEV);
    struct Ptcl_Mem *pm = ptcl_mem + devid;
    assert(ni <= NUM_PIPE);
    for(int i=0; i<ni; i++) {
        pm->iptcl.x[i]    = (float)xi[i][0];
        pm->iptcl.y[i]    = (float)xi[i][1];
        pm->iptcl.z[i]    = (float)xi[i][2];
        pm->iptcl.eps2[i] = eps2[i];
    }
}
/* Single-device shorthand: load i-particles onto device 0. */
void g5_set_xi(int ni, double (*xi)[3])
{
    g5_set_xiMC(0, ni, xi);
}
// Pack nj j-particles (position + mass, converted to float via AVX) into
// device devid starting at slot adr, then pad up to the kernel unroll width
// NUNROLL with zero-mass particles so the unrolled loop reads valid data.
void g5_set_xmjMC(int devid, int adr, int nj, double (*xj)[3], double *mj)
{
    int j;
    assert(devid < MAXDEV);
    struct Ptcl_Mem *pm = ptcl_mem + devid;
    for(j=adr;j<adr+nj;j++) {
        // Convert (x,y,z,m) from double to float in one shot.
        __m256d pd = {xj[j][0], xj[j][1], xj[j][2], mj[j]};
        __m128 ps = _mm256_cvtpd_ps(pd);
        *(__m128 *)(pm->jptcl+j) = ps;
    }
    // Zero-mass padding contributes no force.
    // NOTE(review): the padding loop starts at j=nj, not adr+nj; for adr>0
    // this overwrites slots [nj, nj+rsdl) — presumably callers always use
    // adr==0 for the final chunk. Verify against callers before changing.
    int rsdl = (NUNROLL - (nj % NUNROLL)) % NUNROLL;
    for(j=nj;j<nj+rsdl;j++){
        __m256d pd = {0.0, 0.0, 0.0, 0.0};
        __m128 ps = _mm256_cvtpd_ps(pd);
        *(__m128 *)(pm->jptcl+j) = ps;
    }
}
// Pack j-particles with per-particle softening into the jptcl0 layout:
// two particles per Jpdata0 entry, with each softening value broadcast
// across a 4-float lane. adr must be even because entries hold pairs.
// Padding entries get zero mass (no force) and eps2 = 1 so the kernel's
// 1/sqrt(r^2+eps2) cannot divide by zero.
void g5_set_xmjMC0(int devid, int adr, int nj, double (*xj)[3],
                   double *mj, double *epsj2)
{
    int j;
    assert(devid < MAXDEV);
    struct Ptcl_Mem *pm = ptcl_mem + devid;
    assert(adr % 2 == 0);
    for(j=adr;j<adr+nj;j+=2) {
        int jadr = j / 2;
        // First particle of the pair: position+mass, broadcast softening.
        pm->jptcl0[jadr].xm[0][0] = (float)xj[j][0];
        pm->jptcl0[jadr].xm[0][1] = (float)xj[j][1];
        pm->jptcl0[jadr].xm[0][2] = (float)xj[j][2];
        pm->jptcl0[jadr].xm[0][3] = (float)mj[j];
        // Second particle of the pair.
        // NOTE(review): for odd nj this reads xj[j+1]/mj[j+1] one past the
        // caller's last particle — confirm callers pass even nj.
        pm->jptcl0[jadr].xm[1][0] = (float)xj[j+1][0];
        pm->jptcl0[jadr].xm[1][1] = (float)xj[j+1][1];
        pm->jptcl0[jadr].xm[1][2] = (float)xj[j+1][2];
        pm->jptcl0[jadr].xm[1][3] = (float)mj[j+1];
        pm->jptcl0[jadr].ep[0][0] = (float)epsj2[j];
        pm->jptcl0[jadr].ep[0][1] = (float)epsj2[j];
        pm->jptcl0[jadr].ep[0][2] = (float)epsj2[j];
        pm->jptcl0[jadr].ep[0][3] = (float)epsj2[j];
        pm->jptcl0[jadr].ep[1][0] = (float)epsj2[j+1];
        pm->jptcl0[jadr].ep[1][1] = (float)epsj2[j+1];
        pm->jptcl0[jadr].ep[1][2] = (float)epsj2[j+1];
        pm->jptcl0[jadr].ep[1][3] = (float)epsj2[j+1];
    }
    // Pad to the kernel unroll width NUNROLL, but only slots at/after nj
    // (jp < nj skips halves of a pair that hold real data).
    // NOTE(review): as in g5_set_xmjMC, padding starts at j=nj rather than
    // adr+nj — presumably adr==0 for the final chunk.
    int rsdl = (NUNROLL - (nj % NUNROLL)) % NUNROLL;
    for(j=nj;j<nj+rsdl;j+=2){
        int jj, jadr = j / 2;
        for(jj = 0; jj < 2; jj++){
            int jp = jadr * 2 + jj;
            if(jp < nj)
                continue;
            pm->jptcl0[jadr].xm[jj][0] = 0.0f;
            pm->jptcl0[jadr].xm[jj][1] = 0.0f;
            pm->jptcl0[jadr].xm[jj][2] = 0.0f;
            pm->jptcl0[jadr].xm[jj][3] = 0.0f;
            pm->jptcl0[jadr].ep[jj][0] = 1.0f;
            pm->jptcl0[jadr].ep[jj][1] = 1.0f;
            pm->jptcl0[jadr].ep[jj][2] = 1.0f;
            pm->jptcl0[jadr].ep[jj][3] = 1.0f;
        }
    }
}
/* Single-device shorthand: load j-particles onto device 0. */
void g5_set_xmj(int adr, int nj, double (*xj)[3], double *mj)
{
    g5_set_xmjMC(0, adr, nj, xj, mj);
}
/* Single-device shorthand: load j-particles with individual softenings
 * onto device 0. */
void g5_set_xmj0(int adr, int nj, double (*xj)[3],
                 double *mj, double *epsj2)
{
    g5_set_xmjMC0(0, adr, nj, xj, mj, epsj2);
}
/* Run the gravity kernel on device devid: accumulate forces from its
 * packed j-particles onto its loaded i-particles. */
void g5_runMC(int devid)
{
    void GravityKernel(pIpdata, pFodata, pJpdata, int);
    assert(devid < MAXDEV);
    struct Ptcl_Mem *pm = ptcl_mem + devid;
    GravityKernel(&pm->iptcl, &pm->fout, pm->jptcl, pm->nbody);
}
/* As g5_runMC, but using the per-particle-softening kernel and the
 * jptcl0 layout. */
void g5_runMC0(int devid)
{
    void GravityKernel0(pIpdata, pFodata, pJpdata0, int);
    assert(devid < MAXDEV);
    struct Ptcl_Mem *pm = ptcl_mem + devid;
    GravityKernel0(&pm->iptcl, &pm->fout, pm->jptcl0, pm->nbody);
}
/* Single-device shorthand: run the kernel on device 0. */
void g5_run(void)
{
    g5_runMC(0);
}
// Read back accelerations and potentials for the first ni pipeline slots of
// device devid, applying the rsqrt-bias correction factors in SIMD before
// converting to double.
void g5_get_forceMC(int devid, int ni, double (*a)[3], double *pot)
{
    assert(devid < MAXDEV);
    assert(ni <= NUM_PIPE);
    struct Ptcl_Mem *pm = ptcl_mem + devid;
    int i;
#if 1
    // Scale all 4 lanes at once; Pot_correctV also flips the potential sign
    // (it was initialized from a negative correction factor).
    *(__m128 *)pm->fout.ax =
        _mm_mul_ps(*(__m128 *)(pm->fout.ax),Acc_correctV);
    *(__m128 *)pm->fout.ay =
        _mm_mul_ps(*(__m128 *)(pm->fout.ay),Acc_correctV);
    *(__m128 *)pm->fout.az =
        _mm_mul_ps(*(__m128 *)(pm->fout.az),Acc_correctV);
    *(__m128 *)pm->fout.phi =
        _mm_mul_ps(*(__m128 *)(pm->fout.phi),Pot_correctV);
#endif
    for(i=0;i<ni;i++) {
        a[i][0] = (double)(pm->fout.ax[i]);
        a[i][1] = (double)(pm->fout.ay[i]);
        a[i][2] = (double)(pm->fout.az[i]);
        pot[i] = (double)(pm->fout.phi[i]);
    }
}
/* Single-device shorthand: read results back from device 0. */
void g5_get_force(int ni, double (*a)[3], double *pot)
{
    g5_get_forceMC(0, ni, a, pot);
}
/* Compute accelerations a and potentials p for ni particles at positions x
 * on device devid, feeding the pipelines in NUM_PIPE-sized batches. */
void g5_calculate_force_on_xMC(int devid, double (*x)[3], double (*a)[3],
                               double *p, int ni)
{
    assert(devid < MAXDEV);
    const int np = g5_get_number_of_pipelines();
    for(int off=0; off<ni; off+=np) {
        const int nii = (ni - off < np) ? (ni - off) : np;
        g5_set_xiMC(devid, nii, x+off);
        g5_runMC(devid);
        g5_get_forceMC(devid, nii, a+off, p+off);
    }
}
/* As g5_calculate_force_on_xMC, but with an individual softening (squared)
 * per i-particle, using the per-softening pipeline. */
void g5_calculate_force_on_xMC0(int devid, double (*x)[3],
                                double (*a)[3], double *p,
                                int ni, double *eps2)
{
    assert(devid < MAXDEV);
    const int np = g5_get_number_of_pipelines();
    for(int off=0; off<ni; off+=np) {
        const int nii = (ni - off < np) ? (ni - off) : np;
        g5_set_xiMC0(devid, nii, x+off, eps2+off);
        g5_runMC0(devid);
        g5_get_forceMC(devid, nii, a+off, p+off);
    }
}
#ifndef ENABLE_OPENMP
// Serial wrappers: everything runs on device 0.
void g5_calculate_force_on_x(double (*x)[3], double (*a)[3], double *p, int ni)
{
    g5_calculate_force_on_xMC(0, x, a, p, ni);
}
void g5_calculate_force_on_x0(double (*x)[3], double (*a)[3],
                              double *p, int ni, double *eps2)
{
    g5_calculate_force_on_xMC0(0, x, a, p, ni, eps2);
}
#else
#include <omp.h>
// OpenMP variants: each thread uses its own i-particle/output slot
// (indexed by thread id) while all threads read the shared, read-only
// j-particle data of device 0.
// NOTE(review): this assumes omp_get_thread_num() < MAXDEV and that the
// j-particles were loaded on device 0 beforehand — confirm with callers.
void g5_calculate_force_on_x(double (*x)[3], double (*a)[3], double *p,
                             int nitot)
{
    int off;
    const int np = g5_get_number_of_pipelines();
#pragma omp parallel for
    for(off=0; off<nitot; off+=np) {
        int tid = omp_get_thread_num();
        int ni = np < nitot-off ? np : nitot-off;
        g5_set_xiMC(tid, ni, x+off);
        {
            // Call the kernel directly with thread-local i/o buffers but the
            // shared j-particle memory and count of device 0.
            void GravityKernel(pIpdata, pFodata, pJpdata, int);
            pIpdata ip = &ptcl_mem[tid].iptcl;
            pFodata fo = &ptcl_mem[tid].fout;
            pJpdata jp = ptcl_mem[0].jptcl;
            int nbody = ptcl_mem[0].nbody;
            GravityKernel(ip, fo, jp, nbody);
        }
        g5_get_forceMC(tid, ni, a+off, p+off);
    }
}
// Same parallel scheme for the per-particle-softening kernel.
void g5_calculate_force_on_x0(double (*x)[3], double (*a)[3], double *p,
                              int nitot, double *eps2)
{
    int off;
    const int np = g5_get_number_of_pipelines();
#pragma omp parallel for
    for(off=0; off<nitot; off+=np) {
        int tid = omp_get_thread_num();
        int ni = np < nitot-off ? np : nitot-off;
        g5_set_xiMC0(tid, ni, x+off, eps2+off);
        {
            void GravityKernel0(pIpdata, pFodata, pJpdata0, int);
            pIpdata ip = &ptcl_mem[tid].iptcl;
            pFodata fo = &ptcl_mem[tid].fout;
            pJpdata0 jp = ptcl_mem[0].jptcl0;
            int nbody = ptcl_mem[0].nbody;
            GravityKernel0(ip, fo, jp, nbody);
        }
        g5_get_forceMC(tid, ni, a+off, p+off);
    }
}
#endif
|
GB_binop__le_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__le_uint32)
// A.*B function (eWiseMult): GB (_AemultB__le_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__le_uint32)
// A.*B function (eWiseMult): GB (_AemultB_03__le_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__le_uint32)
// A*D function (colscale): GB (_AxD__le_uint32)
// D*A function (rowscale): GB (_DxB__le_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__le_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__le_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__le_uint32)
// C=scalar+B GB (_bind1st__le_uint32)
// C=scalar+B' GB (_bind1st_tran__le_uint32)
// C=A+scalar GB (_bind2nd__le_uint32)
// C=A'+scalar GB (_bind2nd_tran__le_uint32)
// C type: bool
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij <= bij)
// Type and operator plug-ins consumed by the generic templates #include'd
// below, specialized here for LE (less-or-equal) on uint32_t inputs.
#define GB_ATYPE \
    uint32_t
#define GB_BTYPE \
    uint32_t
#define GB_CTYPE \
    bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    uint32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = (x <= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0
// op is second
#define GB_OP_IS_SECOND \
    0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LE || GxB_NO_UINT32 || GxB_NO_LE_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// LE is not in that list, so the generator emits this kernel disabled
// (named "(none)" and excluded via #if 0).
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; the included template performs
// the element-wise loop using the GB_BINOP defined above.
GrB_Info GB (_Cdense_ewise3_noaccum__le_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    // Operator compiled out via GxB_NO_* flags: fall back to generic code.
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// The accumulation body is generated under "#if 0": LE is not a supported
// dense-accum operator, so this kernel is a no-op that reports success.
GrB_Info GB (_Cdense_accumB__le_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// As with C+=B above, the body is generated under "#if 0" because LE is not a
// supported dense-accum operator; the function is a successful no-op.
GrB_Info GB (_Cdense_accumb__le_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Each entry of C is op(aij, djj); the loop lives in the meta template, which
// references the local Cx by name (so the declaration below must keep it).
GrB_Info GB (_AxD__le_uint32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Each entry of C is op(dii, bij); the loop lives in the meta template.
GrB_Info GB (_DxB__le_uint32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// Set-union ewise kernel.  The three ek_slicing workspaces are declared here
// because GB_add_template.c allocates into them; GB_FREE_WORK releases them.
GrB_Info GB (_AaddB__le_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// Set-intersection ewise kernel (general sparsity cases); the loop is in the
// included meta file.
GrB_Info GB (_AemultB_01__le_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// GB_BINOP_FLIP is 0 for LE (the flipped case z=(y<=x) is handled elsewhere
// by rewriting to GE), so only the non-flipped branch below is compiled.
GrB_Info GB (_AemultB_02__le_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Specialized ewise-mult case driven by the sparse mask M; loop in template.
GrB_Info GB (_AemultB_03__le_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// All bitmap-result ewise-mult variants share one template, dispatched on
// ewise_method and the mask arguments.
GrB_Info GB (_AemultB_bitmap__le_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x <= Bx [p]) for each entry present in B.  Bb is the optional
// bitmap; GBB tests it (presumably treated as all-present when Bb is NULL --
// confirm against the GBB definition in GB.h).
GrB_Info GB (_bind1st__le_uint32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip positions with no entry in the bitmap
if (!GBB (Bb, p)) continue ;
uint32_t bij = Bx [p] ;
Cx [p] = (x <= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] <= y) for each entry present in A (Ab is the optional
// bitmap, tested via GBB as in bind1st above).
GrB_Info GB (_bind2nd__le_uint32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip positions with no entry in the bitmap
if (!GBB (Ab, p)) continue ;
uint32_t aij = Ax [p] ;
Cx [p] = (aij <= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (x <= aij) ; \
}
GrB_Info GB (_bind1st_tran__le_uint32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (aij <= y) ; \
}
GrB_Info GB (_bind2nd_tran__le_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
CPUImplQPU.h | /*
Copyright (c) 2017-2020 Origin Quantum Computing. All Right Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#ifndef CPU_QUANTUM_GATE_H
#define CPU_QUANTUM_GATE_H
#include "Core/VirtualQuantumProcessor/QPUImpl.h"
#include "Core/Utilities/Tools/Utils.h"
#include <stdio.h>
#include <iostream>
#include <vector>
#ifndef SQ2
#define SQ2 (1 / 1.4142135623731)
#endif
#ifndef PI
#define PI 3.14159265358979323846
#endif
#define DECL_GATE_MATRIX(NAME)\
extern const qcomplex_t NAME##00;\
extern const qcomplex_t NAME##01;\
extern const qcomplex_t NAME##10;\
extern const qcomplex_t NAME##11;
#define DECL_ANGLE_GATE_MATRIX(NAME)\
extern const double NAME##_Nx;\
extern const double NAME##_Ny;\
extern const double NAME##_Nz;\
#define REGISTER_GATE_MATRIX(NAME,U00,U01,U10,U11)\
extern const qcomplex_t NAME##00 = U00;\
extern const qcomplex_t NAME##01 = U01;\
extern const qcomplex_t NAME##10 = U10;\
extern const qcomplex_t NAME##11 = U11;
#define REGISTER_ANGLE_GATE_MATRIX(NAME,Nx,Ny,Nz)\
extern const double NAME##_Nx = Nx;\
extern const double NAME##_Ny = Ny;\
extern const double NAME##_Nz = Nz;\
#define CONST_GATE(NAME) \
QError \
NAME(size_t qn, bool isConjugate, double error_rate)\
{ \
const_single_qubit_gate(NAME, qn,isConjugate,error_rate);\
return qErrorNone; \
}
#define CONTROL_CONST_GATE(NAME) \
QError \
NAME(size_t qn, Qnum& vControlBit,bool isConjugate , double error_rate)\
{ \
control_const_single_qubit_gate(NAME, qn,vControlBit,isConjugate,error_rate);\
return qErrorNone; \
}
#define SINGLE_ANGLE_GATE(NAME) \
QError \
NAME(size_t qn,double theta,bool isConjugate, double error_rate)\
{ \
single_qubit_angle_gate(NAME, qn,theta,isConjugate,error_rate);\
return qErrorNone; \
}
#define CONTROL_SINGLE_ANGLE_GATE(NAME) \
QError \
NAME(size_t qn, double theta,Qnum& vControlBit,bool isConjugate, double error_rate)\
{ \
control_single_qubit_angle_gate(NAME, qn, theta,vControlBit,isConjugate, error_rate); \
return qErrorNone; \
}
#define const_single_qubit_gate(GATE_NAME,qn,isConjugate,error_rate) \
single_gate<GATE_NAME##00,GATE_NAME##01,GATE_NAME##10,GATE_NAME##11>(qn,isConjugate,error_rate)
#define control_const_single_qubit_gate(GATE_NAME,qn,vControlBit,isConjugate,error_rate) \
control_single_gate<GATE_NAME##00,GATE_NAME##01,GATE_NAME##10,GATE_NAME##11>\
(qn,vControlBit,isConjugate,error_rate)
#define single_qubit_angle_gate(GATE_NAME,qn,theta,isConjugate,error_rate) \
single_angle_gate<GATE_NAME##_Nx,GATE_NAME##_Ny,GATE_NAME##_Nz>(qn,theta,isConjugate,error_rate)
#define control_single_qubit_angle_gate(GATE_NAME,qn,theta,vControlBit,isConjugate,error_rate) \
control_single_angle_gate<GATE_NAME##_Nx,GATE_NAME##_Ny,GATE_NAME##_Nz> \
(qn,theta,vControlBit,isConjugate,error_rate)
DECL_GATE_MATRIX(Hadamard)
DECL_GATE_MATRIX(X)
DECL_GATE_MATRIX(Y)
DECL_GATE_MATRIX(Z)
DECL_GATE_MATRIX(T)
DECL_GATE_MATRIX(S)
DECL_GATE_MATRIX(P0)
DECL_GATE_MATRIX(P1)
DECL_ANGLE_GATE_MATRIX(RX_GATE)
DECL_ANGLE_GATE_MATRIX(RY_GATE)
DECL_ANGLE_GATE_MATRIX(RZ_GATE)
/**
* @brief QPU implementation by CPU model
* @ingroup VirtualQuantumProcessor
*/
class CPUImplQPU : public QPUImpl
{
public:
    vQParam qubit2stat;       // live state groups (one per set of entangled qubits)
    vQParam init_qubit2stat;  // snapshot of the initial state (used by init/reset paths)

    // Returns the state group (QGateParam) that currently contains qubit qn.
    QGateParam & findgroup(size_t qn);
    CPUImplQPU();
    CPUImplQPU(size_t);
    ~CPUImplQPU();

    // Merge qgroup1 into qgroup0 by tensor product of their state vectors.
    // Returns false (no-op) when both arguments are already the same group.
    // On success, qgroup0 owns the combined state and qgroup1 is disabled.
    inline bool TensorProduct(QGateParam& qgroup0, QGateParam& qgroup1)
    {
        if (qgroup0.qVec[0] == qgroup1.qVec[0])
        {
            return false;   // same group: nothing to merge
        }
        size_t length_0 = qgroup0.qstate.size();
        size_t length_1 = qgroup1.qstate.size();
        int index = 0;
        QStat new_state;
        new_state.resize(length_0 * length_1);
        // signed int loop counters kept for MSVC OpenMP 2.0 compatibility
#pragma omp parallel for private(index)
        for (int i = 0; i < length_1; i++)
        {
            for (int j = 0; j < length_0; j++)
            {
                index = i * length_0 + j;
                new_state[index] = qgroup0.qstate[j] * qgroup1.qstate[i];
            }
        }
        qgroup0.qstate = new_state;
        qgroup0.qVec.insert(qgroup0.qVec.end(), qgroup1.qVec.begin(), qgroup1.qVec.end());
        qgroup1.enable = false;
        return true;
    }

    // Apply the constant 2x2 gate U = [[U00,U01],[U10,U11]] to qubit qn.
    // When isConjugate is set, the conjugate transpose U^dagger is applied.
    // error_rate is accepted for interface uniformity but unused here.
    template<const qcomplex_t& U00, const qcomplex_t& U01, const qcomplex_t& U10, const qcomplex_t& U11>
    QError single_gate(size_t qn, bool isConjugate, double error_rate)
    {
        qcomplex_t alpha;
        qcomplex_t beta;
        QGateParam& qgroup = findgroup(qn);
        size_t j;
        // stride between the |0> and |1> amplitudes of qn within the group
        size_t ststep = 1ull << (find(qgroup.qVec.begin(), qgroup.qVec.end(), qn) - qgroup.qVec.begin());
        qcomplex_t C00 = U00;
        qcomplex_t C01 = U01;
        qcomplex_t C10 = U10;
        qcomplex_t C11 = U11;
        if (isConjugate)
        {
            // Build U^dagger: conjugate every entry, then swap the off-diagonals.
            qcomplex_t temp;
            C00 = qcomplex_t(C00.real(), -C00.imag());
            C01 = qcomplex_t(C01.real(), -C01.imag());
            C10 = qcomplex_t(C10.real(), -C10.imag());
            C11 = qcomplex_t(C11.real(), -C11.imag());
            temp = C01;
            C01 = C10;  // BUGFIX: was `C01 = U10` (the *unconjugated* element),
            C10 = temp; // which produced U^T-with-one-conjugated-entry, not U^dagger
        }
        //#pragma omp parallel for private(j,alpha,beta)
        for (size_t i = 0; i < qgroup.qstate.size(); i += ststep * 2)
        {
            for (j = i; j < i + ststep; j++)
            {
                alpha = qgroup.qstate[j];
                beta = qgroup.qstate[j + ststep];
                qgroup.qstate[j] = C00 * alpha + C01 * beta;          /* in j, the goal qubit is in |0> */
                qgroup.qstate[j + ststep] = C10 * alpha + C11 * beta; /* in j+ststep, the goal qubit is in |1> */
            }
        }
        return qErrorNone;
    }

    // Phase gate U1(theta) = diag(1, e^{i*theta}); dagger negates theta.
    QError U1_GATE(size_t qn, double theta, bool isConjugate, double error_rate)
    {
        QGateParam& qgroup = findgroup(qn);
        size_t ststep = 1ull << (find(qgroup.qVec.begin(), qgroup.qVec.end(), qn) - qgroup.qVec.begin());
        // BUGFIX: the old code declared `qcomplex_t C00 = (1, 0);` (and C01, C10
        // likewise) -- that is the comma operator, so C00 was actually 0.  Those
        // three locals were never read, so they are simply removed.
        qcomplex_t C11 = isConjugate ? qcomplex_t(cos(-theta), sin(-theta)) : qcomplex_t(cos(theta), sin(theta));
        for (size_t i = 0; i < qgroup.qstate.size(); i += ststep * 2)
        {
            for (size_t j = i; j < i + ststep; ++j)
            {
                // only the |1> amplitudes pick up the phase
                qgroup.qstate[j + ststep] = C11 * qgroup.qstate[j + ststep];
            }
        }
        return qErrorNone;
    }

    // Apply a rotation gate about axis (Nx,Ny,Nz) by angle theta to qubit qn.
    // When isConjugate is set, the conjugate transpose is applied.
    template<const double& Nx, const double& Ny, const double& Nz>
    QError single_angle_gate(size_t qn, double theta, bool isConjugate, double error_rate)
    {
        qcomplex_t alpha;
        qcomplex_t beta;
        qcomplex_t U00(cos(theta / 2), -sin(theta / 2)*Nz);
        qcomplex_t U01(-sin(theta / 2)*Ny, -sin(theta / 2)*Nx);
        qcomplex_t U10(sin(theta / 2)*Ny, -sin(theta / 2)*Nx);
        qcomplex_t U11(cos(theta / 2), sin(theta / 2)*Nz);
        if (isConjugate)
        {
            // conjugate every entry, then swap the (already conjugated) off-diagonals
            qcomplex_t temp;
            U00 = qcomplex_t(U00.real(), -U00.imag());
            U01 = qcomplex_t(U01.real(), -U01.imag());
            U10 = qcomplex_t(U10.real(), -U10.imag());
            U11 = qcomplex_t(U11.real(), -U11.imag());
            temp = U01;
            U01 = U10;
            U10 = temp;
        }
        QGateParam& qgroup = findgroup(qn);
        size_t j;
        size_t ststep = 1ull << (find(qgroup.qVec.begin(), qgroup.qVec.end(), qn) - qgroup.qVec.begin());
        //#pragma omp parallel for private(j,alpha,beta)
        for (size_t i = 0; i < qgroup.qstate.size(); i += ststep * 2)
        {
            for (j = i; j < i + ststep; j++)
            {
                alpha = qgroup.qstate[j];
                beta = qgroup.qstate[j + ststep];
                qgroup.qstate[j] = U00 * alpha + U01 * beta;          /* in j, the goal qubit is in |0> */
                qgroup.qstate[j + ststep] = U10 * alpha + U11 * beta; /* in j+ststep, the goal qubit is in |1> */
            }
        }
        return qErrorNone;
    }

    // Controlled rotation about (Nx,Ny,Nz): applied only to basis states where
    // every qubit in vControlBit is |1>.  The gate is skipped entirely when the
    // random draw falls at or below error_rate (simple noise model).
    template<const double& Nx, const double& Ny, const double& Nz>
    QError control_single_angle_gate(size_t qn,
        double theta,
        Qnum vControlBit,
        bool isConjugate,
        double error_rate)
    {
        if (QPanda::RandomNumberGenerator() > error_rate)
        {
            QGateParam& qgroup0 = findgroup(qn);
            // bring every control qubit into the same state group as the target
            for (auto iter = vControlBit.begin(); iter != vControlBit.end(); iter++)
            {
                TensorProduct(qgroup0, findgroup(*iter));
            }
            size_t M = 1ull << (qgroup0.qVec.size() - vControlBit.size());
            size_t x;
            size_t n = qgroup0.qVec.size();
            size_t ststep = 1ull << (find(qgroup0.qVec.begin(), qgroup0.qVec.end(), qn)
                - qgroup0.qVec.begin());
            size_t index = 0;
            size_t block = 0;
            qcomplex_t alpha, beta;
            qcomplex_t U00(cos(theta / 2), -sin(theta / 2)*Nz);
            qcomplex_t U01(-sin(theta / 2)*Ny, -sin(theta / 2)*Nx);
            qcomplex_t U10(sin(theta / 2)*Ny, -sin(theta / 2)*Nx);
            qcomplex_t U11(cos(theta / 2), sin(theta / 2)*Nz);
            if (isConjugate)
            {
                // build the conjugate transpose (see single_angle_gate)
                qcomplex_t temp;
                U00 = qcomplex_t(U00.real(), -U00.imag());
                U01 = qcomplex_t(U01.real(), -U01.imag());
                U10 = qcomplex_t(U10.real(), -U10.imag());
                U11 = qcomplex_t(U11.real(), -U11.imag());
                temp = U01;
                U01 = U10;
                U10 = temp;
            }
            // block = bitmask with a 1 at the position of every control qubit
            Qnum qvtemp;
            for (auto iter = vControlBit.begin(); iter != vControlBit.end(); iter++)
            {
                size_t stemp = (find(qgroup0.qVec.begin(), qgroup0.qVec.end(), *iter)
                    - qgroup0.qVec.begin());
                block += 1ull << stemp;
                qvtemp.push_back(stemp);
            }
            sort(qvtemp.begin(), qvtemp.end());
            Qnum::iterator qiter;
            size_t j;
            //#pragma omp parallel for private(j,alpha,beta,index,x,qiter)
            for (size_t i = 0; i < M; i++)
            {
                // scatter the free-qubit counter i into the bit positions that
                // are not control qubits
                index = 0;
                x = i;
                qiter = qvtemp.begin();
                for (j = 0; j < n; j++)
                {
                    while (qiter != qvtemp.end() && *qiter == j)
                    {
                        qiter++;
                        j++;
                    }
                    //index += ((x % 2)*(1ull << j));
                    index += ((x & 1) << j);
                    x >>= 1;
                }
                /*
                 * control qubits are 1, target qubit is 0
                 */
                index = index + block - ststep;
                alpha = qgroup0.qstate[index];
                beta = qgroup0.qstate[index + ststep];
                qgroup0.qstate[index] = alpha * U00 + beta * U01;
                qgroup0.qstate[index + ststep] = alpha * U10 + beta * U11;
            }
        }
        return qErrorNone;
    }

    // Controlled constant 2x2 gate; same control/indexing scheme as
    // control_single_angle_gate above.
    template<const qcomplex_t& U00,
        const qcomplex_t& U01,
        const qcomplex_t& U10,
        const qcomplex_t& U11>
    QError control_single_gate(
        size_t qn,
        Qnum vControlBit,
        bool isConjugate,
        double error_rate)
    {
        if (QPanda::RandomNumberGenerator() > error_rate)
        {
            QGateParam& qgroup0 = findgroup(qn);
            for (auto iter = vControlBit.begin(); iter != vControlBit.end(); iter++)
            {
                TensorProduct(qgroup0, findgroup(*iter));
            }
            size_t M = 1ull << (qgroup0.qVec.size() - vControlBit.size());
            size_t x;
            size_t n = qgroup0.qVec.size();
            size_t ststep = 1ull << (find(qgroup0.qVec.begin(), qgroup0.qVec.end(), qn)
                - qgroup0.qVec.begin());
            size_t index = 0;
            size_t block = 0;
            qcomplex_t alpha, beta;
            qcomplex_t C00 = U00;
            qcomplex_t C01 = U01;
            qcomplex_t C10 = U10;
            qcomplex_t C11 = U11;
            if (isConjugate)
            {
                // build U^dagger: conjugate, then swap the off-diagonals
                qcomplex_t temp;
                C00 = qcomplex_t(C00.real(), -C00.imag());
                C01 = qcomplex_t(C01.real(), -C01.imag());
                C10 = qcomplex_t(C10.real(), -C10.imag());
                C11 = qcomplex_t(C11.real(), -C11.imag());
                temp = C01;
                C01 = C10;  // BUGFIX: was `C01 = U10` (unconjugated template value)
                C10 = temp;
            }
            Qnum qvtemp;
            for (auto iter = vControlBit.begin(); iter != vControlBit.end(); iter++)
            {
                size_t stemp = (find(qgroup0.qVec.begin(), qgroup0.qVec.end(), *iter)
                    - qgroup0.qVec.begin());
                block += 1ull << stemp;
                qvtemp.push_back(stemp);
            }
            sort(qvtemp.begin(), qvtemp.end());
            Qnum::iterator qiter;
            size_t j;
            //#pragma omp parallel for private(j,alpha,beta,index,x,qiter)
            for (size_t i = 0; i < M; i++)
            {
                index = 0;
                x = i;
                qiter = qvtemp.begin();
                for (j = 0; j < n; j++)
                {
                    while (qiter != qvtemp.end() && *qiter == j)
                    {
                        qiter++;
                        j++;
                    }
                    //index += ((x % 2)*(1ull << j));
                    index += ((x & 1) << j);
                    x >>= 1;
                }
                /*
                 * control qubits are 1, target qubit is 0
                 */
                index = index + block - ststep;
                alpha = qgroup0.qstate[index];
                beta = qgroup0.qstate[index + ststep];
                qgroup0.qstate[index] = alpha * C00 + beta * C01;
                qgroup0.qstate[index + ststep] = alpha * C10 + beta * C11;
            }
        }
        return qErrorNone;
    }

    //single qubit gate and control-single qubit gate
    CONST_GATE(P0);
    CONST_GATE(P1);
    CONST_GATE(X);
    CONST_GATE(Y);
    CONST_GATE(Z);
    CONST_GATE(Hadamard);
    CONST_GATE(T);
    CONST_GATE(S);
    SINGLE_ANGLE_GATE(RX_GATE);
    SINGLE_ANGLE_GATE(RY_GATE);
    SINGLE_ANGLE_GATE(RZ_GATE);
    CONTROL_SINGLE_ANGLE_GATE(RX_GATE);
    CONTROL_SINGLE_ANGLE_GATE(RY_GATE);
    CONTROL_SINGLE_ANGLE_GATE(RZ_GATE);
    CONTROL_CONST_GATE(Hadamard);
    CONTROL_CONST_GATE(X); //CCCC-NOT
    CONTROL_CONST_GATE(Y);
    CONTROL_CONST_GATE(Z);
    CONTROL_CONST_GATE(T);
    CONTROL_CONST_GATE(S);
    CONTROL_CONST_GATE(P0);
    CONTROL_CONST_GATE(P1);

    //define const CNOT,CZ,ISWAP,SQISWAP
    inline QError CNOT(size_t qn_0, size_t qn_1,
        bool isConjugate, double error_rate)
    {
        // By convention the list passed to X contains the target as well;
        // control_single_gate subtracts the target bit back out via ststep.
        Qnum qvtemp;
        qvtemp.push_back(qn_0);
        qvtemp.push_back(qn_1);
        X(qn_1, qvtemp, isConjugate, error_rate); //qn_1 is target
        return qErrorNone;
    }
    inline QError CNOT(size_t qn_0, size_t qn_1, Qnum& vControlBit,
        bool isConjugate, double error_rate)
    {
        X(qn_1, vControlBit, isConjugate, error_rate); //qn_1 is target
        return qErrorNone;
    }
    QError iSWAP(size_t qn_0, size_t qn_1, double theta,
        bool isConjugate, double);
    QError iSWAP(size_t qn_0, size_t qn_1, Qnum& vControlBit,
        double theta, bool isConjugate, double);
    // full iSWAP is the theta = PI/2 case of the parameterized gate
    inline QError iSWAP(size_t qn_0, size_t qn_1,
        bool isConjugate, double error_rate)
    {
        iSWAP(qn_0, qn_1, PI / 2, isConjugate, error_rate);
        return qErrorNone;
    }
    inline QError iSWAP(size_t qn_0, size_t qn_1, Qnum& vControlBit,
        bool isConjugate, double error_rate)
    {
        iSWAP(qn_0, qn_1, vControlBit, PI / 2, isConjugate, error_rate);
        return qErrorNone;
    }
    // sqrt(iSWAP) is the theta = PI/4 case
    inline QError SqiSWAP(size_t qn_0, size_t qn_1,
        bool isConjugate, double error_rate)
    {
        iSWAP(qn_0, qn_1, PI / 4, isConjugate, error_rate);
        return qErrorNone;
    }
    inline QError SqiSWAP(size_t qn_0, size_t qn_1, Qnum& vControlBit,
        bool isConjugate, double error_rate)
    {
        iSWAP(qn_0, qn_1, vControlBit, PI / 4, isConjugate, error_rate);
        return qErrorNone;
    }
    QError CR(size_t qn_0, size_t qn_1,
        double theta, bool isConjugate, double error_rate);
    QError CR(size_t qn_0, size_t qn_1, Qnum& vControlBit,
        double theta, bool isConjugate, double error_rate);
    // CZ is the theta = PI case of the controlled-phase gate CR
    inline QError CZ(size_t qn_0, size_t qn_1, bool isConjugate, double error_rate)
    {
        CR(qn_0, qn_1, PI, isConjugate, error_rate);
        return qErrorNone;
    }
    inline QError CZ(size_t qn_0, size_t qn_1, Qnum& vControlBit, bool isConjugate, double error_rate)
    {
        CR(qn_0, qn_1, vControlBit, PI, isConjugate, error_rate);
        return qErrorNone;
    }

    //define unitary single/double quantum gate
    QError unitarySingleQubitGate(size_t qn,
        QStat& matrix, bool isConjugate,
        GateType);
    QError controlunitarySingleQubitGate(size_t qn, Qnum& vControlBit,
        QStat& matrix, bool isConjugate,
        GateType);
    QError unitaryDoubleQubitGate(size_t qn_0, size_t qn_1,
        QStat& matrix, bool isConjugate,
        GateType);
    QError controlunitaryDoubleQubitGate(size_t qn_0, size_t qn_1, Qnum& vControlBit,
        QStat& matrix, bool isConjugate,
        GateType);
    QError DiagonalGate(Qnum& vQubit, QStat & matrix,
        bool isConjugate, double error_rate);
    QError controlDiagonalGate(Qnum& vQubit, QStat & matrix, Qnum& vControlBit,
        bool isConjugate, double error_rate);
    QStat getQState();
    QError Reset(size_t qn);
    bool qubitMeasure(size_t qn);
    QError pMeasure(Qnum& qnum, prob_tuple &mResult,
        int select_max = -1);
    QError pMeasure(Qnum& qnum, prob_vec &mResult);
    QError initState(size_t head_rank, size_t rank_size, size_t qubit_num);
    QError initState(size_t qubit_num, const QStat &state = {});

    // projector onto |00> of the (qn_0, qn_1) pair
    inline QError P00(size_t qn_0, size_t qn_1, bool isConjugate, double error_rate)
    {
        QStat P00_matrix = { 1,0,0,0,
            0,1,0,0,
            0,0,1,0,
            0,0,0,0 };
        return unitaryDoubleQubitGate(qn_0, qn_1, P00_matrix, isConjugate, GateType::P00_GATE);
    }
    // two-qubit SWAP gate
    inline QError SWAP(size_t qn_0, size_t qn_1, bool isConjugate, double error_rate)
    {
        QStat swap_matrix = { 1,0,0,0,
            0,0,1,0,
            0,1,0,0,
            0,0,0,1 };
        return unitaryDoubleQubitGate(qn_0, qn_1, swap_matrix, isConjugate, GateType::SWAP_GATE);
    }
    // projector onto |11> of the (qn_0, qn_1) pair
    inline QError P11(size_t qn_0, size_t qn_1, bool isConjugate, double error_rate)
    {
        QStat P11_matrix = { 0,0,0,0,
            0,0,0,0,
            0,0,0,0,
            0,0,0,1 };
        return unitaryDoubleQubitGate(qn_0, qn_1, P11_matrix, isConjugate, GateType::P11_GATE);
    }
};
// CPU simulator variant that additionally supports oracular (black-box) gates.
class CPUImplQPUWithOracle : public CPUImplQPU {
public:
// Applies the oracle identified by `name` to `bits`, conditioned on
// `controlbits`; is_dagger selects the conjugate-transpose of the oracle.
QError controlOracularGate(std::vector<size_t> bits,
std::vector<size_t> controlbits,
bool is_dagger,
std::string name);
};
#endif
|
Parallelizer.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_PARALLELIZER_H
#define EIGEN_PARALLELIZER_H
namespace Eigen {
namespace internal {
/** \internal Combined getter/setter for Eigen's max-thread cap.
  * SetAction stores *v as the cap; GetAction writes the effective thread
  * count into *v (the stored cap if positive, otherwise the OpenMP default,
  * or 1 when OpenMP is disabled). */
inline void manage_multi_threading(Action action, int* v)
{
  static EIGEN_UNUSED int m_maxThreads = -1;
  switch(action)
  {
    case SetAction:
      eigen_internal_assert(v!=0);
      m_maxThreads = *v;
      break;
    case GetAction:
      eigen_internal_assert(v!=0);
#ifdef EIGEN_HAS_OPENMP
      *v = (m_maxThreads > 0) ? m_maxThreads : omp_get_max_threads();
#else
      *v = 1;
#endif
      break;
    default:
      // any other Action value is a programming error
      eigen_internal_assert(false);
  }
}
}
/** Must be called first when calling Eigen from multiple threads.
  * Touches the thread-count and cache-size helpers once so that their
  * lazily-initialized statics are constructed before worker threads run. */
inline void initParallel()
{
  int threads_ignored;
  internal::manage_multi_threading(GetAction, &threads_ignored);
  std::ptrdiff_t l1, l2, l3;
  internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);
}
/** \returns the max number of threads reserved for Eigen
  * \sa setNbThreads */
inline int nbThreads()
{
  int count = 0;  // overwritten by the GetAction query below
  internal::manage_multi_threading(GetAction, &count);
  return count;
}
/** Sets the max number of threads reserved for Eigen
* \sa nbThreads */
// Passing v <= 0 restores the default behavior (nbThreads() then reports the
// OpenMP maximum, or 1 without OpenMP).
inline void setNbThreads(int v)
{
internal::manage_multi_threading(SetAction, &v);
}
namespace internal {
// Per-thread bookkeeping shared between the threads cooperating on one GEMM.
template<typename Index> struct GemmParallelInfo
{
GemmParallelInfo() : sync(-1), users(0), lhs_start(0), lhs_length(0) {}
// volatile: presumably written by one thread and polled by the others
// (pre-C++11 synchronization) -- confirm against the GEMM kernel's protocol.
Index volatile sync;
int volatile users;
// horizontal slice of the lhs assigned to this thread (set in parallelize_gemm)
Index lhs_start;
Index lhs_length;
};
// Run the product functor `func` over a rows x cols result, splitting the
// work across OpenMP threads when Condition holds and the problem is large
// enough; otherwise run it single-threaded.  `transpose` indicates a
// row-major destination, handled by swapping the roles of rows and cols.
template<bool Condition, typename Functor, typename Index>
void parallelize_gemm(const Functor& func, Index rows, Index cols, Index depth, bool transpose)
{
// TODO when EIGEN_USE_BLAS is defined,
// we should still enable OMP for other scalar types
#if !(defined (EIGEN_HAS_OPENMP)) || defined (EIGEN_USE_BLAS)
// FIXME the transpose variable is only needed to properly split
// the matrix product when multithreading is enabled. This is a temporary
// fix to support row-major destination matrices. This whole
// parallelizer mechanism has to be redisigned anyway.
EIGEN_UNUSED_VARIABLE(depth);
EIGEN_UNUSED_VARIABLE(transpose);
func(0,rows, 0,cols);
#else
// Dynamically check whether we should enable or disable OpenMP.
// The conditions are:
// - the max number of threads we can create is greater than 1
// - we are not already in a parallel code
// - the sizes are large enough
// compute the maximal number of threads from the size of the product:
// This first heuristic takes into account that the product kernel is fully optimized when working with nr columns at once.
Index size = transpose ? rows : cols;
Index pb_max_threads = std::max<Index>(1,size / Functor::Traits::nr);
// compute the maximal number of threads from the total amount of work:
double work = static_cast<double>(rows) * static_cast<double>(cols) *
static_cast<double>(depth);
double kMinTaskSize = 50000; // FIXME improve this heuristic.
pb_max_threads = std::max<Index>(1, std::min<Index>(pb_max_threads, work / kMinTaskSize));
// compute the number of threads we are going to use
Index threads = std::min<Index>(nbThreads(), pb_max_threads);
// if multi-threading is explicitely disabled, not useful, or if we already are in a parallel session,
// then abort multi-threading
// FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp?
if((!Condition) || (threads==1) || (omp_get_num_threads()>1))
return func(0,rows, 0,cols);
Eigen::initParallel();
func.initParallelSession(threads);
if(transpose)
std::swap(rows,cols);
// one GemmParallelInfo slot per thread, stack-allocated when small
ei_declare_aligned_stack_constructed_variable(GemmParallelInfo<Index>,info,threads,0);
#pragma omp parallel num_threads(threads)
{
Index i = omp_get_thread_num();
// Note that the actual number of threads might be lower than the number of request ones.
Index actual_threads = omp_get_num_threads();
// column blocks rounded down to a multiple of 4; the last thread absorbs the remainder
Index blockCols = (cols / actual_threads) & ~Index(0x3);
// row blocks rounded down to a multiple of the kernel's mr register-block height
Index blockRows = (rows / actual_threads);
blockRows = (blockRows/Functor::Traits::mr)*Functor::Traits::mr;
Index r0 = i*blockRows;
Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows;
Index c0 = i*blockCols;
Index actualBlockCols = (i+1==actual_threads) ? cols-c0 : blockCols;
// publish this thread's lhs slice so the other threads can reuse its packs
info[i].lhs_start = r0;
info[i].lhs_length = actualBlockRows;
if(transpose) func(c0, actualBlockCols, 0, rows, info);
else func(0, rows, c0, actualBlockCols, info);
}
#endif
}
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_PARALLELIZER_H
|
Example3a.c | #include<stdio.h>
/* Increments `sum` once per loop iteration across OpenMP threads, using a
 * reduction to combine the per-thread partial sums.  Prints 100:
 * the initial value 1 plus 99 iterations. */
int main() {
    int sum = 1;
    int i = 1;
    /* BUGFIX: the loop formerly read `for (i = i; ...)`.  Because `i` is
     * listed in private(i), the private copy inside the parallel region is
     * uninitialized, so `i = i` read an indeterminate value (undefined
     * behavior).  Initialize the loop explicitly instead. */
#pragma omp parallel for private(i) reduction( + : sum )
    for (i = 1; i < 100; i++) {
        sum += 1;
    }
    printf("sum is %d\n", sum);
    return 0;
}
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for two struct timeval values.
 *
 * The subtrahend *y is normalized in place (classic glibc-manual idiom) so
 * that result->tv_usec comes out non-negative.
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow seconds into y until x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Carry whole seconds out of an oversized microsecond gap. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* tv_usec is now guaranteed to be non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    /* Negative difference iff x's (adjusted) seconds fall short of y's. */
    return x->tv_sec < y->tv_sec;
}
/* 8th-order (4-point halo) 3-D wave-equation stencil benchmark.
 * Usage: prog Nx Ny Nz Nt */
int main(int argc, char *argv[])
{
    int t, i, j, k, test;
    int Nx, Ny, Nz, Nt;
    /* BUGFIX: the original read the sizes only when enough arguments were
     * present and otherwise left Nx/Ny/Nz/Nt uninitialized (undefined
     * behavior); require all four arguments instead. */
    if (argc <= 4) {
        fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
        return 1;
    }
    Nx = atoi(argv[1])+8;  /* +8: a 4-point halo on each side */
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
    Nt = atoi(argv[4]);
    /* A holds the two time levels of the field; roc2 scales the Laplacian.
     * BUGFIX: the original malloc'ed a throwaway sizeof(double**) block
     * into roc2 and immediately overwrote the pointer, leaking it. */
    double ****A = (double ****) malloc(sizeof(double***)*2);
    double ***roc2;
    A[0] = (double ***) malloc(sizeof(double**)*Nz);
    A[1] = (double ***) malloc(sizeof(double**)*Nz);
    roc2 = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
        A[0][i] = (double**) malloc(sizeof(double*)*Ny);
        A[1][i] = (double**) malloc(sizeof(double*)*Ny);
        roc2[i] = (double**) malloc(sizeof(double*)*Ny);
        for(j=0;j<Ny;j++){
            A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
            A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
            roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
        }
    }
    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 24;
    tile_size[1] = 24;
    tile_size[2] = 16;
    tile_size[3] = 128;
    tile_size[4] = -1;
    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff=1.e100;
    const int BASE = 1024;
    // initialize variables
    srand(42);
    /* BUGFIX: the original initialized from index 1, but the stencil below
     * reads index 0 (i-4 with i==4), so plane 0 was uninitialized; it also
     * never initialized A[1], which is read on the right-hand side at t==0.
     * Initialize everything (A[1] to 0.0 so the rand() stream ordering for
     * A[0]/roc2 per element is unchanged). */
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            for (k = 0; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
                A[1][i][j][k] = 0.0;
                roc2[i][j][k] = 2.0 * (rand() % BASE);
            }
        }
    }
#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
    #pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
        #pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif
    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif
    /* 8th-order central-difference coefficients */
    const double coef0 = -0.28472;
    const double coef1 = 0.16000;
    const double coef2 = -0.02000;
    const double coef3 = 0.00254;
    const double coef4 = -0.00018;
    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt; t++) {
            for (i = 4; i < Nz-4; i++) {
                for (j = 4; j < Ny-4; j++) {
                    for (k = 4; k < Nx-4; k++) {
                        /* leapfrog update: new = 2*cur - old + roc2*Laplacian(cur) */
                        A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
                            coef0* A[t%2][i  ][j  ][k  ] +
                            coef1*(A[t%2][i-1][j  ][k  ] + A[t%2][i+1][j  ][k  ] +
                                   A[t%2][i  ][j-1][k  ] + A[t%2][i  ][j+1][k  ] +
                                   A[t%2][i  ][j  ][k-1] + A[t%2][i  ][j  ][k+1]) +
                            coef2*(A[t%2][i-2][j  ][k  ] + A[t%2][i+2][j  ][k  ] +
                                   A[t%2][i  ][j-2][k  ] + A[t%2][i  ][j+2][k  ] +
                                   A[t%2][i  ][j  ][k-2] + A[t%2][i  ][j  ][k+2]) +
                            coef3*(A[t%2][i-3][j  ][k  ] + A[t%2][i+3][j  ][k  ] +
                                   A[t%2][i  ][j-3][k  ] + A[t%2][i  ][j+3][k  ] +
                                   A[t%2][i  ][j  ][k-3] + A[t%2][i  ][j  ][k+3]) +
                            coef4*(A[t%2][i-4][j  ][k  ] + A[t%2][i+4][j  ][k  ] +
                                   A[t%2][i  ][j-4][k  ] + A[t%2][i  ][j+4][k  ] +
                                   A[t%2][i  ][j  ][k-4] + A[t%2][i  ][j  ][k+4]) );
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }
    PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
    #pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif
    // Free allocated arrays
    for(i=0; i<Nz; i++){
        for(j=0;j<Ny;j++){
            free(A[0][i][j]);
            free(A[1][i][j]);
            free(roc2[i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
        free(roc2[i]);
    }
    free(A[0]);
    free(A[1]);
    free(roc2);
    free(A);          /* BUGFIX: previously leaked */
    free(tile_size);  /* BUGFIX: previously leaked */
    return 0;
}
|
array_args.h | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_UTILS_ARRAY_AGRS_H_
#define LIGHTGBM_UTILS_ARRAY_AGRS_H_
#include <LightGBM/utils/openmp_wrapper.h>
#include <algorithm>
#include <utility>
#include <vector>
namespace LightGBM {
/*!
* \brief Contains some operation for a array, e.g. ArgMax, TopK.
*/
/*!
 * \brief Static helpers over arrays/vectors: ArgMax/ArgMin, quickselect
 *        (ArgMaxAtK), top-k extraction (MaxK) and simple fill/check
 *        utilities.  VAL_T needs ordering and equality operators.
 */
template<typename VAL_T>
class ArrayArgs {
 public:
  /*!
   * \brief Multi-threaded argmax: splits the vector into one contiguous
   *        stripe per OpenMP thread, finds each stripe's argmax, then
   *        reduces the per-thread winners sequentially.
   *        Ties keep the lowest index within a stripe.
   * \param array Input vector (callers guard against empty; see ArgMax).
   * \return Index of a maximal element.
   */
  inline static size_t ArgMaxMT(const std::vector<VAL_T>& array) {
    int num_threads = 1;
#pragma omp parallel
#pragma omp master
    {
      num_threads = omp_get_num_threads();
    }
    // ceiling division so num_threads stripes cover the whole vector
    int step = std::max(1, (static_cast<int>(array.size()) + num_threads - 1) / num_threads);
    std::vector<size_t> arg_maxs(num_threads, 0);
#pragma omp parallel for schedule(static, 1)
    for (int i = 0; i < num_threads; ++i) {
      size_t start = step * i;
      if (start >= array.size()) { continue; }
      size_t end = std::min(array.size(), start + step);
      size_t arg_max = start;
      for (size_t j = start + 1; j < end; ++j) {
        if (array[j] > array[arg_max]) {
          arg_max = j;
        }
      }
      arg_maxs[i] = arg_max;
    }
    // sequential reduction over the per-stripe winners
    size_t ret = arg_maxs[0];
    for (int i = 1; i < num_threads; ++i) {
      if (array[arg_maxs[i]] > array[ret]) {
        ret = arg_maxs[i];
      }
    }
    return ret;
  }
  /*!
   * \brief Index of the maximum element; returns 0 for an empty vector.
   *        Uses the multi-threaded variant for inputs larger than 1024.
   */
  inline static size_t ArgMax(const std::vector<VAL_T>& array) {
    if (array.empty()) {
      return 0;
    }
    if (array.size() > 1024) {
      return ArgMaxMT(array);
    } else {
      size_t arg_max = 0;
      for (size_t i = 1; i < array.size(); ++i) {
        if (array[i] > array[arg_max]) {
          arg_max = i;
        }
      }
      return arg_max;
    }
  }
  /*! \brief Index of the minimum element; returns 0 for an empty vector. */
  inline static size_t ArgMin(const std::vector<VAL_T>& array) {
    if (array.empty()) {
      return 0;
    }
    size_t arg_min = 0;
    for (size_t i = 1; i < array.size(); ++i) {
      if (array[i] < array[arg_min]) {
        arg_min = i;
      }
    }
    return arg_min;
  }
  /*! \brief Index of the maximum of array[0..n); returns 0 when n == 0. */
  inline static size_t ArgMax(const VAL_T* array, size_t n) {
    if (n <= 0) {
      return 0;
    }
    size_t arg_max = 0;
    for (size_t i = 1; i < n; ++i) {
      if (array[i] > array[arg_max]) {
        arg_max = i;
      }
    }
    return arg_max;
  }
  /*! \brief Index of the minimum of array[0..n); returns 0 when n == 0. */
  inline static size_t ArgMin(const VAL_T* array, size_t n) {
    if (n <= 0) {
      return 0;
    }
    size_t arg_min = 0;
    for (size_t i = 1; i < n; ++i) {
      if (array[i] < array[arg_min]) {
        arg_min = i;
      }
    }
    return arg_min;
  }
  /*!
   * \brief Three-way (Bentley-McIlroy style) partition step used by
   *        ArgMaxAtK, ordering descending around the pivot ref[end - 1]
   *        (all comparisons use operator>).  As consumed by ArgMaxAtK:
   *        on return, indices <= *l hold elements greater than the pivot,
   *        indices >= *r hold smaller ones, and indices strictly between
   *        *l and *r hold pivot-equal elements.
   * \param arr   Vector to partition (modified in place).
   * \param start First index of the range.
   * \param end   One past the last index of the range.
   * \param l,r   Output bounds bracketing the pivot-equal middle run.
   */
  inline static void Partition(std::vector<VAL_T>* arr, int start, int end, int* l, int* r) {
    int i = start - 1;
    int j = end - 1;
    int p = i;  // right edge of pivot-equal elements parked on the left
    int q = j;  // left edge of pivot-equal elements parked on the right
    if (start >= end) {
      return;
    }
    std::vector<VAL_T>& ref = *arr;
    VAL_T v = ref[end - 1];  // pivot; also the sentinel for the ++i scan
    for (;;) {
      while (ref[++i] > v) {}
      while (v > ref[--j]) { if (j == start) { break; } }
      if (i >= j) { break; }
      std::swap(ref[i], ref[j]);
      // park pivot-equal elements at the outer edges; moved to the middle below
      if (ref[i] == v) { p++; std::swap(ref[p], ref[i]); }
      if (v == ref[j]) { q--; std::swap(ref[j], ref[q]); }
    }
    // place the pivot, then swap the parked equal elements into the middle
    std::swap(ref[i], ref[end - 1]);
    j = i - 1;
    i = i + 1;
    for (int k = start; k <= p; k++, j--) { std::swap(ref[k], ref[j]); }
    for (int k = end - 2; k >= q; k--, i++) { std::swap(ref[i], ref[k]); }
    *l = j;
    *r = i;
  }
  // Note: k refer to index here. e.g. k=0 means get the max number.
  /*!
   * \brief Quickselect over a descending ordering: recursively partitions
   *        until position k holds the element that would sit there if
   *        *arr were sorted descending (k=0 selects the maximum).
   * \return An index whose element is the k-th largest.
   */
  inline static int ArgMaxAtK(std::vector<VAL_T>* arr, int start, int end, int k) {
    if (start >= end - 1) {
      return start;
    }
    int l = start;
    int r = end - 1;
    Partition(arr, start, end, &l, &r);
    // if find or all elements are the same.
    if ((k > l && k < r) || (l == start - 1 && r == end - 1)) {
      return k;
    } else if (k <= l) {
      return ArgMaxAtK(arr, start, l + 1, k);    // k-th largest is in the left (greater) part
    } else {
      return ArgMaxAtK(arr, r, end, k);          // k-th largest is in the right (smaller) part
    }
  }
  // Note: k is 1-based here. e.g. k=3 means get the top-3 numbers.
  /*!
   * \brief Copies array into *out and truncates it to its k largest
   *        values (the result is not fully sorted).  k <= 0 yields an
   *        empty result; k >= array.size() returns the whole copy.
   */
  inline static void MaxK(const std::vector<VAL_T>& array, int k, std::vector<VAL_T>* out) {
    out->clear();
    if (k <= 0) {
      return;
    }
    for (auto val : array) {
      out->push_back(val);
    }
    if (static_cast<size_t>(k) >= array.size()) {
      return;
    }
    // after selection, the first k entries are the k largest values
    ArgMaxAtK(out, 0, static_cast<int>(out->size()), k - 1);
    out->erase(out->begin() + k, out->end());
  }
  /*! \brief Resize *array to n elements, all set to t. */
  inline static void Assign(std::vector<VAL_T>* array, VAL_T t, size_t n) {
    array->resize(n);
    for (size_t i = 0; i < array->size(); ++i) {
      (*array)[i] = t;
    }
  }
  /*! \brief True iff every element equals VAL_T(0). */
  inline static bool CheckAllZero(const std::vector<VAL_T>& array) {
    for (size_t i = 0; i < array.size(); ++i) {
      if (array[i] != VAL_T(0)) {
        return false;
      }
    }
    return true;
  }
  /*! \brief True iff every element equals t. */
  inline static bool CheckAll(const std::vector<VAL_T>& array, VAL_T t) {
    for (size_t i = 0; i < array.size(); ++i) {
      if (array[i] != t) {
        return false;
      }
    }
    return true;
  }
};
} // namespace LightGBM
#endif // LightGBM_UTILS_ARRAY_AGRS_H_
|
fhn_mod.c | #include <stdio.h>
#include "fhn_mod.h"
/* Fills the generic cell-model descriptor.  The entry-point signature
 * (cell_model, get_initial_v, get_neq) is declared by the
 * GET_CELL_MODEL_DATA macro in fhn_mod.h; only the fields the caller
 * asked for are written. */
GET_CELL_MODEL_DATA(init_cell_model_data) {
    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;              // resting potential (constant from header)
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;      // number of state variables per cell
}
/* Initial conditions of the modified FHN 1961 model: both state
 * variables start at rest (0).  The signature (sv, ...) comes from the
 * SET_ODE_INITIAL_CONDITIONS_CPU macro in fhn_mod.h. */
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {
    /*
    static bool first_call = true;
    if(first_call) {
    #ifdef _WIN32
    printf("Using modified FHN 1961 CPU model\n");
    #else
    print_to_stdout_and_file("Using modified FHN 1961 CPU model\n");
    #endif
    first_call = false;
    }
    */
    sv[0] = 0.000000f; //Vm millivolt
    sv[1] = 0.000000f; //v dimensionless
}
/* Integrates every requested cell for num_steps explicit steps.
 * The parameter list (dt, sv, stim_currents, cells_to_solve,
 * num_cells_to_solve, num_steps, ...) is supplied by the
 * SOLVE_MODEL_ODES_CPU macro in fhn_mod.h. */
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {
    uint32_t sv_id;
    int i;
    // each iteration updates a distinct cell's state, so cells are independent;
    // sv_id must be private because every thread writes it
    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++) {
        // a NULL cells_to_solve means "solve all cells in index order"
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = (uint32_t )i;
        for (int j = 0; j < num_steps; ++j) {
            // sv is laid out as NEQ consecutive state variables per cell
            solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}
/* Advance one cell's NEQ state variables by a single forward-Euler step
 * of size dt, applying the given stimulus current. */
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {
    real state[NEQ], deriv[NEQ];
    int n;
    /* snapshot the current state so the RHS sees consistent values */
    for (n = 0; n < NEQ; n++)
        state[n] = sv[n];
    RHS_cpu(state, deriv, stim_current);
    /* explicit Euler update: y_{t+1} = y_t + dt * dy/dt */
    for (n = 0; n < NEQ; n++)
        sv[n] = dt*deriv[n] + state[n];
}
/* Right-hand side of the modified FitzHugh-Nagumo (1961) model.
 * sv[0] = u (fast excitation variable), sv[1] = v (recovery variable);
 * writes du/dt into rDY_[0] and dv/dt into rDY_[1]. */
void RHS_cpu(const real *sv, real *rDY_, real stim_current) {
    //State variables
    const real u = sv[0];
    const real v = sv[1];
    // model constants of this modified formulation
    const real a = 0.2f;
    const real b = 0.5f;
    const real k = 36.0;
    const real epsilon = 0.000150;
    // cubic excitation term minus coupling to v, plus external stimulus
    rDY_[0] = k*(u*(1.0f - u)*(u - a) - u*v) + stim_current;
    // slow linear recovery dynamics (epsilon sets the time-scale separation)
    rDY_[1] = k*epsilon*(b*u - v);
}
|
edge_vol_int.c | /******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved. *
* This file is part of the LIBXSMM library. *
* *
* For information on the license, see the LICENSE file. *
* Further information: https://github.com/hfp/libxsmm/ *
* SPDX-License-Identifier: BSD-3-Clause *
******************************************************************************/
/* Alexander Heinecke (Intel Corp.)
******************************************************************************/
#include "edge_proxy_common.h"
#include <libxsmm_intrinsics_x86.h>
#include <libxsmm.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <math.h>
#if defined(_OPENMP)
# include <omp.h>
#endif
/*#define EDGE_HP_1G*/
/*#define HANDLE_AMOK*/
#if defined(EDGE_HP_1G) || defined(EDGE_HP_2M)
#include <sys/mman.h>
#include <linux/mman.h>
#endif
/* Allocate nbytes bytes, preferring huge pages when configured:
 *  - EDGE_HP_1G: round nbytes up to whole 1 GiB pages and mmap them;
 *  - EDGE_HP_2M: round nbytes up to whole 2 MiB pages and mmap them;
 *  - otherwise: ordinary aligned heap allocation via libxsmm.
 * Exits the process if a huge-page mmap fails.
 * NOTE(review): 'alignment' is honored only in the fallback path; the
 * mmap paths are page-aligned by construction. */
LIBXSMM_INLINE void* edge_hp_malloc( size_t nbytes, size_t alignment ) {
  void* ret_ptr = NULL;
#if defined(EDGE_HP_1G)
  /* round the request up to a whole number of 1 GiB pages */
  size_t num_large_pages = nbytes / (1073741824L);
  if ( nbytes > num_large_pages*1073741824L ) {
    num_large_pages++;
  }
  nbytes = (size_t) num_large_pages * 1073741824L;
  printf("trying to allocate %ld 1G pages\n", num_large_pages);
  /*ret_ptr = mmap( NULL, nbytes, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | MAP_HUGE_1GB, -1, 0 );*/
  ret_ptr = mmap( NULL, nbytes, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | MAP_HUGE_1GB, -1, 0 );
  if ( (ret_ptr == (void *)(-1)) ) {
    fprintf(stderr,"1G mmap call failed\n");
    exit(1);
  }
#elif defined(EDGE_HP_2M)
  /* round the request up to a whole number of 2 MiB pages */
  size_t num_large_pages = nbytes / (2097152UL);
  if ( nbytes > num_large_pages*2097152UL ) {
    num_large_pages++;
  }
  nbytes = (size_t) num_large_pages * 2097152UL;
  printf("trying to allocate %ld 2M pages\n", num_large_pages);
  /*ret_ptr = mmap( NULL, nbytes, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB, -1, 0 );*/
  ret_ptr = mmap( NULL, nbytes, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB, -1, 0 );
  if ( (ret_ptr == (void *)(-1)) ) {
    fprintf(stderr,"2M mmap call failed\n");
    exit(1);
  }
#else
  ret_ptr = libxsmm_aligned_malloc( nbytes, alignment );
#endif
  return ret_ptr;
}
/* Release memory obtained from edge_hp_malloc.  The huge-page variants
 * are not implemented yet (the mapping stays alive until process exit);
 * the fallback path frees through libxsmm. */
LIBXSMM_INLINE void edge_hp_free( void* ptr, size_t nbytes ) {
  LIBXSMM_UNUSED( nbytes );
#if defined(EDGE_HP_1G)
  /* to be implemented */
#elif defined(EDGE_HP_2M)
  /* to be implemented */
#else
  libxsmm_free( ptr );
#endif
}
#if defined(__AVX512F__)
/* C = i_beta*C + A*B for one small matrix triple, where A and C carry 8
 * fused values per logical entry (innermost stride-8 dimension, handled
 * as one AVX-512 double vector) and B is a plain scalar matrix.
 * i_beta == 0.0 zero-initializes C instead of scaling it; i_r is unused. */
LIBXSMM_INLINE void matMulFusedAC( unsigned short i_r,
                                   unsigned int i_m,
                                   unsigned int i_n,
                                   unsigned int i_k,
                                   unsigned int i_ldA,
                                   unsigned int i_ldB,
                                   unsigned int i_ldC,
                                   double i_beta,
                                   const double *i_a,
                                   const double *i_b,
                                   double *o_c ) {
  unsigned int l_m, l_n, l_k;
  const __m512d beta = _mm512_set1_pd( i_beta );
  LIBXSMM_UNUSED(i_r);
  /* pass 1: C = beta*C (or C = 0 when beta is exactly zero) */
  for( l_m = 0; l_m < i_m; l_m++ ) {
    for( l_n = 0; l_n < i_n; l_n++ ) {
      __m512d vc = (i_beta != 0.0) ? _mm512_mul_pd( _mm512_loadu_pd( &(o_c[l_m*i_ldC*8 + l_n*8 + 0]) ), beta ) : _mm512_setzero_pd();
      _mm512_storeu_pd(&(o_c[l_m*i_ldC*8 + l_n*8 + 0]), vc);
    }
  }
  /* pass 2: C += A*B, broadcasting the scalar B entry across the 8 lanes */
  for( l_m = 0; l_m < i_m; l_m++ ) {
    for( l_n = 0; l_n < i_n; l_n++ ) {
      __m512d vc = _mm512_loadu_pd( &(o_c[l_m*i_ldC*8 + l_n*8 + 0]) );
      for( l_k = 0; l_k < i_k; l_k++ ) {
        const __m512d alpha = _mm512_set1_pd( i_b[l_k*i_ldB + l_n] );
        vc = _mm512_fmadd_pd( alpha, _mm512_loadu_pd( &(i_a[l_m*i_ldA*8 + l_k*8 + 0]) ), vc);
      }
      _mm512_storeu_pd( &(o_c[l_m*i_ldC*8 + l_n*8 + 0]), vc );
    }
  }
}
/* C = i_beta*C + A*B for one small matrix triple, the mirror of
 * matMulFusedAC: here B and C carry 8 fused values per logical entry
 * (innermost stride-8 dimension, one AVX-512 double vector) while A is a
 * plain scalar matrix.  i_beta == 0.0 zero-initializes C; i_r is unused. */
LIBXSMM_INLINE void matMulFusedBC( unsigned short i_r,
                                   unsigned int i_m,
                                   unsigned int i_n,
                                   unsigned int i_k,
                                   unsigned int i_ldA,
                                   unsigned int i_ldB,
                                   unsigned int i_ldC,
                                   double i_beta,
                                   const double *i_a,
                                   const double *i_b,
                                   double *o_c ) {
  unsigned int l_m, l_n, l_k;
  const __m512d beta = _mm512_set1_pd( i_beta );
  LIBXSMM_UNUSED(i_r);
  /* pass 1: C = beta*C (or C = 0 when beta is exactly zero) */
  for( l_m = 0; l_m < i_m; l_m++ ) {
    for( l_n = 0; l_n < i_n; l_n++ ) {
      __m512d vc = (i_beta != 0.0) ? _mm512_mul_pd( _mm512_loadu_pd( &(o_c[l_m*i_ldC*8 + l_n*8 + 0]) ), beta ) : _mm512_setzero_pd();
      _mm512_storeu_pd(&(o_c[l_m*i_ldC*8 + l_n*8 + 0]), vc);
    }
  }
  /* pass 2: C += A*B, broadcasting the scalar A entry across the 8 lanes */
  for( l_m = 0; l_m < i_m; l_m++ ) {
    for( l_n = 0; l_n < i_n; l_n++ ) {
      __m512d vc = _mm512_loadu_pd( &(o_c[l_m*i_ldC*8 + l_n*8 + 0]) );
      for( l_k = 0; l_k < i_k; l_k++ ) {
        const __m512d alpha = _mm512_set1_pd( i_a[l_m*i_ldA + l_k] );
        vc = _mm512_fmadd_pd( alpha, _mm512_loadu_pd( &(i_b[l_k*i_ldB*8 + l_n*8 + 0]) ), vc);
      }
      _mm512_storeu_pd( &(o_c[l_m*i_ldC*8 + l_n*8 + 0]), vc );
    }
  }
}
#endif
/* Flag workers whose last iteration ran noticeably (>7%) slower than the
 * average of the still-healthy workers ("amok" threads).  Both arrays use
 * a padding factor of 8 so each worker's slot sits on its own cache line;
 * io_amoks[8*i_workers] holds the running total of flagged threads. */
LIBXSMM_INLINE void amok_detect( const double* i_runtimes, size_t* io_amoks, const size_t i_workers ) {
  double avg = 0.0;
  size_t w;
  /* average runtime over the workers not yet flagged */
  for ( w = 0; w < i_workers; w++ ) {
    if ( io_amoks[8*w] == 0 ) {
      avg += i_runtimes[8*w];
    }
  }
  avg = avg / ((double)(i_workers - io_amoks[8*i_workers]));
  /* flag every healthy worker that exceeded the average by more than 7% */
  for ( w = 0; w < i_workers; w++ ) {
    if ( io_amoks[8*w] != 0 ) continue;
    if ( i_runtimes[8*w] > avg*1.07 ) {
      io_amoks[8*i_workers]++;
      io_amoks[8*w] = 1;
    }
  }
}
/* Compute this thread's [start,end) element range, distributing the work
 * only over the workers not flagged as amok; a flagged thread gets an
 * empty range.  io_chunk receives the (ceiling) per-worker chunk size. */
LIBXSMM_INLINE void amok_balance( const size_t* i_amoks, const size_t i_workers, const size_t i_worksize, const size_t i_mytid, size_t* io_chunk, size_t* io_mystart, size_t* io_myend ) {
  const size_t cur_amoks = i_amoks[8*i_workers];
  const size_t healthy = i_workers - cur_amoks;
  /* ceiling division of the work over the healthy workers */
  const size_t chunk = (i_worksize % healthy == 0) ? (i_worksize / healthy) : ((i_worksize / healthy) + 1);
  size_t begin = 0;
  size_t end = 0;
  if ( i_amoks[8*i_mytid] == 0 ) {
    /* logical rank of this thread among the healthy workers */
    size_t rank = i_mytid;
    size_t z;
    for ( z = 0; z < i_mytid; z++ ) {
      if ( i_amoks[8*z] != 0 ) {
        rank--;
      }
    }
    /* clamp both bounds to the total work size */
    begin = ( rank    * chunk < i_worksize) ? ( rank    * chunk) : i_worksize;
    end   = ((rank+1) * chunk < i_worksize) ? ((rank+1) * chunk) : i_worksize;
  }
  *io_chunk = chunk;
  *io_mystart = begin;
  *io_myend = end;
}
/* EDGE volume-integration proxy benchmark: three sparse star-matrix
 * multiplies feeding three sparse stiffness multiplies per element, using
 * libxsmm packed sparse-JIT kernels (8 fused forward runs per entry). */
int main(int argc, char* argv[])
{
  char* mat_a = 0;
  unsigned int *mat_a_rowptr, *mat_a_colidx;
  unsigned int mat_a_rowcount, mat_a_colcount, mat_a_nnz;
  double* mat_a_values;
  libxsmm_dmmfunction a_kernel;
  char* mat_b = 0;
  unsigned int *mat_b_rowptr, *mat_b_colidx;
  unsigned int mat_b_rowcount, mat_b_colcount, mat_b_nnz;
  double* mat_b_values;
  libxsmm_dmmfunction b_kernel;
  char* mat_c = 0;
  unsigned int *mat_c_rowptr, *mat_c_colidx;
  unsigned int mat_c_rowcount, mat_c_colcount, mat_c_nnz;
  double* mat_c_values;
  libxsmm_dmmfunction c_kernel;
  char* mat_st = 0;
  unsigned int *mat_st_rowptr, *mat_st_colidx;
  unsigned int mat_st_rowcount, mat_st_colcount, mat_st_nnz;
  double* mat_st_values;
  libxsmm_dmmfunction st_kernel;
  int num_modes = 9;
  int num_quants = 9;
  size_t num_elems = 0;
  size_t num_cfr = 8;
  size_t num_reps = 1;
  size_t elem_size;
  /* OpenMP: signed induction variables */
  int i, j;
  const libxsmm_gemm_descriptor *l_xgemm_desc_stiff = 0, *l_xgemm_desc_star = 0;
  libxsmm_descriptor_blob l_xgemm_blob_stiff, l_xgemm_blob_star;
  const libxsmm_gemm_prefetch_type prefetch = LIBXSMM_GEMM_PREFETCH_NONE;
  const int flags = LIBXSMM_GEMM_FLAGS('N', 'N');
  const double alpha = 1, beta = 1;
  double flops_vol;
  double* q;
  double* qt;
  double* qs;
  double* star;
  double* global;
  unsigned long long l_start, l_end;
  double l_total;
  unsigned int l_num_threads;
  unsigned int l_star_ent = num_quants*num_quants;
  double* l_total_thread;
  double* l_cur_thread_time;
  double time_max;
  double time_min;
  double time_avg;
  size_t* amoks;
  /* read cmd */
  if ((argc > 1 && !strncmp(argv[1], "-h", 3)) || (argc != 8)) {
    printf("Usage: %s stif1 stif2 stif3 star nModes nElems nReps\n", argv[0]);
    return 0;
  }
  libxsmm_rng_set_seed(1);
  /* some empty lines at the beginning */
  printf("\n");
  i = 1;
  if (argc > (int)i) mat_a = argv[i++];
  if (argc > (int)i) mat_b = argv[i++];
  if (argc > (int)i) mat_c = argv[i++];
  if (argc > (int)i) mat_st = argv[i++];
  if (argc > (int)i) num_modes = atoi(argv[i++]);
  if (argc > (int)i) num_elems = atoi(argv[i++]);
  if (argc > (int)i) num_reps = atoi(argv[i++]);
  elem_size = num_modes*num_quants*num_cfr;
#if defined(_OPENMP)
  #pragma omp parallel
  {
    #pragma omp master
    {
      l_num_threads = omp_get_num_threads();
    }
  }
#else
  l_num_threads = 1;
#endif
  /* per-thread slots are padded by a factor of 8 (separate cache lines) */
  l_total_thread = (double*)malloc(8*l_num_threads*sizeof(double));
  l_cur_thread_time = (double*)malloc(8*l_num_threads*sizeof(double));
  amoks = (size_t*)malloc(8*(l_num_threads+1)*sizeof(size_t));
  for ( i = 0; i < 8*((int)l_num_threads+1); i++ ) {
    amoks[i] = 0;
  }
  /* BUGFIX: the timing accumulators were malloc'ed but never zeroed, yet
   * the benchmark loop does "+=" into l_total_thread (and the stats read
   * both arrays), i.e. it accumulated into uninitialized memory. */
  for ( i = 0; i < 8*(int)l_num_threads; i++ ) {
    l_total_thread[i] = 0.0;
    l_cur_thread_time[i] = 0.0;
  }
  /* read matrices */
  printf("reading sparse matrices... ");
  edge_sparse_csr_reader_double( mat_a, &mat_a_rowptr, &mat_a_colidx, &mat_a_values, &mat_a_rowcount, &mat_a_colcount, &mat_a_nnz );
  edge_sparse_csr_reader_double( mat_b, &mat_b_rowptr, &mat_b_colidx, &mat_b_values, &mat_b_rowcount, &mat_b_colcount, &mat_b_nnz );
  edge_sparse_csr_reader_double( mat_c, &mat_c_rowptr, &mat_c_colidx, &mat_c_values, &mat_c_rowcount, &mat_c_colcount, &mat_c_nnz );
  edge_sparse_csr_reader_double( mat_st, &mat_st_rowptr, &mat_st_colidx, &mat_st_values, &mat_st_rowcount, &mat_st_colcount, &mat_st_nnz );
  printf("done!\n\n");
  /* generate kernels */
  printf("generating code... ");
  l_xgemm_desc_stiff = libxsmm_dgemm_descriptor_init(&l_xgemm_blob_stiff,
    num_quants, num_modes, num_modes, num_modes, 0, num_modes, alpha, beta, flags, prefetch);
  l_xgemm_desc_star = libxsmm_dgemm_descriptor_init(&l_xgemm_blob_star,
    num_quants, num_modes, num_quants, 0, num_modes, num_modes, alpha, beta, flags, prefetch);
  a_kernel = libxsmm_create_packed_spxgemm_csr( l_xgemm_desc_stiff, (unsigned int)num_cfr, mat_a_rowptr, mat_a_colidx, (const void*)mat_a_values ).dmm;
  b_kernel = libxsmm_create_packed_spxgemm_csr( l_xgemm_desc_stiff, (unsigned int)num_cfr, mat_b_rowptr, mat_b_colidx, (const void*)mat_b_values ).dmm;
  c_kernel = libxsmm_create_packed_spxgemm_csr( l_xgemm_desc_stiff, (unsigned int)num_cfr, mat_c_rowptr, mat_c_colidx, (const void*)mat_c_values ).dmm;
  st_kernel = libxsmm_create_packed_spxgemm_csr( l_xgemm_desc_star, (unsigned int)num_cfr, mat_st_rowptr, mat_st_colidx, (const void*)mat_st_values ).dmm;
  if ( a_kernel == 0 ) {
    printf("a kernel could not be built -> exit!");
    exit(-1);
  }
  if ( b_kernel == 0 ) {
    printf("b kernel could not be built -> exit!");
    exit(-1);
  }
  /* BUGFIX: this check previously re-tested b_kernel, so a failed
   * c_kernel JIT went undetected and would crash in the benchmark loop */
  if ( c_kernel == 0 ) {
    printf("c kernel could not be built -> exit!");
    exit(-1);
  }
  if ( st_kernel == 0 ) {
    printf("st kernel could not be built -> exit!");
    exit(-1);
  }
  printf("done!\n\n");
  /* copying code to 1 GB page */
#if 0
#if defined(EDGE_HP_1G) || defined(EDGE_HP_2M)
  printf("copying code to 1GB page...\n");
  onegcode = (void*)edge_hp_malloc( 5*1024*1024, 2097152 );
  memcpy( onegcode, (void*) a_kernel, 1505 );
  memcpy( onegcode+(1*1024*1024)+64, (void*) b_kernel, 2892 );
  memcpy( onegcode+(2*1024*1024)+128, (void*) c_kernel, 3249 );
  memcpy( onegcode+(3*1024*1024)+196, (void*)st_kernel, 11010 );
  a_kernel = (libxsmm_dmmfunction)onegcode;
  b_kernel = (libxsmm_dmmfunction)(onegcode+(1*1024*1024)+64);
  c_kernel = (libxsmm_dmmfunction)(onegcode+(2*1024*1024)+128);
  st_kernel = (libxsmm_dmmfunction)(onegcode+(3*1024*1024)+196);
  printf("...done\n\n");
#endif
#endif
  /* create unknowns and t-unknowns */
  printf("allocating and initializing fake data... \n");
  /* DoFs */
  printf(" q: %f MiB\n", ((double)(num_elems*num_modes*num_quants*num_cfr*sizeof(double))) / ( 1024.0*1024.0) );
  q = (double*)edge_hp_malloc( num_elems*num_modes*num_quants*num_cfr*sizeof(double), 2097152);
  /* tDofs */
  printf(" qt: %f MiB\n", ((double)(num_elems*num_modes*num_quants*num_cfr*sizeof(double))) / ( 1024.0*1024.0) );
  qt = (double*)edge_hp_malloc( num_elems*num_modes*num_quants*num_cfr*sizeof(double), 2097152);
  /* star matrices */
  printf(" star: %f MiB\n", ((double)(num_elems*3*l_star_ent*sizeof(double))) / ( 1024.0*1024.0 ) );
  star = (double*)edge_hp_malloc( num_elems*3*l_star_ent*sizeof(double), 2097152);
  /* stiffness matrices */
  printf("global: %f MiB\n", ((double)(3*num_modes*num_modes*sizeof(double))) / ( 1024.0*1024 ) );
  global = (double*)edge_hp_malloc( 3*num_modes*num_modes*sizeof(double), 2097152);
  /* per thread scratch */
  printf(" t: %f MiB\n", ((double)(l_num_threads*num_modes*num_quants*num_cfr*sizeof(double)))/ ( 1024.0*1024.0) );
  qs = (double*)edge_hp_malloc( l_num_threads*num_modes*num_quants*num_cfr*sizeof(double), 2097152);
  for (i = 0; i < (int)num_elems; i++) {
    for (j = 0; j < (int)elem_size; j++) {
      q[i*elem_size + j] = libxsmm_rng_f64();
    }
  }
  for (i = 0; i < (int)num_elems; i++) {
    for (j = 0; j < (int)elem_size; j++) {
      qt[i*elem_size + j] = libxsmm_rng_f64();
    }
  }
  for (i = 0; i < (int)l_num_threads; i++) {
    for (j = 0; j < (int)elem_size; j++) {
      qs[i*elem_size + j] = libxsmm_rng_f64();
    }
  }
  for (i = 0; i < (int)num_elems; i++) {
    for (j = 0; j < (int)mat_st_nnz*3; j++) {
      star[(i*3*mat_st_nnz)+j] = libxsmm_rng_f64();
    }
  }
  for (i = 0; i < 3; i++) {
    for (j = 0; j < num_modes*num_modes; j++) {
      global[(i*num_modes*num_modes)+j] = libxsmm_rng_f64();
    }
  }
  printf("allocation done!\n\n");
  printf("running benchmark...\n");
  l_start = libxsmm_timer_tick();
#if defined(_OPENMP)
# pragma omp parallel private(i, j)
#endif
  {
#if defined(_OPENMP)
    int mytid = omp_get_thread_num();
#else
    int mytid = 0;
#endif
    libxsmm_timer_tickint mystart, myend;
#if defined(HANDLE_AMOK)
    size_t cur_amoks = 0;
    size_t non_amoks = l_num_threads;
#endif
    size_t l_el_chunk = 0;
    size_t l_el_start = 0;
    size_t l_el_end = 0;
    /* initial work distribution */
    amok_balance( amoks, l_num_threads, num_elems, mytid, &l_el_chunk, &l_el_start, &l_el_end );
    for (i = 0; i < (int)num_reps; i++) {
#if defined(HANDLE_AMOK)
      /* did we have an amok? */
      if (cur_amoks != amoks[8*l_num_threads]) {
        cur_amoks = amoks[8*l_num_threads];
        non_amoks = l_num_threads - cur_amoks;
        /* re-balance work */
        amok_balance( amoks, l_num_threads, num_elems, mytid, &l_el_chunk, &l_el_start, &l_el_end );
      }
#endif
      mystart = libxsmm_timer_tick();
      for (j = (int)l_el_start; j < (int)l_el_end; j++) {
#if 1
        /* three star-matrix multiplies each feeding a stiffness multiply */
        st_kernel( star+(j*3*mat_st_nnz)               , qt+(j*elem_size), qs+(mytid*elem_size) );
        a_kernel( qs+(mytid*elem_size), global                          , q+(j*elem_size) );
        st_kernel( star+(j*3*mat_st_nnz)+mat_st_nnz    , qt+(j*elem_size), qs+(mytid*elem_size) );
        b_kernel( qs+(mytid*elem_size), global+(num_modes*num_modes)    , q+(j*elem_size) );
        st_kernel( star+(j*3*mat_st_nnz)+(2*mat_st_nnz), qt+(j*elem_size), qs+(mytid*elem_size) );
        c_kernel( qs+(mytid*elem_size), global+(2*num_modes*num_modes)  , q+(j*elem_size) );
#else
        matMulFusedBC( 8, num_quants, num_modes, num_quants, num_quants, num_modes, num_modes, 1.0, star+(j*3*mat_st_nnz), qt+(j*elem_size), qs+(mytid*elem_size) );
        matMulFusedAC( 8, num_quants, num_modes, num_modes, num_modes, num_modes, num_modes, 1.0, qs+(mytid*elem_size), global, q+(j*elem_size) );
        matMulFusedBC( 8, num_quants, num_modes, num_quants, num_quants, num_modes, num_modes, 1.0, star+(j*3*mat_st_nnz)+mat_st_nnz, qt+(j*elem_size), qs+(mytid*elem_size) );
        matMulFusedAC( 8, num_quants, num_modes, num_modes, num_modes, num_modes, num_modes, 1.0, qs+(mytid*elem_size), global+(num_modes*num_modes) , q+(j*elem_size) );
        matMulFusedBC( 8, num_quants, num_modes, num_quants, num_quants, num_modes, num_modes, 1.0, star+(j*3*mat_st_nnz)+(2*mat_st_nnz), qt+(j*elem_size), qs+(mytid*elem_size) );
        matMulFusedAC( 8, num_quants, num_modes, num_modes, num_modes, num_modes, num_modes, 1.0, qs+(mytid*elem_size), global+(2*num_modes*num_modes), q+(j*elem_size) );
#endif
      }
      myend = libxsmm_timer_tick();
      l_cur_thread_time[8*mytid] = libxsmm_timer_duration( mystart, myend );
      l_total_thread[8*mytid] += libxsmm_timer_duration( mystart, myend );
#if defined(_OPENMP)
      #pragma omp barrier
#endif
#if defined(HANDLE_AMOK)
      /* checking for amoks is centralized business */
      if (mytid == 0) {
        /* amok check */
        amok_detect( l_cur_thread_time, amoks, l_num_threads );
      }
#if defined(_OPENMP)
      #pragma omp barrier
#endif
#endif
    }
  }
  l_end = libxsmm_timer_tick();
  l_total = libxsmm_timer_duration(l_start, l_end);
  printf("...done!\n\n");
  /* some timing stats */
  time_max = 0.0;
  time_min = 80000000;
  time_avg = 0.0;
  for (i = 0; i < (int)l_num_threads; i++) {
    if( amoks[8*i] == 0 ) {
      if( l_total_thread[8*i] > time_max) time_max = l_total_thread[8*i];
      if( l_total_thread[8*i] < time_min) time_min = l_total_thread[8*i];
      time_avg += l_total_thread[8*i];
    }
  }
  time_avg = time_avg/((double)(l_num_threads-amoks[8*l_num_threads]));
  flops_vol = (double)num_quants * (double)mat_a_nnz * (double)num_cfr * 2.0;
  flops_vol += (double)num_quants * (double)mat_b_nnz * (double)num_cfr * 2.0;
  flops_vol += (double)num_quants * (double)mat_c_nnz * (double)num_cfr * 2.0;
  flops_vol += (double)num_modes * (double)mat_st_nnz * (double)num_cfr * 6.0; /* 3 star matrix mul */
  printf("%fs time for vol (asm), min %f, max %f, avg %f, #amoks %llu, amok-threads ", l_total, time_min, time_max, time_avg, (unsigned long long)amoks[8*l_num_threads]);
  for ( i = 0; i < (int)l_num_threads; i++ ) {
    if ( amoks[8*i] != 0 ) {
      printf("%i,", i);
    }
  }
  printf("\n");
  printf("%f GFLOPS for vol (asm)\n", ((double)num_elems * (double)num_reps * flops_vol) / (l_total * 1.0e9));
  printf("%f GiB/s for vol (asm)\n", (double)((double)num_elems * (double)elem_size * 8.0 * 3.0 * (double)num_reps) / (l_total * 1024.0*1024.0*1024.0) );
  printf("done!\n\n");
  /* some empty lines at the end */
  printf("\n\n");
  return 0;
}
|
scheduled-clauseModificado4.c | #include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
/* Demonstrates firstprivate/lastprivate and dynamic scheduling on a
 * per-thread partial sum.  Usage: prog <iterations (<=200)> <chunk>.
 * BUGFIX: declared with an explicit return type ('main()' with implicit
 * int is invalid since C99) and returns a value; also rejects n < 1,
 * for which the lastprivate variables would be printed uninitialized. */
int main(int argc, char **argv) {
    int i, n=200, chunk, a[n], suma=0, num_threads, num_proc, in_parallel;
    if(argc < 3) {
        fprintf(stderr,"\nFalta iteraciones o chunk \n");
        exit(-1);
    }
    n = atoi(argv[1]); if (n>200) n=200; chunk = atoi(argv[2]);
    if (n < 1) {
        fprintf(stderr,"\nFalta iteraciones o chunk \n");
        exit(-1);
    }
    for (i=0; i<n; i++) a[i] = i;
    /* suma starts from the sequential value in every thread (firstprivate);
     * the lastprivate copies come from the thread that ran iteration n-1 */
    #pragma omp parallel for firstprivate(suma) \
        lastprivate(suma,num_threads,num_proc,in_parallel) schedule(dynamic,chunk)
    for (i=0; i<n; i++)
    {
        suma = suma + a[i];
        printf(" thread %d suma a[%d]=%d suma=%d \n",
               omp_get_thread_num(),i,a[i],suma);
        num_threads = omp_get_num_threads();
        num_proc = omp_get_num_procs();
        in_parallel = omp_in_parallel();
    }
    printf("Dentro de 'parallel for' num_threads=%d\n",num_threads);
    printf("Dentro de 'parallel for' num_procs=%d\n",num_proc);
    printf("Dentro de 'parallel for' in_parallel=%d\n",in_parallel);
    printf("Fuera de 'parallel for' suma=%d\n",suma);
    printf("Fuera de 'parallel for' num_threads=%d\n",omp_get_num_threads());
    printf("Fuera de 'parallel for' num_procs=%d\n",omp_get_num_procs());
    printf("Fuera de 'parallel for' in_parallel=%d\n",omp_in_parallel());
    return 0;
}
|
copy-private.c | #include <stdio.h>
#include <omp.h>
/* Reads a value inside a 'single' region and broadcasts it to every
 * thread's private copy, then fills b[] in a worksharing loop.
 * BUGFIX: the single construct lacked copyprivate(a), so only the thread
 * that executed it had 'a' initialized -- every other thread read its own
 * uninitialized private 'a' in the loop below.  Also gives main an
 * explicit return type (implicit int is invalid since C99). */
int main() {
    int n = 9, i, b[n];
    for (i=0; i<n; i++)
        b[i] = -1;
    #pragma omp parallel
    {
        int a;
        /* copyprivate broadcasts the value read here to all threads */
        #pragma omp single copyprivate(a)
        {
            printf("\nIntroduce valor de inicialización a: ");
            scanf("%d", &a );
            printf("\nSingle ejecutada por el thread %d\n", omp_get_thread_num());
        }
        #pragma omp for
        for (i=0; i<n; i++){
            b[i] = a;
            printf("\nFOR b[%d] ejecutado por el thread %d\n",i, omp_get_thread_num());
        }
    }
    printf("Depués de la región parallel:\n");
    for (i=0; i<n; i++)
        printf("b[%d] = %d\t",i,b[i]);
    printf("\n");
    return 0;
}
|
block-6.c | // { dg-do compile }
/* DejaGnu compile-only test: branching out of an OpenMP structured block
 * with 'return' is ill-formed, so the compiler must diagnose it.  The
 * dg-error directive below must stay byte-identical for the harness. */
void foo()
{
#pragma omp ordered
  {
    return; // { dg-error "invalid branch to/from OpenMP structured block" }
  }
}
|
scheduled-clauseModificado4.c | #include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
/* Demonstrates firstprivate/lastprivate and dynamic scheduling on a
 * per-thread partial sum.  Usage: prog <iterations (<=200)> <chunk>.
 * BUGFIX: declared with an explicit return type ('main()' with implicit
 * int is invalid since C99) and returns a value; also rejects n < 1,
 * for which the lastprivate variables would be printed uninitialized. */
int main(int argc, char **argv) {
    int i, n=200, chunk, a[n], suma=0, num_threads, num_proc, in_parallel;
    if(argc < 3) {
        fprintf(stderr,"\nFalta iteraciones o chunk \n");
        exit(-1);
    }
    n = atoi(argv[1]); if (n>200) n=200; chunk = atoi(argv[2]);
    if (n < 1) {
        fprintf(stderr,"\nFalta iteraciones o chunk \n");
        exit(-1);
    }
    for (i=0; i<n; i++) a[i] = i;
    /* suma starts from the sequential value in every thread (firstprivate);
     * the lastprivate copies come from the thread that ran iteration n-1 */
    #pragma omp parallel for firstprivate(suma) \
        lastprivate(suma,num_threads,num_proc,in_parallel) schedule(dynamic,chunk)
    for (i=0; i<n; i++)
    {
        suma = suma + a[i];
        printf(" thread %d suma a[%d]=%d suma=%d \n",
               omp_get_thread_num(),i,a[i],suma);
        num_threads = omp_get_num_threads();
        num_proc = omp_get_num_procs();
        in_parallel = omp_in_parallel();
    }
    printf("Dentro de 'parallel for' num_threads=%d\n",num_threads);
    printf("Dentro de 'parallel for' num_procs=%d\n",num_proc);
    printf("Dentro de 'parallel for' in_parallel=%d\n",in_parallel);
    printf("Fuera de 'parallel for' suma=%d\n",suma);
    printf("Fuera de 'parallel for' num_threads=%d\n",omp_get_num_threads());
    printf("Fuera de 'parallel for' num_procs=%d\n",omp_get_num_procs());
    printf("Fuera de 'parallel for' in_parallel=%d\n",omp_in_parallel());
    return 0;
}
|
GB_binop__bset_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__bset_int32
// A.*B function (eWiseMult): GB_AemultB__bset_int32
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__bset_int32
// C+=b function (dense accum): GB_Cdense_accumb__bset_int32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bset_int32
// C=scalar+B GB_bind1st__bset_int32
// C=scalar+B' GB_bind1st_tran__bset_int32
// C=A+scalar GB_bind2nd__bset_int32
// C=A'+scalar GB_bind2nd_tran__bset_int32
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = GB_BITSET (aij, bij, int32_t, 32)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = GB_BITSET (x, y, int32_t, 32) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSET || GxB_NO_INT32 || GxB_NO_BSET_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A and B are all dense; the loop body comes from the
// shared template, specialized via the GB_* macros defined above
// (cij = GB_BITSET (aij, bij, int32_t, 32)).
GrB_Info GB_Cdense_ewise3_noaccum__bset_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // compiled out when BSET/INT32 kernels are disabled via GB_control.h
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix B into a dense matrix C.
// The slice arrays (kfirst/klast/pstart) describe how B has been partitioned
// into ntasks parallel tasks; see GB_dense_subassign_23_template.c.
GrB_Info GB_Cdense_accumB__bset_int32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b, accumulate a scalar b (passed as untyped GB_void*) into a dense
// matrix C; see GB_dense_subassign_22_template.c.
GrB_Info GB_Cdense_accumb__bset_int32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the block above already returned; harmless
// artifact of the code generator
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled row-scale stub (C = D*B).  FIX: the generated placeholder name
// was "(node)"; every other disabled stub in this file uses "(none)"
// (compare the colscale stub above), so restore that spelling.
GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B.  The task list / C_to_* mapping arrays
// come from the symbolic phase; the numeric work is GB_add_template.c,
// specialized by this file's operator macros.
GrB_Info GB_AaddB__bset_int32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, on the intersection of the patterns
// of A and B; numeric work in GB_emult_template.c.
GrB_Info GB_AemultB__bset_int32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = op (x, Bx [p]) for all p: apply the binary operator with its
// first argument bound to the scalar x.  Cx and Bx may be aliased, which is
// safe because each entry is read once and written once.
GrB_Info GB_bind1st__bset_int32
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *Cx = (int32_t *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
// entries are independent: trivially parallel
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int32_t bij = Bx [p] ;
Cx [p] = GB_BITSET (x, bij, int32_t, 32) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = op (Ax [p], y) for all p: apply the binary operator with its
// second argument bound to the scalar y.  Cx and Ax may be aliased.
GrB_Info GB_bind2nd__bset_int32
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
// entries are independent: trivially parallel
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int32_t aij = Ax [p] ;
Cx [p] = GB_BITSET (aij, y, int32_t, 32) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
// C = op (x, A'): transpose A and apply the operator with x bound first.
// GB_CAST_OP is redefined so GB_unop_transpose.c computes op(x, aij).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = GB_BITSET (x, aij, int32_t, 32) ; \
}
GrB_Info GB_bind1st_tran__bset_int32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the rest of the file; preprocessor directives are
// processed regardless of the early returns above
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
// C = op (A', y): transpose A and apply the operator with y bound second.
// GB_CAST_OP is redefined so GB_unop_transpose.c computes op(aij, y).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = GB_BITSET (aij, y, int32_t, 32) ; \
}
GrB_Info GB_bind2nd_tran__bset_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__log2_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__log2_fp64_fp64
// op(A') function: GB_unop_tran__log2_fp64_fp64
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = log2 (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = log2 (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = log2 (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOG2 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = log2 (Ax [p]) for all p, with no typecast (both sides are
// double).  Cx and Ax may be aliased: each entry is read once, then written.
GrB_Info GB_unop_apply__log2_fp64_fp64
(
    double *Cx,         // Cx and Ax may be aliased
    const double *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    // operator compiled out: fall back to the generic method
    return (GrB_NO_VALUE) ;
    #else
    // entries are independent, so the loop parallelizes trivially
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        Cx [k] = log2 (Ax [k]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = log2 (A'): transpose, then apply the unary operator.  The transpose
// loop lives in GB_unop_transpose.c and uses the GB_CAST_OP macro defined
// at the top of this auto-generated file.
GrB_Info GB_unop_tran__log2_fp64_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
LG_brutal_malloc.c | //------------------------------------------------------------------------------
// LG_brutal_malloc: brutal memory debugging
//------------------------------------------------------------------------------
// LAGraph, (c) 2021 by The LAGraph Contributors, All Rights Reserved.
// SPDX-License-Identifier: BSD-2-Clause
// See additional acknowledgments in the LICENSE file,
// or contact permission@sei.cmu.edu for the full terms.
// Contributed by Timothy A. Davis, Texas A&M University
//------------------------------------------------------------------------------
// To enable brutal memory debugging, these four functions must be passed to
// LAGr_Init.
#include "LG_internal.h"
//------------------------------------------------------------------------------
// global variables: LG_brutal and LG_nmalloc
//------------------------------------------------------------------------------
// If LG_brutal >= 0, then that value gives the # of malloc/calloc/realloc
// calls that will succeed. Each time malloc/calloc/realloc is called, the
// LG_brutal count is decremented. Once it reaches zero, no more memory
// allocations will occur, and LG_brutal_malloc, etc, all pretend to fail
// and return NULL.
// If LG_brutal is negative, the LG_brutal_malloc/calloc/realloc functions act
// like the regular malloc/calloc/realloc functions, with no pretend failures.
// LG_nmalloc is the count of the # of allocated blocks. It is incremented by
// LG_brutal_malloc/calloc and by LG_brutal_realloc if a new block is allocated.
// It is decremented by LG_brutal_free. After LAGraph_Finalize is called,
// this value should be zero. If nonzero, a memory leak has occured.
int64_t LG_brutal = -1 ;
int64_t LG_nmalloc = 0 ;
//------------------------------------------------------------------------------
// LG_brutal_malloc
//------------------------------------------------------------------------------
LAGRAPH_PUBLIC
void *LG_brutal_malloc // return pointer to allocated block of memory
(
size_t size // # of bytes to allocate
)
{
void *p ;
if (LG_brutal == 0)
{
// pretend to fail
p = NULL ;
}
else
{
// malloc a new block
// the critical section serializes updates to the global LG_brutal and
// LG_nmalloc counters across threads
#pragma omp critical (LG_brutal_malloc_critical)
{
// malloc the block of memory (of size at least 1 byte)
p = malloc (LAGRAPH_MAX (1, size)) ;
if (LG_brutal > 0)
{
// one step closer to pretending to fail
LG_brutal-- ;
}
if (p != NULL)
{
// one more block of memory successfully allocated
LG_nmalloc++ ;
}
}
}
return (p) ;
}
//------------------------------------------------------------------------------
// LG_brutal_calloc
//------------------------------------------------------------------------------
LAGRAPH_PUBLIC
void *LG_brutal_calloc // return pointer to allocated, zeroed block of memory
(
    size_t nitems,      // # of items to allocate
    size_t itemsize     // # of bytes per item
)
{
    // FIX: guard against overflow in nitems*itemsize.  A real calloc performs
    // this check; the unchecked multiplication could wrap around and silently
    // allocate a too-small block.
    if (itemsize != 0 && nitems > ((size_t) -1) / itemsize)
    {
        // treat the request as an allocation failure
        return (NULL) ;
    }
    size_t size = LAGRAPH_MAX (1, nitems * itemsize) ;
    void *p = LG_brutal_malloc (size) ;
    if (p != NULL)
    {
        // calloc semantics: zero the entire block
        memset (p, 0, size) ;
    }
    return (p) ;
}
//------------------------------------------------------------------------------
// LG_brutal_free
//------------------------------------------------------------------------------
LAGRAPH_PUBLIC
void LG_brutal_free
(
void *p // block to free
)
{
// free (NULL) would be legal, but skipping it keeps the LG_nmalloc
// accounting exact (only blocks that were counted are un-counted)
if (p != NULL)
{
#pragma omp critical (LG_brutal_malloc_critical)
{
// free the block
free (p) ;
// one less block of memory allocated
LG_nmalloc-- ;
}
}
}
//------------------------------------------------------------------------------
// LG_brutal_realloc
//------------------------------------------------------------------------------
LAGRAPH_PUBLIC
void *LG_brutal_realloc // return pointer to reallocated memory
(
void *p, // block to realloc
size_t size // new size of the block
)
{
if (p == NULL)
{
// malloc a new block
p = LG_brutal_malloc (size) ;
}
else
{
// realloc an existing block
#pragma omp critical (LG_brutal_malloc_critical)
{
if (LG_brutal == 0)
{
// pretend to fail
// NOTE(review): the original block is abandoned here (and below if the
// real realloc fails) without decrementing LG_nmalloc, so a forced
// failure surfaces as a "leak" in the counter -- presumably intentional
// for brutal testing; confirm against LAGr_Init's contract.
p = NULL ;
}
else
{
// realloc the block
// NOTE(review): classic p = realloc(p, ...) pattern; on failure the old
// pointer is lost to this function and NULL is returned to the caller
p = realloc (p, size) ;
if (LG_brutal > 0)
{
// one step closer to pretending to fail
LG_brutal-- ;
}
}
}
}
return (p) ;
}
|
reduction_average.c | #include<stdio.h>
#include<omp.h>
#define MAX 5
int main() {
    // Fill an array with 1..MAX, sum it with an OpenMP reduction, and print
    // the arithmetic mean.
    double data[MAX];
    for (int k = 0; k < MAX; k++) {
        data[k] = k + 1.0;
    }
    double total = 0.0;
    #pragma omp parallel for reduction(+: total)
    for (int k = 0; k < MAX; k++) {
        total += data[k];
    }
    const double mean = total / MAX;
    printf("%f\n", mean);
    return 0;
}
|
switch-1.c | #include <assert.h>
#define s 100
#pragma omp declare target
/* Map a handful of selected values (1, 33, 55); everything else is -1. */
int
switch1 (int a)
{
  if (a == 1)
    return 11;
  if (a == 33)
    return 333;
  if (a == 55)
    return 55;
  return -1;
}
/* Same mapping as the original GNU case-range switch (1..11 -> 11,
   33 -> 333, 55 -> 55, otherwise -1), written as explicit comparisons. */
int
switch2 (int a)
{
  if (a >= 1 && a <= 11)
    return 11;
  if (a == 33)
    return 333;
  if (a == 55)
    return 55;
  return -1;
}
/* Bucket a into blocks of 11.  Everything outside 1..33 maps to 44: in the
   original, the 34..44 case and the default both return 44. */
int
switch3 (int a)
{
  if (a >= 1 && a <= 11)
    return 11;
  if (a >= 12 && a <= 22)
    return 22;
  if (a >= 23 && a <= 33)
    return 33;
  return 44;
}
/* Alternate between returning a and b per 11-wide bucket of a;
   out-of-range inputs yield the sentinel 12345. */
int
switch4 (int a, int b)
{
  if (a >= 1 && a <= 11)
    return a;
  if (a >= 12 && a <= 22)
    return b;
  if (a >= 23 && a <= 33)
    return a;
  if (a >= 34 && a <= 44)
    return b;
  return 12345;
}
/* Bucket a into 1..2 -> 1, 3..4 -> 2, 5..6 -> 3, 7..11 -> 4; everything
   else falls through to -1.  b is unused; the signature is kept for the
   offload test harness. */
int
switch5 (int a, int b)
{
  (void) b;
  if (a >= 1 && a <= 2)
    return 1;
  if (a >= 3 && a <= 4)
    return 2;
  if (a >= 5 && a <= 6)
    return 3;
  if (a >= 7 && a <= 11)
    return 4;
  return -1;
}
#pragma omp end declare target
/* Run each switch variant inside an OpenMP target region and verify the
   device results against host recomputation.  FIX: the signature was
   "int main (int argc)", which is not a valid form of main (and argc was
   never used); use the parameterless form. */
int
main (void)
{
  int array[s];
  #pragma omp target map(tofrom : array[:s])
  {
    for (int i = 0; i < s; i++)
      array[i] = switch1 (i);
  }
  for (int i = 0; i < s; i++)
    assert (array[i] == switch1 (i));
  #pragma omp target map(tofrom : array[:s])
  {
    for (int i = 0; i < s; i++)
      array[i] = switch2 (i);
  }
  for (int i = 0; i < s; i++)
    assert (array[i] == switch2 (i));
  #pragma omp target map(tofrom : array[:s])
  {
    for (int i = 0; i < s; i++)
      array[i] = switch3 (i);
  }
  for (int i = 0; i < s; i++)
    assert (array[i] == switch3 (i));
  #pragma omp target map(tofrom : array[:s])
  {
    for (int i = 0; i < s; i++)
      array[i] = switch4 (i, i + 1);
  }
  for (int i = 0; i < s; i++)
    assert (array[i] == switch4 (i, i + 1));
  #pragma omp target map(tofrom : array[:s])
  {
    for (int i = 0; i < s; i++)
      array[i] = switch5 (i, i + 1);
  }
  for (int i = 0; i < s; i++)
    assert (array[i] == switch5 (i, i + 1));
}
|
convolution_1x1_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// 1x1 stride-1 convolution, pack-4 MSA path.  A 1x1/s1 convolution is a
// plain GEMM over all spatial positions, so the blob is viewed as a single
// row of w*h "pixels" and handed to the im2col/sgemm kernel.
static void conv1x1s1_sgemm_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
const int size = w * h;
// header-only reshape: only w/h change -- presumably a shallow copy with
// no data movement (confirm Mat copy semantics)
Mat bottom_im2col = bottom_blob;
bottom_im2col.w = size;
bottom_im2col.h = 1;
im2col_sgemm_pack4_msa(bottom_im2col, top_blob, kernel, _bias, opt);
}
// 1x1 stride-2 convolution, pack-4 MSA path: first shrink the input by
// keeping every other pixel in x and y, then run the stride-1 sgemm kernel
// on the shrunken blob.
static void conv1x1s2_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
// bytes-free row hop: the unread tail of the current row (w - 2*outw)
// plus one full skipped row (w), times 4 floats per pack-4 pixel
const int tailstep = (w - 2 * outw + w) * 4;
Mat bottom_blob_shrinked;
bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < channels; p++)
{
const float* r0 = bottom_blob.channel(p);
float* outptr = bottom_blob_shrinked.channel(p);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
// copy one pack-4 pixel, then advance 2 pixels (8 floats): stride 2 in x
v4f32 _val = (v4f32)__msa_ld_w(r0, 0);
__msa_st_w((v4i32)_val, outptr, 0);
r0 += 4 * 2;
outptr += 4;
}
r0 += tailstep;
}
}
conv1x1s1_sgemm_pack4_msa(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
|
map_alm_qu_eb.h | /**************************************************************
Compute map2alm_pol and alm2map_pol for QU and EB only
***************************************************************/
#ifdef UPDATE_HEALPIX3_60_SOLVED
#include <vector>
//#include "alm_healpix_tools.h"
//#include "alm_map_tools.h"
#include "alm.h"
#include "arr.h"
// #include "fftpack_support.h"
#include "ylmgen.h"
#include "healpix_map.h"
#include "xcomplex.h"
using namespace std;
/*! A class holding information about a ring of pixels in a spherical map. */
class ringinfo
{
public:
// theta/phi0: ring colatitude and first-pixel longitude (radians);
// weight: per-pixel quadrature weight; cth/sth: cached cos/sin(theta)
double theta, phi0, weight, cth, sth;
// nph: number of pixels in the ring; ofs: index of the first ring pixel
// in the full map array
int nph, ofs;
// a default-constructed ring is "empty" (nph == 0); ringpair relies on this
ringinfo()
: nph(0) {}
//! Constructs a \a ringinfo object.
// \param theta_ colatitude of the ring (in radian)
// \param phi0_ longitude of the first pixel in the ring (in radian)
// \param weight_ weighting factor for all pixels in the ring. This is typically the surface of a pixel in sterad.
// \note \a weight_ is only needed for map analysis, not synthesis.
// \param nph_ number of pixels in the ring
// \param ofs_ index of the first ring pixel in the total map array (counting from zero)
ringinfo (double theta_, double phi0_, double weight_, int nph_, int ofs_)
: theta(theta_), phi0(phi0_), weight(weight_),
cth(cos(theta)), sth(sin(theta)), nph(nph_), ofs(ofs_)
{}
};
/*! A class holding information about a ring pair in a spherical map. */
class ringpair
{
public:
// r1/r2: the north/south members of a mirror-symmetric ring pair; r2 is
// left default-constructed (nph == 0) for an unpaired equatorial ring
ringinfo r1, r2;
//! Initialize the object with the ring described by \a info. The second ring is left empty.
ringpair (const ringinfo &info)
: r1(info) {}
//! Initialize the object with the rings described by \a info1 and \a info2.
// \note The colatitude of \a info2 must be \f$\pi\f$ minus the colatitude of \a info1.
ringpair (const ringinfo &info1,const ringinfo &info2)
: r1(info1), r2(info2)
{
planck_assert( approx( r1.theta, pi-r2.theta, 1e-10 ), "invalid ringpair" );
}
};
namespace {
struct info_comparator
{
// order rings by sin(colatitude), i.e. from the poles toward the equator
inline bool operator()( const ringinfo &a, const ringinfo &b ){ return a.sth<b.sth; }
};
struct pair_comparator
{
// order ring pairs by ring length first, then by starting longitude
inline bool operator()(const ringpair &a, const ringpair &b)
{
if( a.r1.nph == b.r1.nph )
{
return a.r1.phi0<b.r1.phi0;
}
else
{
return a.r1.nph<b.r1.nph;
}
}
};
// Precompute lam_fact[l] = 2*sqrt((2l+1)/(2l-1) * (l^2 - m^2)) for l >= m,
// the recursion factor used by the spin-2 lambda_w/lambda_x terms; zero for
// l < 2 (the spin-2 harmonics vanish there).
void init_lam_fact_1d( int m, arr<double> &lam_fact )
{
for( int l=m; l < (int) lam_fact.size(); ++l)
{
lam_fact[l] = (l<2) ? 0. : 2*sqrt((2*l+1.)/(2*l-1.) * (l*l-m*m));
}
}
// Derivative variant of init_lam_fact_1d: same factor without the leading 2
// and without the l < 2 cutoff; lam_fact[m] is zeroed explicitly since the
// loop starts at l = m+1.
void init_lam_fact_deriv_1d( int m, arr<double> &lam_fact )
{
lam_fact[m]=0;
for( int l=m+1; l < (int) lam_fact.size(); ++l)
{
lam_fact[l] = sqrt((2*l+1.)/(2*l-1.) * (l*l-m*m));
}
}
// Precompute normal_l[l] = sqrt(1/((l+2)(l+1)l(l-1))), the spin-2
// normalisation factor; zero for l < 2 where the expression is undefined.
void init_normal_l( arr<double> &normal_l )
{
for( int l=0; l < (int) normal_l.size(); ++l)
{
normal_l[l] = (l<2) ? 0. : sqrt(1./((l+2.)*(l+1.)*l*(l-1.)));
}
}
// Split nrings rings into roughly equal chunks: at most ~10 chunks, each
// sized at least 100 rings (except possibly the last, via ceiling division).
void get_chunk_info( int nrings, int &nchunks, int &chunksize )
{
    const int target = std::max( 100, nrings/10 );
    nchunks = nrings/target + 1;
    chunksize = (nrings + nchunks - 1)/nchunks;   // ceil(nrings/nchunks)
}
// Helper that Fourier-transforms between per-m phase coefficients and the
// pixel values of a single ring (both directions).  It caches the FFT plan,
// the phase-shift table for rings whose first pixel is not at phi = 0, and a
// scratch buffer, so one instance should be reused across rings.
class ringhelper
{
private:
double phi0_;
arr<xcomplex<double> > shiftarr, work;
rfft plan;
bool norot;
// (Re)build the caches for a ring of nph pixels starting at longitude phi0.
void update( int nph, int mmax, double phi0 )
{
norot = ( abs(phi0) < 1e-14 );
if( !norot )
{
if( ( mmax != (int) shiftarr.size()-1 ) || ( !approx( phi0, phi0_, 1e-12 ) ) )
{
shiftarr.alloc(mmax+1);
phi0_ = phi0;
for( int m=0; m <= mmax; ++m)
{
// shiftarr[m] = exp(i*m*phi0)
// NOTE(review): xcomplex<REAL> here vs xcomplex<double> elsewhere --
// presumably REAL is double in this build; confirm.
shiftarr[m] = xcomplex<REAL> (cos(m*phi0), sin(m*phi0) );
}
}
}
if( nph != (int) plan.size() )
{
plan.Set(nph);
}
if( nph > (int) work.size() )
{
work.alloc(2*nph);
}
}
public:
ringhelper() : phi0_(0), norot(true) {}
// Synthesis: turn mmax+1 phase coefficients into the nph real pixel values
// of one ring.  m values beyond nph alias onto work[m%nph] (and their
// conjugates onto the mirrored slot) before the inverse FFT.
template<typename T> void phase2ring( int nph, int mmax, double phi0, const xcomplex<double> *phase, T *ring )
{
update( nph, mmax, phi0 );
for( int m=1; m < nph; ++m )
{
work[m]=0;
}
work[0] = phase[0];
if(norot)
{
for( int m=1; m <= mmax; ++m )
{
work[m%nph] += phase[m];
work[nph-1-((m-1)%nph)] += conj(phase[m]);
}
}
else
{
for( int m=1; m <= mmax; ++m )
{
xcomplex<double> tmp = phase[m]*shiftarr[m];
work[m%nph] += tmp;
work[nph-1-((m-1)%nph)] += conj(tmp);
}
}
plan.backward_c( work );
for( int m=0; m < nph; ++m )
{
ring[m] = real(work[m]);
}
}
// Convenience overload: skip empty rings (nph == 0) and address the ring's
// pixels inside the full map via info.ofs.
template<typename T> void phase2ring( int mmax, const xcomplex<double> *phase, const ringinfo &info, T *data )
{
if( info.nph > 0 )
{
phase2ring( info.nph, mmax, info.phi0, phase, data+info.ofs );
}
}
// Synthesis for both rings of a pair.
template<typename T> void phase2pair( int mmax, const xcomplex<double> *phase1, const xcomplex<double> *phase2, const ringpair &pair, T *data )
{
phase2ring( mmax, phase1, pair.r1, data );
phase2ring( mmax, phase2, pair.r2, data );
}
// Analysis: weighted forward FFT of one ring, then read off the mmax+1
// phase coefficients (with aliasing and optional phase shift, note -phi0).
template<typename T> void ring2phase( int nph, int mmax, double phi0, double weight, const T *ring, xcomplex<double> *phase )
{
update( nph, mmax, -phi0 );
for( int m=0; m < nph; ++m )
{
work[m] = ring[m]*weight;
}
plan.forward_c( work );
if( norot )
{
for( int m=0; m <= mmax; ++m )
{
phase[m] = work[m%nph];
}
}
else
{
for( int m=0; m <= mmax; ++m )
{
phase[m] = work[m%nph]*shiftarr[m];
}
}
}
// Convenience overload: skip empty rings and apply the ring's offset/weight.
template<typename T> void ring2phase( int mmax, const ringinfo &info, const T *data, xcomplex<double> *phase )
{
if( info.nph > 0 )
{
ring2phase( info.nph, mmax, info.phi0, info.weight, data+info.ofs, phase );
}
}
// Analysis for both rings of a pair.
template<typename T> void pair2phase( int mmax, const ringpair &pair, const T *data, xcomplex<double> *phase1, xcomplex<double> *phase2 )
{
ring2phase( mmax, pair.r1, data, phase1 );
ring2phase( mmax, pair.r2, data, phase2 );
}
};
// Build the north/south ring pairs of a HEALPix map.  weight[m] is the
// quadrature weight of ring m+1, scaled here by the pixel solid angle
// 4*pi/Npix.  Rings 1 .. 2*nside-1 are paired with their southern mirrors;
// the equatorial ring (2*nside) is its own mirror and is pushed unpaired.
void healpix2ringpairs( const Healpix_Base &base, const arr<double> &weight, std::vector<ringpair> &pair )
{
pair.clear();
int startpix, ringpix;
double theta, wgt, phi0;
bool shifted;
int nside = base.Nside();
for(int m=0; m < 2*nside-1; ++m)
{
base.get_ring_info2( m+1, startpix, ringpix, theta, shifted );
wgt = weight[m]*fourpi/base.Npix();
// shifted rings start half a pixel away from phi = 0
phi0 = shifted ? pi/ringpix : 0;
pair.push_back( ringpair( ringinfo( theta, phi0, wgt, ringpix, startpix ), ringinfo( pi-theta, phi0, wgt, ringpix, base.Npix()-startpix-ringpix ) ) );
}
base.get_ring_info2( 2*nside, startpix, ringpix, theta, shifted );
wgt = weight[2*nside-1]*fourpi/base.Npix();
phi0 = shifted ? pi/ringpix : 0;
pair.push_back( ringpair( ringinfo( theta, phi0, wgt, ringpix, startpix ) ) );
}
// Convenience overload: unit quadrature weights for every ring.
void healpix2ringpairs( const Healpix_Base &base, std::vector<ringpair> &pair )
{
arr<double> wgt( 2*base.Nside() );
wgt.fill(1);
healpix2ringpairs( base, wgt, pair );
}
} // namespace
/*
void info2pair( const std::vector<ringpair> &info, std::vector<ringpair> &pair )
{
pair.clear();
vector<ringinfo> info2=info;
sort( info2.begin(), info2.end(), info_comparator() );
unsigned int pos=0;
while( pos < info2.size()-1 )
{
if( approx( info2[pos].cth, -info2[pos+1].cth, 1e-12 ) )
{
pair.push_back( ringpair( info2[pos], info2[pos+1] ) );
pos += 2;
}
else
{
pair.push_back( ringpair( info2[pos] ) );
++pos;
}
}
if( pos < info2.size() )
{
pair.push_back(info2[pos]);
}
sort( pair.begin(), pair.end(), pair_comparator() );
}
*/
/***************************************************************************************************************************/
//void alm2map_pol_QU( /*const Alm<xcomplex<T> > &almT,*/ const Alm<xcomplex<T> > &almE, const Alm<xcomplex<T> > &almB, const vector<ringpair> &pair, /* T *mapT,*/ T *mapQ, T *mapU )
template<typename T> void alm2map_pol_QU( const Alm<xcomplex<T> > &almE, const Alm<xcomplex<T> > &almB, const std::vector<ringpair> &pair, T *mapQ, T *mapU )
{
int lmax = almE.Lmax();
int mmax = almE.Mmax();
planck_assert( almE.conformable(almB), "alm2map_pol: a_lm are not conformable" );
arr<double> normal_l (lmax+1);
init_normal_l( normal_l );
int nchunks, chunksize;
get_chunk_info( pair.size(), nchunks, chunksize );
arr2<xcomplex<double> > phas1Q(chunksize,mmax+1), phas2Q(chunksize,mmax+1), phas1U(chunksize,mmax+1), phas2U(chunksize,mmax+1);//phas1T(chunksize,mmax+1), phas2T(chunksize,mmax+1)
for(int chunk=0; chunk < nchunks; ++chunk)
{
int llim = chunk*chunksize, ulim = min( llim+chunksize, int(pair.size()) );
#pragma omp parallel
{
Ylmgen generator( lmax, mmax, 1e-30 );
arr<double> Ylm;
arr<double> lam_fact (lmax+1);
arr<xcomplex<double>[2]> alm_tmp(lmax+1);
int m;
#pragma omp for schedule(dynamic,1)
for( m=0; m <= mmax; ++m )
{
int m2 = m*m;
init_lam_fact_1d( m, lam_fact );
for(int l=m; l <= lmax; ++l)
{
//alm_tmp[l][0] = almT(l,m);
// alm_tmp[l][0].re = almE(l,m).re *(-normal_l[l]);
// alm_tmp[l][0].im = almE(l,m).im *(-normal_l[l]);
alm_tmp[l][0] = almE(l,m) * xcomplex<REAL>(-normal_l[l],0.);
// alm_tmp[l][1].re = almB(l,m).re*(-normal_l[l]);
//alm_tmp[l][1].im = almB(l,m).im*(-normal_l[l]);
alm_tmp[l][1] = almB(l,m) * xcomplex<REAL>(-normal_l[l],0.);
}
for(int ith=0; ith < ulim-llim; ++ith)
{
double cth=pair[ith+llim].r1.cth, sth=pair[ith+llim].r1.sth;
int l;
generator.get_Ylm( cth, sth, m, Ylm, l );
if( l <= lmax )
{
double one_on_s2 = 1/(sth*sth);
double c_on_s2 = cth * one_on_s2;
double two_on_s2 = 2*one_on_s2;
double m_on_s2 = m*one_on_s2;
double twocth = 2*cth;
if( pair[ith+llim].r2.nph > 0 )
{
xcomplex<double> Q1=0, Q2=0, U1=0, U2=0;//T1=0, T2=0
double lam_lm = 0;
if( (l-m)&1 )
{
//ALM2MAP_POL_MACRO_QU(Q2,Q1,U2,U1)
double lam_lm1m = lam_lm;
lam_lm = Ylm[l];
double t1 = lam_lm1m*lam_fact[l];
double a_w = (l-m2)*two_on_s2 + l*(l-1);
double a_x = twocth*(l-1)*lam_lm;
xcomplex<double> lambda_w = xcomplex<double>(a_w*lam_lm - t1*c_on_s2,0.);
xcomplex<double> lambda_x = xcomplex<double>(m_on_s2 * (a_x-t1),0.);
// Q2.re += alm_tmp[l][0].re*lambda_w;
// Q2.im += alm_tmp[l][0].im*lambda_w;
Q2 += alm_tmp[l][0] * lambda_w;
// U2.re -= alm_tmp[l][1].re*lambda_w;
// U2.im -= alm_tmp[l][1].im*lambda_w;
U2 -= alm_tmp[l][1] * lambda_w;
// Q1.re -= alm_tmp[l][1].im*lambda_x;
// Q1.im += alm_tmp[l][1].re*lambda_x;
Q1 += xcomplex<REAL>( - imag(alm_tmp[l][1]) lambda_x), real(alm_tmp[l][1] * lambda_x);
// U1.re -= alm_tmp[l][0].im*lambda_x;
// U1.im += alm_tmp[l][0].re*lambda_x;
U1 += xcomplex<REAL>( - imag(alm_tmp[l][0] * lambda_x), real(alm_tmp[l][0]* lambda_x));
++l;
}
for( ; l < lmax ; )
{
//ALM2MAP_POL_MACRO_QU(Q1,Q2,U1,U2)
double lam_lm1m = lam_lm;
lam_lm = Ylm[l];
double t1 = lam_lm1m*lam_fact[l];
double a_w = (l-m2)*two_on_s2 + l*(l-1);
double a_x = twocth*(l-1)*lam_lm;
xcomplex<double> lambda_w = xcomplex<double> (a_w*lam_lm - t1*c_on_s2,0.);
xcomplex<double> lambda_x = xcomplex<double> (m_on_s2 * (a_x-t1),0.);
// Q1.re += alm_tmp[l][0].re*lambda_w;
// Q1.im += alm_tmp[l][0].im*lambda_w;
Q1 += alm_tmp[l][0] * lambda_w;
// U1.re -= alm_tmp[l][1].re*lambda_w;
// U1.im -= alm_tmp[l][1].im*lambda_w;
U1 += alm_tmp[l][1] * lambda_w;
// Q2.re -= alm_tmp[l][1].im*lambda_x;
// Q2.im += alm_tmp[l][1].re*lambda_x;
Q2 += xcomplex<REAL>( - imag(alm_tmp[l][1] * lambda_x), real(alm_tmp[l][1]* lambda_x));
// U2.re -= alm_tmp[l][0].im*lambda_x;
// U2.im += alm_tmp[l][0].re*lambda_x;
U2 += xcomplex<REAL>( - imag(alm_tmp[l][0] * lambda_x), real(alm_tmp[l][0]* lambda_x));
++l;
//ALM2MAP_POL_MACRO_QU(Q2,Q1,U2,U1)
lam_lm1m = lam_lm;
lam_lm = Ylm[l];
t1 = lam_lm1m*lam_fact[l];
a_w = (l-m2)*two_on_s2 + l*(l-1);
a_x = twocth*(l-1)*lam_lm;
lambda_w = a_w*lam_lm - t1*c_on_s2;
lambda_x = m_on_s2 * (a_x-t1);
// Q2.re += alm_tmp[l][0].re*lambda_w;
// Q2.im += alm_tmp[l][0].im*lambda_w;
Q2 += alm_tmp[l][0] * xcomplex<REAL>(lambda_w,0.);
// U2.re -= alm_tmp[l][1].re*lambda_w;
// U2.im -= alm_tmp[l][1].im*lambda_w;
U2 -= alm_tmp[l][1] * xcomplex<REAL>(lambda_w,0.);
// Q1.re -= alm_tmp[l][1].im*lambda_x;
// Q1.im += alm_tmp[l][1].re*lambda_x;
Q1 += xcomplex<REAL>( - imag(alm_tmp[l][1] * lambda_x), real(alm_tmp[l][1] * lambda_x));
// U1.re -= alm_tmp[l][0].im*lambda_x;
// U1.im += alm_tmp[l][0].re*lambda_x;
U1 += xcomplex<REAL>( - imag(alm_tmp[l][0] * lambda_x), real(alm_tmp[l][0]* lambda_x));
++l;
}
if( l == lmax )
{
//ALM2MAP_POL_MACRO_QU(Q1,Q2,U1,U2)
double lam_lm1m = lam_lm;
lam_lm = Ylm[l];
double t1 = lam_lm1m*lam_fact[l];
double a_w = (l-m2)*two_on_s2 + l*(l-1);
double a_x = twocth*(l-1)*lam_lm;
xcomplex<double> lambda_w = xcomplex<double>(a_w*lam_lm - t1*c_on_s2,0.);
xcomplex<double> lambda_x = xcomplex<double>(m_on_s2 * (a_x-t1),0.);
// Q1.re += alm_tmp[l][0].re*lambda_w;
// Q1.im += alm_tmp[l][0].im*lambda_w;
Q1 += alm_tmp[l][0] * lambda_w;
// U1.re -= alm_tmp[l][1].re*lambda_w;
// U1.im -= alm_tmp[l][1].im*lambda_w;
U1 -= alm_tmp[l][1] * lambda_w;
// Q2.re -= alm_tmp[l][1].im*lambda_x;
// Q2.im += alm_tmp[l][1].re*lambda_x;
Q2 += xcomplex<REAL>( - imag(alm_tmp[l][1]) * lambda_x, real(alm_tmp[l][1])* lambda_x);
// U2.re -= alm_tmp[l][0].im*lambda_x;
// U2.im += alm_tmp[l][0].re*lambda_x;
U2 += xcomplex<REAL>( - imag(alm_tmp[l][0]) * lambda_x, real(alm_tmp[l][0]) * lambda_x);
++l;
}
//phas1T[ith][m] = T1+T2;
//phas2T[ith][m] = T1-T2;
phas1Q[ith][m] =-Q1-Q2;
phas2Q[ith][m] =-Q1+Q2;
phas1U[ith][m] = U1+U2;
phas2U[ith][m] = U1-U2;
}
else
{
xcomplex<double> Q1=0, U1=0;//T1=0,
double lam_lm = 0;
for( ; l <= lmax; )
{
//ALM2MAP_POL_MACRO_QU(Q1,Q1,U1,U1)
double lam_lm1m = lam_lm;
lam_lm = Ylm[l];
double t1 = lam_lm1m*lam_fact[l];
double a_w = (l-m2)*two_on_s2 + l*(l-1);
double a_x = twocth*(l-1)*lam_lm;
xcomplex<REAL> lambda_w = xcomplex<REAL>(a_w*lam_lm - t1*c_on_s2,0.);
xcomplex<REAL> lambda_x = xcomplex<REAL>(m_on_s2 * (a_x-t1),0.);
// Q1.re += alm_tmp[l][0].re*lambda_w;
// Q1.im += alm_tmp[l][0].im*lambda_w;
Q1 += alm_tmp[l][0] * lambda_w;
// U1.re -= alm_tmp[l][1].re*lambda_w;
// U1.im -= alm_tmp[l][1].im*lambda_w;
U1 -= alm_tmp[l][1] * lambda_w;
// JLS: is there a bug here: Q1 and U1 again.
// Q1.re -= alm_tmp[l][1].im*lambda_x;
// Q1.im += alm_tmp[l][1].re*lambda_x;
Q1 += xcomplex<REAL>( - imag(alm_tmp[l][1]) * lambda_x, real(alm_tmp[l][1])* lambda_x);
// U1.re -= alm_tmp[l][0].im*lambda_x;
// U1.im += alm_tmp[l][0].re*lambda_x;
U1 += xcomplex<REAL>( - imag(alm_tmp[l][0]) * lambda_x, real(alm_tmp[l][0])* lambda_x);
++l;
}
//phas1T[ith][m] = T1;
phas1Q[ith][m] =-Q1;
phas1U[ith][m] = U1;
}
}
else
{
//phas1T[ith][m] = phas2T[ith][m] = 0;
phas1Q[ith][m] = phas2Q[ith][m] = 0;
phas1U[ith][m] = phas2U[ith][m] = 0;
}
}
}
} // end of parallel region
#pragma omp parallel
{
ringhelper helper;
int ith;
#pragma omp for schedule(dynamic,1)
for( ith=llim; ith < ulim; ++ith )
{
//helper.phase2pair( mmax, phas1T[ith-llim], phas2T[ith-llim], pair[ith], mapT );
helper.phase2pair( mmax, phas1Q[ith-llim], phas2Q[ith-llim], pair[ith], mapQ );
helper.phase2pair( mmax, phas1U[ith-llim], phas2U[ith-llim], pair[ith], mapU );
}
} // end of parallel region
}
}
template void alm2map_pol_QU( const Alm<xcomplex<float> > &almE, const Alm<xcomplex<float> > &almB, const std::vector<ringpair> &pair, float *mapQ, float *mapU );
template void alm2map_pol_QU( const Alm<xcomplex<double> > &almE, const Alm<xcomplex<double> > &almB, const std::vector<ringpair> &pair, double *mapQ, double *mapU );
//void map2alm_pol_QU( const vector<ringpair> &pair, /*const T *mapT,*/ const T *mapQ, const T *mapU, /*Alm<xcomplex<T> > &almT,*/ Alm<xcomplex<T> > &almE, Alm<xcomplex<T> > &almB, bool add_alm )
//----------------------------------------------------------------------------
// map2alm_pol_QU: spherical-harmonic analysis of polarised Q/U maps.
//
// Accumulates the E/B a_lm coefficients from the Q and U pixel arrays
// described by the ring pairs in `pair`.  Rings are processed in chunks:
// each chunk is first FFT'd into Fourier phases, then the spin-2 harmonic
// recurrence accumulates per-m contributions, which are finally scaled by
// normal_l and added into almE/almB.
//
// pair    : ring-pair geometry of the map
// mapQ/U  : input Stokes Q and U pixel arrays
// almE/B  : output a_lm (must be conformable); zeroed first unless add_alm
// add_alm : when true, accumulate on top of the existing a_lm
template<typename T> void map2alm_pol_QU( const std::vector<ringpair> &pair, const T *mapQ, const T *mapU, Alm<xcomplex<T> > &almE, Alm<xcomplex<T> > &almB, bool add_alm )
{
  planck_assert( almE.conformable(almB), "map2alm_pol: a_lm are not conformable" );
  int lmax = almE.Lmax(), mmax = almE.Mmax();
  arr<double> normal_l (lmax+1);
  init_normal_l( normal_l );
  int nchunks, chunksize;
  get_chunk_info( pair.size(), nchunks, chunksize );
  arr2<xcomplex<double> > phas1Q(chunksize,mmax+1), phas2Q(chunksize,mmax+1), phas1U(chunksize,mmax+1), phas2U(chunksize,mmax+1);
  if( !add_alm )
  {
    almE.SetToZero();
    almB.SetToZero();
  }
  for( int chunk=0; chunk < nchunks; ++chunk )
  {
    int llim = chunk*chunksize, ulim = min( llim+chunksize, int(pair.size()) );
    // Stage 1: FFT each ring pair of the chunk into Fourier phases.
    #pragma omp parallel
    {
      ringhelper helper;
      int ith;
      #pragma omp for schedule(dynamic,1)
      for( ith=llim; ith < ulim; ++ith)
      {
        helper.pair2phase( mmax, pair[ith], mapQ, phas1Q[ith-llim], phas2Q[ith-llim] );
        helper.pair2phase( mmax, pair[ith], mapU, phas1U[ith-llim], phas2U[ith-llim] );
      }
    } // end of parallel region
    // Stage 2: run the spin-2 recurrence for every m and accumulate.
    #pragma omp parallel
    {
      Ylmgen generator( lmax, mmax, 1e-30 );
      arr<double> Ylm;
      arr<double> lam_fact(lmax+1);
      arr<xcomplex<double>[2] > alm_tmp(lmax+1);
      int m;
      #pragma omp for schedule(dynamic,1)
      for( m=0; m <= mmax; ++m )
      {
        init_lam_fact_1d( m, lam_fact );
        for( int l=m; l < (int) alm_tmp.size(); ++l )
        {
          alm_tmp[l][0] = alm_tmp[l][1] = 0;
        }
        for( int ith=0; ith < ulim-llim; ++ith )
        {
          int l;
          double cth=pair[ith+llim].r1.cth, sth=pair[ith+llim].r1.sth;
          generator.get_Ylm( cth, sth, m, Ylm, l );
          if( l <= lmax )
          {
            double one_on_s2 = 1/(sth*sth);
            double c_on_s2 = cth * one_on_s2;
            double two_on_s2 = 2*one_on_s2;
            double twocth = 2*cth;
            int m2 = m*m;
            double m_on_s2 = m*one_on_s2;
            if( pair[ith+llim].r2.nph > 0 )
            {
              // Two physical rings: combine phases into even (1) / odd (2)
              // parity parts so each l-parity step consumes the right one.
              xcomplex<double> Q1 = phas1Q[ith][m]+phas2Q[ith][m], Q2 = phas1Q[ith][m]-phas2Q[ith][m], U1 = phas1U[ith][m]+phas2U[ith][m], U2 = phas1U[ith][m]-phas2U[ith][m];
              double lam_lm = 0;
              if( (l-m)&1 )
              {
                // odd (l-m) start: MAP2ALM_POL_MACRO_QU(Q2,Q1,U2,U1)
                double lam_lm1m=lam_lm;
                lam_lm=Ylm[l];
                double t1 = lam_lm1m*lam_fact[l];
                double a_w = (l-m2)*two_on_s2 + l*(l-1);
                double a_x = twocth*(l-1)*lam_lm;
                double lambda_w = a_w*lam_lm - t1*c_on_s2;
                double lambda_x = m_on_s2 * (a_x-t1);
                // BUG FIX: previously the Q2 real/imag parts were multiplied
                // by lambda_x here AND Q2*lambda_w was added on a second
                // line, double-counting the lambda_x cross term.  Use the
                // same single-statement form as every other parity step.
                alm_tmp[l][0] += xcomplex<REAL>(Q2.real()*lambda_w - U1.imag()*lambda_x, Q2.imag()*lambda_w + U1.real()*lambda_x);
                alm_tmp[l][1] += xcomplex<REAL>(U2.real()*lambda_w + Q1.imag()*lambda_x, U2.imag()*lambda_w - Q1.real()*lambda_x);
                ++l;
              }
              for( ; l < lmax; )
              {
                // even step: MAP2ALM_POL_MACRO_QU(Q1,Q2,U1,U2)
                double lam_lm1m=lam_lm;
                lam_lm=Ylm[l];
                double t1 = lam_lm1m*lam_fact[l];
                double a_w = (l-m2)*two_on_s2 + l*(l-1);
                double a_x = twocth*(l-1)*lam_lm;
                double lambda_w = a_w*lam_lm - t1*c_on_s2;
                double lambda_x = m_on_s2 * (a_x-t1);
                alm_tmp[l][0] += xcomplex<REAL>(Q1.real()*lambda_w - U2.imag()*lambda_x, Q1.imag()*lambda_w + U2.real()*lambda_x);
                alm_tmp[l][1] += xcomplex<REAL>(U1.real()*lambda_w + Q2.imag()*lambda_x, U1.imag()*lambda_w - Q2.real()*lambda_x);
                ++l;
                // odd step: MAP2ALM_POL_MACRO_QU(Q2,Q1,U2,U1)
                lam_lm1m=lam_lm;
                lam_lm=Ylm[l];
                t1 = lam_lm1m*lam_fact[l];
                a_w = (l-m2)*two_on_s2 + l*(l-1);
                a_x = twocth*(l-1)*lam_lm;
                lambda_w = a_w*lam_lm - t1*c_on_s2;
                lambda_x = m_on_s2 * (a_x-t1);
                alm_tmp[l][0] += xcomplex<REAL>(Q2.real()*lambda_w - U1.imag()*lambda_x, Q2.imag()*lambda_w + U1.real()*lambda_x);
                alm_tmp[l][1] += xcomplex<REAL>(U2.real()*lambda_w + Q1.imag()*lambda_x, U2.imag()*lambda_w - Q1.real()*lambda_x);
                ++l;
              }
              if( l == lmax )
              {
                // final even step: MAP2ALM_POL_MACRO_QU(Q1,Q2,U1,U2)
                double lam_lm1m=lam_lm;
                lam_lm=Ylm[l];
                double t1 = lam_lm1m*lam_fact[l];
                double a_w = (l-m2)*two_on_s2 + l*(l-1);
                double a_x = twocth*(l-1)*lam_lm;
                double lambda_w = a_w*lam_lm - t1*c_on_s2;
                double lambda_x = m_on_s2 * (a_x-t1);
                alm_tmp[l][0] += xcomplex<REAL>(Q1.real()*lambda_w - U2.imag()*lambda_x, Q1.imag()*lambda_w + U2.real()*lambda_x);
                alm_tmp[l][1] += xcomplex<REAL>(U1.real()*lambda_w + Q2.imag()*lambda_x, U1.imag()*lambda_w - Q2.real()*lambda_x);
                ++l;
              }
            }
            else
            {
              // Single ring (no mirror partner): no parity split.
              xcomplex<double> Q1 = phas1Q[ith][m], U1 = phas1U[ith][m];
              double lam_lm = 0;
              for( ; l <= lmax; )
              {
                // MAP2ALM_POL_MACRO_QU(Q1,Q1,U1,U1)
                double lam_lm1m=lam_lm;
                lam_lm=Ylm[l];
                double t1 = lam_lm1m*lam_fact[l];
                double a_w = (l-m2)*two_on_s2 + l*(l-1);
                double a_x = twocth*(l-1)*lam_lm;
                double lambda_w = a_w*lam_lm - t1*c_on_s2;
                double lambda_x = m_on_s2 * (a_x-t1);
                alm_tmp[l][0] += xcomplex<REAL>(Q1.real()*lambda_w - U1.imag()*lambda_x, Q1.imag()*lambda_w + U1.real()*lambda_x);
                alm_tmp[l][1] += xcomplex<REAL>(U1.real()*lambda_w + Q1.imag()*lambda_x, U1.imag()*lambda_w - Q1.real()*lambda_x);
                ++l;
              }
            }
          }
        }
        // Scale by normal_l and accumulate into the output a_lm for this m.
        xcomplex<T> *palmE=almE.mstart(m), *palmB=almB.mstart(m);
        for( int l=m; l <= lmax; ++l )
        {
          palmE[l] += xcomplex<REAL>(alm_tmp[l][0].real()*normal_l[l], alm_tmp[l][0].imag()*normal_l[l]);
          // BUG FIX: was `almB[l] += ...`, bypassing the m-start pointer and
          // hence indexing the wrong coefficient; use palmB like palmE above.
          palmB[l] += xcomplex<REAL>(alm_tmp[l][1].real()*normal_l[l], alm_tmp[l][1].imag()*normal_l[l]);
        }
      }
    } // end of parallel region
  }
}
template void map2alm_pol_QU( const std::vector<ringpair> &pair, const float *mapQ, const float *mapU, Alm<xcomplex<float> > &almE, Alm<xcomplex<float> > &almB, bool add_alm );
template void map2alm_pol_QU( const std::vector<ringpair> &pair, const double *mapQ, const double *mapU, Alm<xcomplex<double> > &almE, Alm<xcomplex<double> > &almB, bool add_alm );
/*********************************************************************************************************************************************************************/
//void alm2map_pol_QU( /*const Alm<xcomplex<T> > &almT,*/ const Alm<xcomplex<T> > &almE, const Alm<xcomplex<T> > &almB, /*Healpix_Map<T> &mapT,*/ Healpix_Map<T> &mapQ, Healpix_Map<T> &mapU )
// Synthesis convenience wrapper: turns E/B a_lm into Stokes Q/U Healpix maps.
// The maps must be RING-ordered and conformable; the heavy lifting is done by
// the ring-pair overload.
template<typename T> void alm2map_pol_QU( const Alm<xcomplex<T> > &almE, const Alm<xcomplex<T> > &almB, Healpix_Map<T> &mapQ, Healpix_Map<T> &mapU )
{
  planck_assert( mapQ.Scheme()==RING, "alm2map_pol: maps must be in RING scheme" );
  planck_assert( mapQ.conformable( mapU ), "alm2map_pol: maps are not conformable" );
  // Describe the map geometry as ring pairs, then synthesize into the pixels.
  std::vector<ringpair> rings;
  healpix2ringpairs( mapQ, rings );
  alm2map_pol_QU( almE, almB, rings, &mapQ[0], &mapU[0] );
}
template void alm2map_pol_QU( const Alm<xcomplex<float> > &almE, const Alm<xcomplex<float> > &almB, Healpix_Map<float> &mapQ, Healpix_Map<float> &mapU );
template void alm2map_pol_QU( const Alm<xcomplex<double> > &almE, const Alm<xcomplex<double> > &almB, Healpix_Map<double> &mapQ, Healpix_Map<double> &mapU );
//void map2alm_pol_QU( /*const Healpix_Map<T> &mapT,*/ const Healpix_Map<T> &mapQ, const Healpix_Map<T> &mapU, /*Alm<xcomplex<T> > &almT,*/ Alm<xcomplex<T> > &almE, Alm<xcomplex<T> > &almB, const arr<double> &weight, bool add_alm )
// Analysis convenience wrapper: Q/U Healpix maps -> E/B a_lm, using per-ring
// quadrature weights.  Maps must be RING-ordered and conformable; `weight`
// needs at least 2*Nside entries.  When add_alm is true the new coefficients
// are accumulated on top of the existing ones.
template<typename T> void map2alm_pol_QU( const Healpix_Map<T> &mapQ, const Healpix_Map<T> &mapU, Alm<xcomplex<T> > &almE, Alm<xcomplex<T> > &almB, const arr<double> &weight, bool add_alm=false )
{
  planck_assert( mapQ.Scheme()==RING, "map2alm_pol: maps must be in RING scheme" );
  planck_assert( mapQ.conformable( mapU ), "map2alm_pol: maps are not conformable" );
  planck_assert( weight.size() >= (unsigned long) 2*mapQ.Nside(), "map2alm_pol: at least one weight array has too few entries" );
  // Build the weighted ring description of the map, then analyse the pixels.
  std::vector<ringpair> rings;
  healpix2ringpairs( mapQ, weight, rings );
  map2alm_pol_QU( rings, &mapQ[0], &mapU[0], almE, almB, add_alm );
}
template void map2alm_pol_QU( const Healpix_Map<float> &mapQ, const Healpix_Map<float> &mapU, Alm<xcomplex<float> > &almE, Alm<xcomplex<float> > &almB, const arr<double> &weight, bool add_alm );
template void map2alm_pol_QU( const Healpix_Map<double> &mapQ, const Healpix_Map<double> &mapU, Alm<xcomplex<double> > &almE, Alm<xcomplex<double> > &almB, const arr<double> &weight, bool add_alm );
//void map2alm_pol_iter_QU( /*const Healpix_Map<T> &mapT,*/ const Healpix_Map<T> &mapQ, const Healpix_Map<T> &mapU, /*Alm<xcomplex<T> > &almT,*/ Alm<xcomplex<T> > &almE, Alm<xcomplex<T> > &almB, int num_iter, const arr<double> weight )
template<typename T> void map2alm_pol_iter_QU( const Healpix_Map<T> &mapQ, const Healpix_Map<T> &mapU, Alm<xcomplex<T> > &almE, Alm<xcomplex<T> > &almB, int num_iter, const arr<double> weight )
{
map2alm_pol_QU( mapQ, mapU, almE, almB, weight );
for(int iter=1; iter <= num_iter; ++iter)
{
Healpix_Map<T> mapQ2( mapQ.Nside(), mapQ.Scheme(), SET_NSIDE ), mapU2( mapQ.Nside(), mapQ.Scheme(), SET_NSIDE );//mapT2( mapT.Nside(), mapT.Scheme(), SET_NSIDE )
alm2map_pol_QU( almE, almB, mapQ2, mapU2 );
for(int m=0; m < mapQ.Npix(); ++m)
{
//mapT2[m] = mapT[m]-mapT2[m];
mapQ2[m] = mapQ[m]-mapQ2[m];
mapU2[m] = mapU[m]-mapU2[m];
}
map2alm_pol_QU( mapQ2, mapU2, almE, almB, weight, true );
}
}
//template void map2alm_pol_iter_QU( const Healpix_Map<float> &mapQ, const Healpix_Map<float> &mapU, Alm<xcomplex<float> > &almE, Alm<xcomplex<float> > &almB, int num_iter, const arr<double> &weight );
//template void map2alm_pol_iter_QU( const Healpix_Map<double> &mapQ, const Healpix_Map<double> &mapU, Alm<xcomplex<double> > &almE, Alm<xcomplex<double> > &almB, int num_iter, const arr<double> &weight );
#endif
|
pi.c | #include <err.h>
#include <stdio.h>
#include <stdlib.h>
#ifdef WITH_MPI
#include <mpi.h>
#endif
#ifdef _OPENMP
#include <omp.h>
#endif
typedef struct {
long n;
int is_verbose;
} Params;
Params get_params(int argc, char *argv[]);
double *compute_limits();
double partial_pi(double left, double right, Params *params);
#ifdef WITH_MPI
MPI_Datatype *alloc_mpi_params_type();
void free_mpi_params_type(MPI_Datatype *params_type);
#endif
/*
 * Computes pi by rectangle-rule integration of 4/(1+x^2) over [0,1],
 * optionally distributed over MPI ranks (WITH_MPI) and OpenMP threads.
 * argv[1]: total iteration count; argv[2]: verbosity flag.
 */
int main(int argc, char *argv[]) {
const int root = 0;
int rank = 0, size = 1;
Params params;
/* NOTE(review): `limits` stays uninitialized on non-root MPI ranks; it is
 * only read as MPI_Scatter's send buffer, which MPI ignores there, but
 * initializing it to NULL would be cleaner. */
double *limits, local_limits[2], partial_result, result = 0.0;
#ifdef WITH_MPI
#ifdef _OPENMP
/* Hybrid MPI+OpenMP: FUNNELED means only the main thread makes MPI calls. */
int thread_level;
MPI_Init_thread(NULL, NULL, MPI_THREAD_FUNNELED, &thread_level);
if (thread_level != MPI_THREAD_FUNNELED) {
fprintf(stderr, "thread level funneled not supported\n");
MPI_Abort(MPI_COMM_WORLD, 1);
}
#else
MPI_Init(NULL, NULL);
#endif
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
#endif
/* Root parses the arguments and builds one [left,right] interval per rank. */
if (rank == 0) {
params = get_params(argc, argv);
limits = compute_limits();
}
#ifdef WITH_MPI
/* Broadcast the parameter struct via a custom MPI datatype, then hand each
 * rank its own sub-interval (2 doubles). */
MPI_Datatype *params_type = alloc_mpi_params_type();
MPI_Bcast(&params, 1, *params_type, root, MPI_COMM_WORLD);
free_mpi_params_type(params_type);
MPI_Scatter(limits, 2, MPI_DOUBLE, local_limits, 2, MPI_DOUBLE, root,
MPI_COMM_WORLD);
#else
/* Serial build: the single "rank" integrates the whole interval. */
local_limits[0] = limits[0];
local_limits[1] = limits[1];
#endif
if (rank == 0) {
free(limits);
}
if (params.is_verbose)
printf("rank %d out of %d: %.5lf -> %.5lf (%ld)\n",
rank, size, local_limits[0], local_limits[1], params.n);
/* Each rank integrates its slice; partial results are summed at the root. */
partial_result = partial_pi(local_limits[0], local_limits[1], &params);
#ifdef WITH_MPI
MPI_Reduce(&partial_result, &result, 1, MPI_DOUBLE, MPI_SUM, root,
MPI_COMM_WORLD);
#else
result = partial_result;
#endif
if (rank == 0) {
printf("pi = %.8lf\n", result);
}
#ifdef WITH_MPI
MPI_Finalize();
#endif
return EXIT_SUCCESS;
}
/*
 * Parses the command line into a Params struct.
 * argv[1]: total iteration count (default 1000), divided evenly among ranks;
 * argv[2]: verbosity flag (default 0).
 */
Params get_params(int argc, char *argv[]) {
Params params;
int size = 1;
#ifdef WITH_MPI
MPI_Comm_size(MPI_COMM_WORLD, &size);
#endif
params.n = (argc > 1) ? atol(argv[1]) : 1000;
/* each rank gets an equal share of the total iterations */
params.n = params.n/size;
params.is_verbose = (argc > 2) ? atoi(argv[2]) : 0;
return params;
}
/*
 * Allocates and returns an array of `size` [left,right) integration intervals
 * (2 doubles per rank) that evenly partition [0,1].  Aborts on allocation
 * failure.  Caller owns the returned buffer and must free() it.
 */
double *compute_limits() {
int i, size = 1;
double *limits, delta;
#ifdef WITH_MPI
MPI_Comm_size(MPI_COMM_WORLD, &size);
#endif
delta = 1.0/size;
if ((limits = (double *) malloc(2*size*sizeof(double))) == NULL) {
warnx("can not allocate array of %d elements", 2*size);
#ifdef WITH_MPI
MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
#else
exit(EXIT_FAILURE);
#endif
}
for (i = 0; i < size; i++) {
limits[2*i] = i*delta;
/* BUG FIX: was `+=`, which accumulated into uninitialized malloc'd
 * memory (undefined behavior); a plain assignment is intended. */
limits[2*i + 1] = (i + 1)*delta;
}
return limits;
}
/*
 * Integrates 4/(1+x^2) over [left, right] with params->n left-endpoint
 * rectangles of width (right-left)/n, parallelized with OpenMP.
 * Returns the partial integral (this rank's contribution to pi).
 */
double partial_pi(double left, double right, Params *params) {
int rank = 0;
double sum = 0.0, result;
double delta = (right - left)/params->n;
#ifdef WITH_MPI
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
#endif
/* default(none) forces every shared variable to be listed explicitly;
 * i and x below are declared inside the region and thus thread-private. */
#pragma omp parallel default(none) \
shared(sum, delta, left, right, result, params, rank)
{
long i;
double x;
if (params->is_verbose) {
int thread_num = 0, num_threads = 1;
#ifdef _OPENMP
thread_num = omp_get_thread_num();
num_threads = omp_get_num_threads();
#endif
printf("thread %d out of %d at rank %d\n",
thread_num, num_threads, rank);
}
/* left-endpoint rectangle rule: x_i = left + i*delta */
#pragma omp for reduction(+:sum)
for (i = 0; i < params->n; i++) {
x = left + i*delta;
sum += 4.0/(1.0 + x*x);
}
/* The implicit barrier after the worksharing loop makes the reduced
 * `sum` visible here; a single thread scales it into `result`. */
#pragma omp single
result = sum*delta;
}
return result;
}
#ifdef WITH_MPI
/*
 * Builds and commits an MPI datatype describing Params (one long, one int)
 * so the struct can be broadcast in a single MPI_Bcast.
 * Caller must release it with free_mpi_params_type().
 * NOTE(review): the displacement of is_verbose is assumed to be
 * sizeof(long); offsetof(Params, is_verbose) would be robust against
 * padding differences -- confirm on the target platform.
 */
MPI_Datatype *alloc_mpi_params_type() {
const int type_count = 2;
int block_lengths[] = {1, 1};
MPI_Aint displacements[] = {0, sizeof(long)};
MPI_Datatype types[] = {MPI_LONG, MPI_INT};
MPI_Datatype *params_type;
if (!(params_type = (MPI_Datatype *) malloc(sizeof(MPI_Datatype)))) {
/* fixed typo in the diagnostic: "can ot" -> "can not" */
warnx("can not allocate MPI data type");
MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
}
MPI_Type_create_struct(type_count, block_lengths, displacements, types,
params_type);
MPI_Type_commit(params_type);
return params_type;
}
/*
 * Releases a datatype created by alloc_mpi_params_type(): frees the MPI
 * handle first, then the heap cell that holds it.
 */
void free_mpi_params_type(MPI_Datatype *params_type) {
MPI_Type_free(params_type);
free(params_type);
}
#endif
|
ActivationsExecutioner.h | //
// @author raver119@gmail.com
//
#ifndef PROJECT_ACTIVATIONSEXECUTIONER_H
#define PROJECT_ACTIVATIONSEXECUTIONER_H
#include <layers/activations.h>
using namespace nd4j;
// Applies an activation function (the Activation policy type) over NDArray
// buffers, either element-wise or via the activation's own whole-array
// ("special") path, as selected by requiresSpecialFF/requiresSpecialBP.
template <typename T> class ActivationsExecutioner {
public:
// This method should be backend-specific, and should be implemented accordingly
// Forward pass: fills `output` from `input` through Activation::ffActivation.
template<typename Activation> inline static void executeFF(NDArray<T> *input, NDArray<T> *output);
// Backward pass: combines `input` and the incoming gradient `epsilon` into
// `output` through Activation::bpActivation.
template<typename Activation> inline static void executeBP(NDArray<T> *input, NDArray<T> *epsilon, NDArray<T> *output);
};
//////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////
///////////////////// implementation part ////////////////////////////
// This method should be backend-specific, and should be implemented accordingly
// Forward pass.  Activations that operate on the whole array at once
// (e.g. softmax) take the special path; everything else is applied
// element-by-element over the raw buffers.
template<typename T> template<typename Activation>
void ActivationsExecutioner<T>::executeFF(NDArray<T> *input, NDArray<T> *output) {
    if (Activation::requiresSpecialFF()) {
        Activation::ffActivation(input, output);
        return;
    }
    T *in = input->getBuffer();
    T *out = output->getBuffer();
    const Nd4jIndex total = input->lengthOf();
    //#pragma omp parallel for
    for (Nd4jIndex i = 0; i < total; i++)
        out[i] = Activation::ffActivation(in[i]);
}
// Backward pass.  Special activations (e.g. softmax) consume the whole
// arrays at once; otherwise the gradient is computed element-by-element
// from the input value and the incoming epsilon.
template<typename T> template<typename Activation>
void ActivationsExecutioner<T>::executeBP(NDArray<T> * input, NDArray<T> *epsilon, NDArray<T> *output) {
    if (Activation::requiresSpecialBP()) {
        Activation::bpActivation(input, epsilon, output);
        return;
    }
    T *in = input->getBuffer();
    T *eps = epsilon->getBuffer();
    T *out = output->getBuffer();
    const Nd4jIndex total = input->lengthOf();
    //#pragma omp parallel for
    for (Nd4jIndex i = 0; i < total; i++)
        out[i] = Activation::bpActivation(in[i], eps[i]);
}
#endif //PROJECT_ACTIVATIONSEXECUTIONER_H
|
particlefilter.c | /**
* @file ex_particle_OPENMP_seq.c
* @author Michael Trotter & Matt Goodrum
* @brief Particle filter implementation in C/OpenMP
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <time.h>
// RISC-V VECTOR Version by Cristóbal Ramírez Lazo, "Barcelona 2019"
#ifdef USE_RISCV_VECTOR
#include "../../common/vector_defines.h"
#endif
//#include <omp.h>
#include <limits.h>
#define PI 3.1415926535897932
/**
@var M value for Linear Congruential Generator (LCG); use GCC's value
*/
long M = INT_MAX;
/**
@var A value for LCG
*/
int A = 1103515245;
/**
@var C value for LCG
*/
int C = 12345;
/*****************************
*GET_TIME
*returns a long int representing the time
*****************************/
/**
 * Returns the current wall-clock time in microseconds since the Unix epoch.
 */
long long get_time() {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    /* BUG FIX: widen before multiplying -- on platforms with 32-bit time_t,
     * tv.tv_sec * 1000000 overflowed before the result was converted. */
    return (long long) tv.tv_sec * 1000000 + tv.tv_usec;
}
// Returns the number of seconds elapsed between the two specified times
/* Converts a pair of get_time() stamps (microseconds) into elapsed seconds. */
float elapsed_time(long long start_time, long long end_time) {
    long long micros = end_time - start_time;
    return ((float) micros) / (1000 * 1000);
}
/**
* Takes in a double and returns an integer that approximates to that double
* @return if the mantissa < .5 => return value < input value; else return value > input value
*/
/**
 * Rounds a double to the nearest integer value (halves round up).
 * NOTE(review): truncation toward zero makes this behave oddly for negative
 * inputs, as in the original -- confirm callers only pass non-negatives.
 * @return the rounded value as a double
 */
double roundDouble(double value){
	int newValue = (int)(value);
	if(value - newValue < .5)
		return newValue;
	else
		/* BUG FIX: was `return newValue++;`, a post-increment whose result
		 * is the OLD value -- rounding up never happened. */
		return newValue + 1;
}
/**
* Set values of the 3D array to a newValue if that value is equal to the testValue
* @param testValue The value to be replaced
* @param newValue The value to replace testValue with
* @param array3D The image vector
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
*/
/**
 * Replaces every occurrence of testValue in the 3D video array with newValue.
 * @param testValue The value to be replaced
 * @param newValue The replacement value
 * @param array3D The image volume, laid out x-major as [x][y][z]
 * @param dimX/dimY/dimZ Pointers to the three dimensions
 */
void setIf(int testValue, int newValue, int * array3D, int * dimX, int * dimY, int * dimZ){
	int x, y, z;
	for(x = 0; x < *dimX; x++)
		for(y = 0; y < *dimY; y++)
			for(z = 0; z < *dimZ; z++){
				int idx = x * *dimY * *dimZ + y * *dimZ + z;
				if(array3D[idx] == testValue)
					array3D[idx] = newValue;
			}
}
/**
* Generates a uniformly distributed random number using the provided seed and GCC's settings for the Linear Congruential Generator (LCG)
* @see http://en.wikipedia.org/wiki/Linear_congruential_generator
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a uniformly distributed number [0, 1)
*/
/**
 * Advances seed[index] by one step of the Linear Congruential Generator
 * (constants A, C and modulus M are the file-level GCC values) and maps the
 * new state into [0, 1).  Thread-safe as long as each thread owns its index.
 */
double randu(int * seed, int index)
{
	int stepped = A*seed[index] + C;
	seed[index] = stepped % M;
	/* the C `%` can yield a negative remainder; fabs folds it into [0,1) */
	return fabs(seed[index]/((double) M));
}
#ifdef USE_RISCV_VECTOR
/* Vector variant of randu(): advances gvl consecutive seeds with a scalar
 * LCG loop and loads the resulting uniforms into a vector register.
 * NOTE(review): the scalar buffers assume gvl <= 256 -- confirm the maximum
 * vector length used by callers. */
inline _MMR_f64 randu_vector(long int * seed, int index ,unsigned long int gvl)
{
/* Original pure-intrinsic implementation, kept for reference: */
/*
_MMR_i64 xseed = _MM_LOAD_i64(&seed[index],gvl);
_MMR_i64 xA = _MM_SET_i64(A,gvl);
_MMR_i64 xC = _MM_SET_i64(C,gvl);
_MMR_i64 xM = _MM_SET_i64(M,gvl);
xseed = _MM_MUL_i64(xseed,xA,gvl);
xseed = _MM_ADD_i64(xseed,xC,gvl);
_MM_STORE_i64(&seed[index],_MM_REM_i64(xseed,xM,gvl),gvl);
FENCE();
_MMR_f64 xResult;
xResult = _MM_DIV_f64(_MM_VFCVT_F_X_f64(xseed,gvl),_MM_VFCVT_F_X_f64(xM,gvl),gvl);
xResult = _MM_VFSGNJX_f64(xResult,xResult,gvl);
return xResult;
*/
/*
This part of the code should be in 32 bits, but the conversion instructions
are not yet available; moving everything to 64 bits changes the results,
since the arithmetic no longer overflows, and the differences are large.
*/
double result[256];
int num[256];
//FENCE();
//double* result = (double*)malloc(gvl*sizeof(double));
//int* num = (int*)malloc(gvl*sizeof(int));
FENCE();
/* scalar LCG step for each of the gvl lanes (same recurrence as randu) */
for(int x = index; x < index+gvl; x++){
num[x-index] = A*seed[x] + C;
seed[x] = num[x-index] % M;
result[x-index] = fabs(seed[x]/((double) M));
}
_MMR_f64 xResult;
xResult = _MM_LOAD_f64(&result[0],gvl);
FENCE();
return xResult;
}
#endif // USE_RISCV_VECTOR
/**
* Generates a normally distributed random number using the Box-Muller transformation
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a double representing random number generated using the Box-Muller algorithm
* @see http://en.wikipedia.org/wiki/Normal_distribution, section computing value for normal random distribution
*/
/**
 * Draws a standard normal deviate via the Box-Muller transform, consuming
 * two uniform deviates from seed[index].  Thread-safe per seed index.
 */
double randn(int * seed, int index){
	double u1 = randu(seed, index);
	double u2 = randu(seed, index);
	double cosine = cos(2*PI*u2);
	double radius2 = -2*log(u1);
	return sqrt(radius2)*cosine;
}
#ifdef USE_RISCV_VECTOR
/* Vector variant of randn(): Box-Muller over gvl lanes, consuming two
 * uniform vectors from randu_vector(). */
_MMR_f64 randn_vector(long int * seed, int index ,unsigned long int gvl){
/*Box-Muller algorithm*/
_MMR_f64 xU = randu_vector(seed,index,gvl);
_MMR_f64 xV = randu_vector(seed,index,gvl);
_MMR_f64 xCosine;
_MMR_f64 xRt;
/* cosine = cos(2*pi*V) */
xV = _MM_MUL_f64(_MM_SET_f64(PI*2.0,gvl),xV,gvl);
xCosine =_MM_COS_f64(xV,gvl);
FENCE();
/* rt = -2*log(U); result = sqrt(rt)*cosine */
xU = _MM_LOG_f64(xU,gvl);
xRt = _MM_MUL_f64(_MM_SET_f64(-2.0,gvl),xU,gvl);
return _MM_MUL_f64(_MM_SQRT_f64(xRt,gvl),xCosine,gvl);
}
#endif // USE_RISCV_VECTOR
/**
* Sets values of 3D matrix using randomly generated numbers from a normal distribution
* @param array3D The video to be modified
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param seed The seed array
*/
/**
 * Adds zero-mean Gaussian noise (scaled by 5 and truncated to int) to every
 * voxel of the video, consuming deviates sequentially from seed[0].
 */
void addNoise(int * array3D, int * dimX, int * dimY, int * dimZ, int * seed){
	int x, y, z;
	for(x = 0; x < *dimX; x++)
		for(y = 0; y < *dimY; y++)
			for(z = 0; z < *dimZ; z++){
				int idx = x * *dimY * *dimZ + y * *dimZ + z;
				array3D[idx] += (int)(5*randn(seed, 0));
			}
}
/**
* Fills a radius x radius matrix representing the disk
* @param disk The pointer to the disk to be made
* @param radius The radius of the disk to be made
*/
void strelDisk(int * disk, int radius)
{
int diameter = radius*2 - 1;
int x, y;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
double distance = sqrt(pow((double)(x-radius+1),2) + pow((double)(y-radius+1),2));
if(distance < radius)
disk[x*diameter + y] = 1;
}
}
}
/**
* Dilates the provided video
* @param matrix The video to be dilated
* @param posX The x location of the pixel to be dilated
* @param posY The y location of the pixel to be dilated
* @param poxZ The z location of the pixel to be dilated
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param error The error radius
*/
void dilate_matrix(int * matrix, int posX, int posY, int posZ, int dimX, int dimY, int dimZ, int error)
{
int startX = posX - error;
while(startX < 0)
startX++;
int startY = posY - error;
while(startY < 0)
startY++;
int endX = posX + error;
while(endX > dimX)
endX--;
int endY = posY + error;
while(endY > dimY)
endY--;
int x,y;
for(x = startX; x < endX; x++){
for(y = startY; y < endY; y++){
double distance = sqrt( pow((double)(x-posX),2) + pow((double)(y-posY),2) );
if(distance < error)
matrix[x*dimY*dimZ + y*dimZ + posZ] = 1;
}
}
}
/**
* Dilates the target matrix using the radius as a guide
* @param matrix The reference matrix
* @param dimX The x dimension of the video
* @param dimY The y dimension of the video
* @param dimZ The z dimension of the video
* @param error The error radius to be dilated
* @param newMatrix The target matrix
*/
/**
 * Morphological dilation: every 1-pixel of `matrix` is expanded into
 * `newMatrix` as a disk of radius `error` (via dilate_matrix).
 * The source matrix is not modified.
 */
void imdilate_disk(int * matrix, int dimX, int dimY, int dimZ, int error, int * newMatrix)
{
	int x, y, z;
	for(z = 0; z < dimZ; z++)
		for(x = 0; x < dimX; x++)
			for(y = 0; y < dimY; y++)
				if(matrix[x*dimY*dimZ + y*dimZ + z] == 1)
					dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error);
}
/**
* Fills a 2D array describing the offsets of the disk object
* @param se The disk object
* @param numOnes The number of ones in the disk
* @param neighbors The array that will contain the offsets
* @param radius The radius used for dilation
*/
/**
 * Records, for every set cell of the disk stencil `se`, its offset from the
 * stencil center into `neighbors` as pairs: [2i] = column offset,
 * [2i+1] = row offset.
 * @param numOnes The number of set cells (unused; kept for API compatibility)
 */
void getneighbors(int * se, int numOnes, double * neighbors, int radius){
	int center = radius - 1;
	int diameter = radius*2 -1;
	int count = 0;
	int row, col;
	for(row = 0; row < diameter; row++){
		for(col = 0; col < diameter; col++){
			if(se[row*diameter + col]){
				neighbors[count*2] = (int)(col - center);
				neighbors[count*2 + 1] = (int)(row - center);
				count++;
			}
		}
	}
}
/**
* The synthetic video sequence we will work with here is composed of a
* single moving object, circular in shape (fixed radius)
* The motion here is a linear motion
* the foreground intensity and the backgrounf intensity is known
* the image is corrupted with zero mean Gaussian noise
* @param I The video itself
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames of the video
* @param seed The seed array used for number generation
*/
/**
 * Builds the synthetic test video: a single circular object (radius 5)
 * moving linearly (+1 in x, -2 in y per frame), dilated into disks, on a
 * two-level background (100 background / 228 foreground) corrupted with
 * zero-mean Gaussian noise.
 * @param I The video buffer, laid out [x][y][frame]
 * @param seed The seed array used for the noise generator
 */
void videoSequence(int * I, int IszX, int IszY, int Nfr, int * seed){
	int k;
	int max_size = IszX*IszY*Nfr;
	/*get object centers*/
	int x0 = (int)roundDouble(IszY/2.0);
	int y0 = (int)roundDouble(IszX/2.0);
	I[x0 *IszY *Nfr + y0 * Nfr + 0] = 1;
	/*move point; out-of-range positions wrap to index 0*/
	int xk, yk, pos;
	for(k = 1; k < Nfr; k++){
		xk = abs(x0 + (k-1));
		yk = abs(y0 - 2*(k-1));
		pos = yk * IszY * Nfr + xk *Nfr + k;
		if(pos >= max_size)
			pos = 0;
		I[pos] = 1;
	}
	/*dilate matrix*/
	/* BUG FIX: was malloc -- dilate only writes 1s, so the subsequent copy
	 * back into I read uninitialized memory; calloc makes unset pixels 0. */
	int * newMatrix = (int *)calloc(IszX*IszY*Nfr, sizeof(int));
	imdilate_disk(I, IszX, IszY, Nfr, 5, newMatrix);
	memcpy(I, newMatrix, sizeof(int)*IszX*IszY*Nfr);
	free(newMatrix);
	/*define background, add noise*/
	setIf(0, 100, I, &IszX, &IszY, &Nfr);
	setIf(1, 228, I, &IszX, &IszY, &Nfr);
	/*add noise*/
	addNoise(I, &IszX, &IszY, &Nfr, seed);
}
/**
* Determines the likelihood sum based on the formula: SUM( (IK[IND] - 100)^2 - (IK[IND] - 228)^2)/ 100
* @param I The 3D matrix
* @param ind The current ind array
* @param numOnes The length of ind array
* @return A double representing the sum
*/
/**
 * Likelihood-ratio sum over the stencil pixels:
 * SUM( ((I[ind]-100)^2 - (I[ind]-228)^2) / 50 ).
 * @param I The video volume; @param ind The flattened pixel indices;
 * @param numOnes The number of indices.
 */
double calcLikelihoodSum(int * I, int * ind, int numOnes){
	double total = 0.0;
	int i;
	for(i = 0; i < numOnes; i++){
		double fg = I[ind[i]] - 100;
		double bg = I[ind[i]] - 228;
		total += (fg*fg - bg*bg)/50.0;
	}
	return total;
}
/**
* Finds the first element in the CDF that is greater than or equal to the provided value and returns that index
* @note This function uses sequential search
* @param CDF The CDF
* @param lengthCDF The length of CDF
* @param value The value to be found
* @return The index of value in the CDF; if value is never found, returns the last index
*/
/**
 * Sequential search for the first CDF entry >= value.
 * @return its index, or lengthCDF-1 when no entry qualifies.
 */
int findIndex(double * CDF, int lengthCDF, double value){
	int x;
	for(x = 0; x < lengthCDF; x++)
		if(CDF[x] >= value)
			return x;
	/* value exceeds every entry: fall back to the last index */
	return lengthCDF-1;
}
/**
* Finds the first element in the CDF that is greater than or equal to the provided value and returns that index
* @note This function uses binary search before switching to sequential search
* @param CDF The CDF
* @param beginIndex The index to start searching from
* @param endIndex The index to stop searching
* @param value The value to find
* @return The index of value in the CDF; if value is never found, returns the last index
* @warning Use at your own risk; not fully tested
*/
/**
 * Binary search for the first CDF entry >= value within
 * [beginIndex, endIndex] (CDF must be non-decreasing there).
 * @return the index of the first qualifying entry; endIndex (the last
 *         index) when no entry qualifies; -1 when the range is empty.
 *
 * BUG FIX: the previous recursive version recursed into the wrong,
 * non-shrinking half ([begin, middle+1] when CDF[middle] > value) and was
 * flagged "use at your own risk".  Replaced with a standard iterative
 * lower-bound search that matches findIndex()'s semantics.
 */
int findIndexBin(double * CDF, int beginIndex, int endIndex, double value){
	if(endIndex < beginIndex)
		return -1;
	int lo = beginIndex, hi = endIndex;
	while(lo < hi){
		int mid = lo + (hi - lo)/2;
		if(CDF[mid] >= value)
			hi = mid;        /* mid qualifies: keep it, discard the right */
		else
			lo = mid + 1;    /* mid too small: discard it and the left */
	}
	/* lo == hi: first qualifying index, or endIndex when none qualifies */
	return lo;
}
/**
* The implementation of the particle filter using OpenMP for many frames
* @see http://openmp.org/wp/
* @note This function is designed to work with a video of several frames. In addition, it references a provided MATLAB function which takes the video, the objxy matrix and the x and y arrays as arguments and returns the likelihoods
* @param I The video to be run
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames
* @param seed The seed array used for random number generation
* @param Nparticles The number of particles to be used
*/
void particleFilter(int * I, int IszX, int IszY, int Nfr, int * seed, int Nparticles){
int max_size = IszX*IszY*Nfr;
long long start = get_time();
//original particle centroid
double xe = roundDouble(IszY/2.0);
double ye = roundDouble(IszX/2.0);
//expected object locations, compared to center
int radius = 5;
int diameter = radius*2 - 1;
int * disk = (int *)malloc(diameter*diameter*sizeof(int));
strelDisk(disk, radius);
int countOnes = 0;
int x, y;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
if(disk[x*diameter + y] == 1)
countOnes++;
}
}
//printf("countOnes = %d \n",countOnes); // 69
double * objxy = (double *)malloc(countOnes*2*sizeof(double));
getneighbors(disk, countOnes, objxy, radius);
long long get_neighbors = get_time();
printf("TIME TO GET NEIGHBORS TOOK: %f\n", elapsed_time(start, get_neighbors));
//initial weights are all equal (1/Nparticles)
double * weights = (double *)malloc(sizeof(double)*Nparticles);
//#pragma omp parallel for shared(weights, Nparticles) private(x)
for(x = 0; x < Nparticles; x++){
weights[x] = 1/((double)(Nparticles));
}
long long get_weights = get_time();
printf("TIME TO GET WEIGHTSTOOK: %f\n", elapsed_time(get_neighbors, get_weights));
//initial likelihood to 0.0
double * likelihood = (double *)malloc(sizeof(double)*Nparticles);
double * arrayX = (double *)malloc(sizeof(double)*Nparticles);
double * arrayY = (double *)malloc(sizeof(double)*Nparticles);
double * xj = (double *)malloc(sizeof(double)*Nparticles);
double * yj = (double *)malloc(sizeof(double)*Nparticles);
double * CDF = (double *)malloc(sizeof(double)*Nparticles);
double * u = (double *)malloc(sizeof(double)*Nparticles);
int * ind = (int*)malloc(sizeof(int)*countOnes*Nparticles);
//#pragma omp parallel for shared(arrayX, arrayY, xe, ye) private(x)
for(x = 0; x < Nparticles; x++){
arrayX[x] = xe;
arrayY[x] = ye;
}
int k;
printf("TIME TO SET ARRAYS TOOK: %f\n", elapsed_time(get_weights, get_time()));
int indX, indY;
for(k = 1; k < Nfr; k++){
long long set_arrays = get_time();
//apply motion model
//draws sample from motion model (random walk). The only prior information
//is that the object moves 2x as fast as in the y direction
//#pragma omp parallel for shared(arrayX, arrayY, Nparticles, seed) private(x)
for(x = 0; x < Nparticles; x++){
arrayX[x] += 1 + 5*randn(seed, x);
arrayY[x] += -2 + 2*randn(seed, x);
}
long long error = get_time();
printf("TIME TO SET ERROR TOOK: %f\n", elapsed_time(set_arrays, error));
//particle filter likelihood
//#pragma omp parallel for shared(likelihood, I, arrayX, arrayY, objxy, ind) private(x, y, indX, indY)
for(x = 0; x < Nparticles; x++){
//compute the likelihood: remember our assumption is that you know
// foreground and the background image intensity distribution.
// Notice that we consider here a likelihood ratio, instead of
// p(z|x). It is possible in this case. why? a hometask for you.
//calc ind
for(y = 0; y < countOnes; y++){
indX = roundDouble(arrayX[x]) + objxy[y*2 + 1];
indY = roundDouble(arrayY[x]) + objxy[y*2];
ind[x*countOnes + y] = fabs(indX*IszY*Nfr + indY*Nfr + k);
if(ind[x*countOnes + y] >= max_size)
ind[x*countOnes + y] = 0;
}
likelihood[x] = 0;
for(y = 0; y < countOnes; y++)
likelihood[x] += (pow((I[ind[x*countOnes + y]] - 100),2) - pow((I[ind[x*countOnes + y]]-228),2))/50.0;
likelihood[x] = likelihood[x]/((double) countOnes);
}
long long likelihood_time = get_time();
printf("TIME TO GET LIKELIHOODS TOOK: %f\n", elapsed_time(error, likelihood_time));
// update & normalize weights
// using equation (63) of Arulampalam Tutorial
//#pragma omp parallel for shared(Nparticles, weights, likelihood) private(x)
for(x = 0; x < Nparticles; x++){
weights[x] = weights[x] * exp(likelihood[x]);
}
long long exponential = get_time();
printf("TIME TO GET EXP TOOK: %f\n", elapsed_time(likelihood_time, exponential));
double sumWeights = 0;
//#pragma omp parallel for private(x) reduction(+:sumWeights)
for(x = 0; x < Nparticles; x++){
sumWeights += weights[x];
}
long long sum_time = get_time();
printf("TIME TO SUM WEIGHTS TOOK: %f\n", elapsed_time(exponential, sum_time));
//#pragma omp parallel for shared(sumWeights, weights) private(x)
for(x = 0; x < Nparticles; x++){
weights[x] = weights[x]/sumWeights;
}
long long normalize = get_time();
printf("TIME TO NORMALIZE WEIGHTS TOOK: %f\n", elapsed_time(sum_time, normalize));
xe = 0;
ye = 0;
// estimate the object location by expected values
//#pragma omp parallel for private(x) reduction(+:xe, ye)
for(x = 0; x < Nparticles; x++){
xe += arrayX[x] * weights[x];
ye += arrayY[x] * weights[x];
}
long long move_time = get_time();
printf("TIME TO MOVE OBJECT TOOK: %f\n", elapsed_time(normalize, move_time));
printf("XE: %lf\n", xe);
printf("YE: %lf\n", ye);
double distance = sqrt( pow((double)(xe-(int)roundDouble(IszY/2.0)),2) + pow((double)(ye-(int)roundDouble(IszX/2.0)),2) );
printf("%lf\n", distance);
//display(hold off for now)
//pause(hold off for now)
//resampling
CDF[0] = weights[0];
for(x = 1; x < Nparticles; x++){
CDF[x] = weights[x] + CDF[x-1];
}
long long cum_sum = get_time();
printf("TIME TO CALC CUM SUM TOOK: %f\n", elapsed_time(move_time, cum_sum));
double u1 = (1/((double)(Nparticles)))*randu(seed, 0);
//#pragma omp parallel for shared(u, u1, Nparticles) private(x)
for(x = 0; x < Nparticles; x++){
u[x] = u1 + x/((double)(Nparticles));
}
long long u_time = get_time();
printf("TIME TO CALC U TOOK: %f\n", elapsed_time(cum_sum, u_time));
int j, i;
//#pragma omp parallel for shared(CDF, Nparticles, xj, yj, u, arrayX, arrayY) private(i, j)
for(j = 0; j < Nparticles; j++){
i = findIndex(CDF, Nparticles, u[j]);
if(i == -1)
i = Nparticles-1;
//printf("%d ", i);
xj[j] = arrayX[i];
yj[j] = arrayY[i];
}
//printf("\n");
long long xyj_time = get_time();
printf("TIME TO CALC NEW ARRAY X AND Y TOOK: %f\n", elapsed_time(u_time, xyj_time));
//#pragma omp parallel for shared(weights, Nparticles) private(x)
for(x = 0; x < Nparticles; x++){
//reassign arrayX and arrayY
arrayX[x] = xj[x];
arrayY[x] = yj[x];
weights[x] = 1/((double)(Nparticles));
}
long long reset = get_time();
printf("TIME TO RESET WEIGHTS TOOK: %f\n", elapsed_time(xyj_time, reset));
}
free(disk);
free(objxy);
free(weights);
free(likelihood);
free(xj);
free(yj);
free(arrayX);
free(arrayY);
free(CDF);
free(u);
free(ind);
}
#ifdef USE_RISCV_VECTOR
/*
 * particleFilter_vector: RISC-V vector variant of the particle filter
 * (intrinsics reached through the _MM_* / _MMR_* macro layer).
 *
 * Tracks an object through the Nfr frames of the synthetic video I
 * (IszX x IszY x Nfr, linearized) using Nparticles particles:
 * per frame it applies a random-walk motion model, evaluates a
 * likelihood ratio over a disc-shaped template, updates/normalizes the
 * particle weights, estimates the location (printed as XE/YE), and
 * resamples via the CDF. The weight init, array init, motion model and
 * the CDF search of the resampling step are vectorized; the likelihood
 * is scalar.
 *
 *  I          : video frames (read)
 *  seed       : per-particle 32-bit RNG seeds (scalar randu/randn)
 *  seed_64    : per-particle 64-bit RNG seeds (vector randn_vector)
 *
 * Fix vs. the previous revision: `locations` was malloc'd once per
 * frame inside the k-loop and never freed (leak of
 * Nparticles*sizeof(long int) per frame); it is now freed after its
 * last use in each iteration.
 */
void particleFilter_vector(int * I, int IszX, int IszY, int Nfr, int * seed, long int * seed_64, int Nparticles){
int max_size = IszX*IszY*Nfr;
long long start = get_time();
//original particle centroid
double xe = roundDouble(IszY/2.0);
double ye = roundDouble(IszX/2.0);
//expected object locations, compared to center
int radius = 5;
int diameter = radius*2 - 1;
int * disk = (int *)malloc(diameter*diameter*sizeof(int));
strelDisk(disk, radius);
int countOnes = 0;
int x, y;
// count the set pixels of the structuring-element disc
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
if(disk[x*diameter + y] == 1)
countOnes++;
}
}
//printf("countOnes = %d \n",countOnes); // 69
double * objxy = (double *)malloc(countOnes*2*sizeof(double));
getneighbors(disk, countOnes, objxy, radius);
long long get_neighbors = get_time();
printf("TIME TO GET NEIGHBORS TOOK: %f\n", elapsed_time(start, get_neighbors));
//initial weights are all equal (1/Nparticles)
double * weights = (double *)malloc(sizeof(double)*Nparticles);
//#pragma omp parallel for shared(weights, Nparticles) private(x)
/*
for(x = 0; x < Nparticles; x++){
weights[x] = 1/((double)(Nparticles));
}*/
// Vectorized form of the scalar loop above: broadcast 1/Nparticles and
// store it in gvl-wide strips.
// unsigned long int gvl = __builtin_epi_vsetvl(Nparticles, __epi_e64, __epi_m1);
unsigned long int gvl = vsetvl_e64m1(Nparticles); //PLCT
_MMR_f64 xweights = _MM_SET_f64(1.0/((double)(Nparticles)),gvl);
for(x = 0; x < Nparticles; x=x+gvl){
// gvl = __builtin_epi_vsetvl(Nparticles-x, __epi_e64, __epi_m1);
gvl = vsetvl_e64m1(Nparticles-x); //PLCT
_MM_STORE_f64(&weights[x],xweights,gvl);
}
FENCE();
long long get_weights = get_time();
printf("TIME TO GET WEIGHTSTOOK: %f\n", elapsed_time(get_neighbors, get_weights));
//initial likelihood to 0.0
double * likelihood = (double *)malloc(sizeof(double)*Nparticles);
double * arrayX = (double *)malloc(sizeof(double)*Nparticles);
double * arrayY = (double *)malloc(sizeof(double)*Nparticles);
double * xj = (double *)malloc(sizeof(double)*Nparticles);
double * yj = (double *)malloc(sizeof(double)*Nparticles);
double * CDF = (double *)malloc(sizeof(double)*Nparticles);
double * u = (double *)malloc(sizeof(double)*Nparticles);
int * ind = (int*)malloc(sizeof(int)*countOnes*Nparticles);
/*
//#pragma omp parallel for shared(arrayX, arrayY, xe, ye) private(x)
for(x = 0; x < Nparticles; x++){
arrayX[x] = xe;
arrayY[x] = ye;
}
*/
// Vectorized init: every particle starts at the image center (xe, ye).
// gvl = __builtin_epi_vsetvl(Nparticles, __epi_e64, __epi_m1);
gvl = vsetvl_e64m1(Nparticles); //PLCT
_MMR_f64 xArrayX = _MM_SET_f64(xe,gvl);
_MMR_f64 xArrayY = _MM_SET_f64(ye,gvl);
for(int i = 0; i < Nparticles; i=i+gvl){
// gvl = __builtin_epi_vsetvl(Nparticles-i, __epi_e64, __epi_m1);
gvl = vsetvl_e64m1(Nparticles-i); //PLCT
_MM_STORE_f64(&arrayX[i],xArrayX,gvl);
_MM_STORE_f64(&arrayY[i],xArrayY,gvl);
}
FENCE();
_MMR_f64 xAux;
int k;
printf("TIME TO SET ARRAYS TOOK: %f\n", elapsed_time(get_weights, get_time()));
int indX, indY;
// main per-frame loop (frame 0 is the initial state)
for(k = 1; k < Nfr; k++){
long long set_arrays = get_time();
//apply motion model
//draws sample from motion model (random walk). The only prior information
//is that the object moves 2x as fast as in the y direction
// gvl = __builtin_epi_vsetvl(Nparticles, __epi_e64, __epi_m1);
gvl = vsetvl_e64m1(Nparticles); //PLCT
for(x = 0; x < Nparticles; x=x+gvl){
// gvl = __builtin_epi_vsetvl(Nparticles-x, __epi_e64, __epi_m1);
gvl = vsetvl_e64m1(Nparticles-x); //PLCT
// x += 1 + 5*randn(...)
xArrayX = _MM_LOAD_f64(&arrayX[x],gvl);
FENCE();
xAux = randn_vector(seed_64, x,gvl);
FENCE();
xAux = _MM_MUL_f64(xAux, _MM_SET_f64(5.0,gvl),gvl);
xAux = _MM_ADD_f64(xAux, _MM_SET_f64(1.0,gvl),gvl);
xArrayX = _MM_ADD_f64(xAux, xArrayX ,gvl);
_MM_STORE_f64(&arrayX[x],xArrayX,gvl);
// y += -2 + 2*randn(...)
xArrayY = _MM_LOAD_f64(&arrayY[x],gvl);
FENCE();
xAux = randn_vector(seed_64, x,gvl);
FENCE();
xAux = _MM_MUL_f64(xAux, _MM_SET_f64(2.0,gvl),gvl);
xAux = _MM_ADD_f64(xAux, _MM_SET_f64(-2.0,gvl),gvl);
xArrayY = _MM_ADD_f64(xAux, xArrayY ,gvl);
_MM_STORE_f64(&arrayY[x],xArrayY,gvl);
}
FENCE();
/*
//#pragma omp parallel for shared(arrayX, arrayY, Nparticles, seed) private(x)
for(x = 0; x < Nparticles; x++){
arrayX[x] += 1 + 5*randn(seed, x);
arrayY[x] += -2 + 2*randn(seed, x);
}
*/
long long error = get_time();
printf("TIME TO SET ERROR TOOK: %f\n", elapsed_time(set_arrays, error));
//particle filter likelihood (scalar)
//#pragma omp parallel for shared(likelihood, I, arrayX, arrayY, objxy, ind) private(x, y, indX, indY)
for(x = 0; x < Nparticles; x++){
//compute the likelihood: remember our assumption is that you know
// foreground and the background image intensity distribution.
// Notice that we consider here a likelihood ratio, instead of
// p(z|x). It is possible in this case. why? a hometask for you.
//calc ind
for(y = 0; y < countOnes; y++){
indX = roundDouble(arrayX[x]) + objxy[y*2 + 1];
indY = roundDouble(arrayY[x]) + objxy[y*2];
// linearized pixel index; out-of-range indices are clamped to 0
ind[x*countOnes + y] = fabs(indX*IszY*Nfr + indY*Nfr + k);
if(ind[x*countOnes + y] >= max_size)
ind[x*countOnes + y] = 0;
}
likelihood[x] = 0;
for(y = 0; y < countOnes; y++)
likelihood[x] += (pow((I[ind[x*countOnes + y]] - 100),2) - pow((I[ind[x*countOnes + y]]-228),2))/50.0;
likelihood[x] = likelihood[x]/((double) countOnes);
}
long long likelihood_time = get_time();
printf("TIME TO GET LIKELIHOODS TOOK: %f\n", elapsed_time(error, likelihood_time));
// update & normalize weights
// using equation (63) of Arulampalam Tutorial
//#pragma omp parallel for shared(Nparticles, weights, likelihood) private(x)
for(x = 0; x < Nparticles; x++){
weights[x] = weights[x] * exp(likelihood[x]);
}
long long exponential = get_time();
printf("TIME TO GET EXP TOOK: %f\n", elapsed_time(likelihood_time, exponential));
double sumWeights = 0;
//#pragma omp parallel for private(x) reduction(+:sumWeights)
for(x = 0; x < Nparticles; x++){
sumWeights += weights[x];
}
long long sum_time = get_time();
printf("TIME TO SUM WEIGHTS TOOK: %f\n", elapsed_time(exponential, sum_time));
//#pragma omp parallel for shared(sumWeights, weights) private(x)
for(x = 0; x < Nparticles; x++){
weights[x] = weights[x]/sumWeights;
}
long long normalize = get_time();
printf("TIME TO NORMALIZE WEIGHTS TOOK: %f\n", elapsed_time(sum_time, normalize));
xe = 0;
ye = 0;
// estimate the object location by expected values
//#pragma omp parallel for private(x) reduction(+:xe, ye)
for(x = 0; x < Nparticles; x++){
xe += arrayX[x] * weights[x];
ye += arrayY[x] * weights[x];
}
long long move_time = get_time();
printf("TIME TO MOVE OBJECT TOOK: %f\n", elapsed_time(normalize, move_time));
printf("XE: %lf\n", xe);
printf("YE: %lf\n", ye);
double distance = sqrt( pow((double)(xe-(int)roundDouble(IszY/2.0)),2) + pow((double)(ye-(int)roundDouble(IszX/2.0)),2) );
printf("%lf\n", distance);
//display(hold off for now)
//pause(hold off for now)
//resampling
CDF[0] = weights[0];
for(x = 1; x < Nparticles; x++){
CDF[x] = weights[x] + CDF[x-1];
}
long long cum_sum = get_time();
printf("TIME TO CALC CUM SUM TOOK: %f\n", elapsed_time(move_time, cum_sum));
double u1 = (1/((double)(Nparticles)))*randu(seed, 0);
//#pragma omp parallel for shared(u, u1, Nparticles) private(x)
for(x = 0; x < Nparticles; x++){
u[x] = u1 + x/((double)(Nparticles));
}
long long u_time = get_time();
printf("TIME TO CALC U TOOK: %f\n", elapsed_time(cum_sum, u_time));
int j, i;
// Vectorized replacement for findIndex(): for each strip of u values,
// scan the CDF and record in `locations` the first index whose CDF
// entry is >= u (default Nparticles-1 when none is found, matching the
// scalar -1 fallback).
_MMR_MASK_i64 xComp;
_MMR_i64 xMask;
_MMR_f64 xCDF;
_MMR_f64 xU;
_MMR_i64 xArray;
long int vector_complete;
long int * locations = (long int *)malloc(sizeof(long int)*Nparticles);
long int valid;
// gvl = __builtin_epi_vsetvl(Nparticles, __epi_e64, __epi_m1);
gvl = vsetvl_e64m1(Nparticles); //PLCT
for(i = 0; i < Nparticles; i=i+gvl){
// gvl = __builtin_epi_vsetvl(Nparticles-i, __epi_e64, __epi_m1);
gvl = vsetvl_e64m1(Nparticles-i); //PLCT
vector_complete = 0;
xMask = _MM_SET_i64(0,gvl);
xArray = _MM_SET_i64(Nparticles-1,gvl);
xU = _MM_LOAD_f64(&u[i],gvl);
for(j = 0; j < Nparticles; j++){
xCDF = _MM_SET_f64(CDF[j],gvl);
xComp = _MM_VFGE_f64(xCDF,xU,gvl);
// keep only lanes that have not matched before (xMask tracks done lanes)
xComp = _MM_CAST_i1_i64(_MM_XOR_i64(_MM_CAST_i64_i1(xComp,gvl),xMask,gvl),gvl);
valid = _MM_VMFIRST_i64(xComp,gvl);
if(valid != -1)
{
xArray = _MM_MERGE_i64(xArray,_MM_SET_i64(j,gvl),xComp,gvl);
xMask = _MM_OR_i64(_MM_CAST_i64_i1(xComp,gvl),xMask,gvl);
vector_complete = _MM_VMPOPC_i64(_MM_CAST_i1_i64(xMask,gvl),gvl);
}
// stop early once every lane of the strip has found its index
if(vector_complete == gvl){ break; }
//FENCE();
}
_MM_STORE_i64(&locations[i],xArray,gvl);
}
FENCE();
//for(i = 0; i < Nparticles; i++) { printf("%d ", locations[i]); } printf("\n");
//#pragma omp parallel for shared(CDF, Nparticles, xj, yj, u, arrayX, arrayY) private(i, j)
for(j = 0; j < Nparticles; j++){
i = locations[j];
xj[j] = arrayX[i];
yj[j] = arrayY[i];
}
// fix: `locations` was previously leaked on every frame iteration
free(locations);
// for(j = 0; j < Nparticles; j++){ printf("%lf ", xj[i]); } printf("\n");
// for(j = 0; j < Nparticles; j++){ printf("%lf ", yj[i]); } printf("\n");
long long xyj_time = get_time();
printf("TIME TO CALC NEW ARRAY X AND Y TOOK: %f\n", elapsed_time(u_time, xyj_time));
//#pragma omp parallel for shared(weights, Nparticles) private(x)
for(x = 0; x < Nparticles; x++){
//reassign arrayX and arrayY
arrayX[x] = xj[x];
arrayY[x] = yj[x];
weights[x] = 1/((double)(Nparticles));
}
long long reset = get_time();
printf("TIME TO RESET WEIGHTS TOOK: %f\n", elapsed_time(xyj_time, reset));
}
free(disk);
free(objxy);
free(weights);
free(likelihood);
free(xj);
free(yj);
free(arrayX);
free(arrayY);
free(CDF);
free(u);
free(ind);
}
#endif
/*
 * Entry point: parses "-x <dimX> -y <dimY> -z <Nfr> -np <Nparticles>",
 * generates the synthetic video sequence, and runs the particle filter
 * (vector build when USE_RISCV_VECTOR is defined, scalar otherwise).
 * Returns 0 both on success and on usage/parse errors (kept for
 * compatibility with existing run scripts).
 */
int main(int argc, char * argv[]){
char* usage = "openmp.out -x <dimX> -y <dimY> -z <Nfr> -np <Nparticles>";
//check number of arguments
if(argc != 9)
{
printf("%s\n", usage);
return 0;
}
//check args deliminators
if( strcmp( argv[1], "-x" ) || strcmp( argv[3], "-y" ) || strcmp( argv[5], "-z" ) || strcmp( argv[7], "-np" ) ) {
printf( "%s\n",usage );
return 0;
}
int IszX, IszY, Nfr, Nparticles;
/* sscanf returns the number of successful conversions: 0 on a match
 * failure and EOF only when the input ends before any conversion.
 * The previous "== EOF" tests therefore accepted non-numeric
 * arguments; "!= 1" catches both failure modes. */
if( sscanf( argv[2], "%d", &IszX ) != 1 ) {
printf("ERROR: dimX input is incorrect");
return 0;
}
if( IszX <= 0 ) {
printf("dimX must be > 0\n");
return 0;
}
//converting a string to a integer
if( sscanf( argv[4], "%d", &IszY ) != 1 ) {
printf("ERROR: dimY input is incorrect");
return 0;
}
if( IszY <= 0 ) {
printf("dimY must be > 0\n");
return 0;
}
//converting a string to a integer
if( sscanf( argv[6], "%d", &Nfr ) != 1 ) {
printf("ERROR: Number of frames input is incorrect");
return 0;
}
if( Nfr <= 0 ) {
printf("number of frames must be > 0\n");
return 0;
}
//converting a string to a integer
if( sscanf( argv[8], "%d", &Nparticles ) != 1 ) {
printf("ERROR: Number of particles input is incorrect");
return 0;
}
if( Nparticles <= 0 ) {
printf("Number of particles must be > 0\n");
return 0;
}
//establish seed: one RNG seed per particle
int * seed = (int *)malloc(sizeof(int)*Nparticles);
int i;
for(i = 0; i < Nparticles; i++)
{
seed[i] = time(0)*i;
}
//malloc matrix
int * I = (int *)malloc(sizeof(int)*IszX*IszY*Nfr); // 128 * 128 * 10 = 163840 * sizeof(int)
long long start = get_time();
//call video sequence
videoSequence(I, IszX, IszY, Nfr, seed);
long long endVideoSequence = get_time();
printf("VIDEO SEQUENCE TOOK %f\n", elapsed_time(start, endVideoSequence));
#ifdef USE_RISCV_VECTOR
// widened copy of the seeds for the 64-bit vector RNG
long int * seed_64 = (long int *)malloc(sizeof(long int)*Nparticles);
for(i = 0; i < Nparticles; i++)
{
seed_64[i] = (long int)seed[i];
}
//call particle filter
particleFilter_vector(I, IszX, IszY, Nfr, seed,seed_64, Nparticles);
free(seed_64); // fix: seed_64 was previously leaked
#else
//call particle filter
particleFilter(I, IszX, IszY, Nfr, seed, Nparticles);
#endif
long long endParticleFilter = get_time();
printf("PARTICLE FILTER TOOK %f\n", elapsed_time(endVideoSequence, endParticleFilter));
printf("ENTIRE PROGRAM TOOK %f\n", elapsed_time(start, endParticleFilter));
free(seed);
free(I);
return 0;
}
|
par.c | #include <stdio.h>
#include <limits.h>
#include <omp.h>
#include <time.h>
#include <stdlib.h>
#define V 10000
int num;
/*
 * Returns the index of the unvisited vertex with the smallest key, or -1
 * if every unvisited key equals INT_MAX (no candidate found).
 *
 * Each OpenMP thread scans a slice of the vertices into thread-local
 * minima, which are then merged under a critical section.
 *
 * Fix: `index` was previously uninitialized, so `int index_local = index;`
 * read an indeterminate value (UB) and a no-candidate scan returned
 * garbage. It now starts at -1.
 *
 * NOTE(review): every thread writes the same value into the global `num`
 * (thread count) — technically a data race, though benign in practice;
 * confirm whether it should move into a `single` region.
 */
int minKey(int key[], int visited[])
{
    int min = INT_MAX, index = -1, i;
    #pragma omp parallel
    {
        num = omp_get_num_threads();
        int index_local = index;
        int min_local = min;
        /* nowait: threads proceed straight to the merge below */
        #pragma omp for nowait
        for (i = 0; i < V; i++)
        {
            if (visited[i] == 0 && key[i] < min_local)
            {
                min_local = key[i];
                index_local = i;
            }
        }
        /* merge the per-thread minimum into the global one */
        #pragma omp critical
        {
            if (min_local < min)
            {
                min = min_local;
                index = index_local;
            }
        }
    }
    return index;
}
/* Prints every MST edge as "parent - vertex weight", one per line,
 * after a header row. Vertex 0 is the root and has no incoming edge.
 * (`n` is accepted for signature compatibility but the loop bound is
 * the compile-time vertex count V, exactly as before.) */
void printMST(int from[], int n, int **graph)
{
    printf("Edge Weight\n");
    for (int v = 1; v < V; v++) {
        printf("%d - %d %d \n", from[v], v, graph[v][from[v]]);
    }
}
/*
 * Computes a minimum spanning tree of the V x V adjacency matrix `graph`
 * using Prim's algorithm (a zero entry means "no edge").
 *
 * key[v]  — cheapest known edge weight connecting v to the growing tree
 * from[v] — the tree-side endpoint of that edge (MST parent of v)
 *
 * Each round picks the cheapest unvisited vertex via minKey() and then
 * relaxes all its neighbors in parallel (each iteration of the relax
 * loop touches a distinct v, so no synchronization is needed).
 *
 * Cleanup: the unused local `num_threads` was removed.
 */
void primMST(int **graph)
{
    int from[V];
    int key[V];
    int visited[V];
    int i, count;
    for (i = 0; i < V; i++)
        key[i] = INT_MAX, visited[i] = 0;
    key[0] = 0;     /* start the tree at vertex 0 */
    from[0] = -1;   /* root has no parent */
    for (count = 0; count < V - 1; count++)
    {
        int u = minKey(key, visited);
        visited[u] = 1;
        int v;
        #pragma omp parallel for schedule(static)
        for (v = 0; v < V; v++)
        {
            if (graph[u][v] && visited[v] == 0 && graph[u][v] < key[v])
                from[v] = u, key[v] = graph[u][v];
        }
    }
    // printMST(from, V, graph);
}
/*
 * Benchmark driver: builds a random symmetric V x V weighted graph
 * (weights 0..9, zero diagonal), runs primMST, and reports wall time
 * plus the thread count observed inside minKey (global `num`).
 *
 * Fix: the heap-allocated adjacency matrix was previously leaked; it is
 * now released before returning.
 */
int main()
{
    // V x V adjacency matrix on the heap (too large for the stack)
    int **graph = (int **)malloc(V * sizeof(int *));
    for (int x = 0; x < V; x++)
        graph[x] = (int *)malloc(V * sizeof(int));
    int i, j;
    //Generate random adjacency matrix
    srand(time(NULL));
    for (i = 0; i < V; i++)
        for (j = 0; j < V; j++)
            graph[i][j] = rand() % 10;
    // no self-loops
    for (i = 0; i < V; i++)
    {
        graph[i][i] = 0;
    }
    // mirror entries so the matrix is symmetric (undirected graph)
    for (i = 0; i < V; i++)
        for (j = 0; j < V; j++)
            graph[j][i] = graph[i][j];
    double start = omp_get_wtime();
    primMST(graph);
    double end = omp_get_wtime();
    printf("Time for par = %f\nThreads = %d\n", end - start, num);
    // fix: release the adjacency matrix (was leaked)
    for (int x = 0; x < V; x++)
        free(graph[x]);
    free(graph);
    return 0;
}
|
hermv_c_dia_n_lo.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include <memory.h>
#include <stdlib.h>
/*
 * Hermitian sparse matrix-vector multiply, DIA storage, lower triangle
 * stored: y := beta*y + alpha*A*x, where A is Hermitian and only its
 * main diagonal and sub-diagonals (distance <= 0) are held in the DIA
 * arrays. Each stored off-diagonal entry contributes twice: once as
 * itself (lower part) and once as its conjugate (the implicit upper
 * part).
 *
 * Parallelization: one scratch row of length m per thread; the diagonal
 * loop accumulates into the calling thread's row without atomics, and a
 * final pass reduces all scratch rows into y.
 *
 * NOTE(review): scratch rows are typed ALPHA_Number while the element
 * arithmetic uses ALPHA_Complex — presumably these alias the same type
 * in this complex instantiation; confirm against the ONAME macro setup.
 * NOTE(review): malloc results are not checked for NULL.
 */
alphasparse_status_t
ONAME(const ALPHA_Complex alpha,
const ALPHA_SPMAT_DIA *A,
const ALPHA_Complex *x,
const ALPHA_Complex beta,
ALPHA_Complex *y)
{
const ALPHA_INT m = A->rows;
const ALPHA_INT n = A->cols;
/* a Hermitian multiply requires a square matrix */
if(m != n) return ALPHA_SPARSE_STATUS_INVALID_VALUE;
const ALPHA_INT thread_num = alpha_get_thread_num();
/* per-thread accumulator rows, zero-initialized */
ALPHA_Number** tmp = (ALPHA_Number**)malloc(sizeof(ALPHA_Number*) * thread_num);
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for(int i = 0; i < thread_num; ++i)
{
tmp[i] = malloc(sizeof(ALPHA_Number) * m);
memset(tmp[i], 0, sizeof(ALPHA_Number) * m);
}
const ALPHA_INT diags = A->ndiag;
/* each stored diagonal is processed independently by some thread */
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for (ALPHA_INT i = 0; i < diags; ++i)
{
const ALPHA_INT threadId = alpha_get_thread_id();
const ALPHA_INT dis = A->distance[i];
if(dis == 0)
{
/* main diagonal: contributes once, no conjugate mirror */
const ALPHA_INT start = i * A->lval;
for(ALPHA_INT j = 0; j < m; ++j)
{
ALPHA_Number v;
alpha_mul(v, alpha, A->values[start + j]);
alpha_madde(tmp[threadId][j], v, x[j]);
}
}
else if(dis < 0)
{
/* sub-diagonal at offset -dis: entry (row_start+j, j) plus its
 * conjugate mirror (j, row_start+j). Diagonals with dis > 0 are
 * ignored — only the lower triangle is stored. */
const ALPHA_INT row_start = -dis;
const ALPHA_INT col_start = 0;
const ALPHA_INT nnz = m + dis;
const ALPHA_INT start = i * A->lval;
for(ALPHA_INT j = 0; j < nnz; ++j)
{
ALPHA_Complex v,v_c;
ALPHA_Complex val_orig = A->values[start + row_start + j];
ALPHA_Complex val_conj = {val_orig.real,-val_orig.imag};
alpha_mul(v, alpha, val_orig);
alpha_mul(v_c, alpha, val_conj);
alpha_madde(tmp[threadId][row_start + j], v, x[col_start + j]);
alpha_madde(tmp[threadId][col_start + j], v_c, x[row_start + j]);
}
}
}
/* reduce: y[i] = beta*y[i] + sum over threads of tmp[t][i] */
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for(ALPHA_INT i = 0; i < m; ++i)
{
alpha_mul(y[i], beta, y[i]);
for(ALPHA_INT j = 0; j < thread_num; ++j)
{
alpha_add(y[i], y[i], tmp[j][i]);
}
}
/* release the scratch rows */
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for (ALPHA_INT i = 0; i < thread_num; ++i)
{
alpha_free(tmp[i]);
}
alpha_free(tmp);
return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
layer_example_f32.c | /******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved. *
* This file is part of the LIBXSMM library. *
* *
* For information on the license, see the LICENSE file. *
* Further information: https://github.com/hfp/libxsmm/ *
* SPDX-License-Identifier: BSD-3-Clause *
******************************************************************************/
/* Alexander Heinecke (Intel Corp.)
******************************************************************************/
#include <libxsmm.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <math.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#if defined(USE_BLAS) || defined(USE_IM2COL)
#include <mkl.h>
#endif
#define CHANNEL_BLOCKING 64
/* function-pointer to LIBXSMM kernel */
libxsmm_smmfunction_reducebatch_offs fwd_brgemmz;
libxsmm_smmfunction_reducebatch_offs fwd_brgemma;
typedef struct {
int nImg;
int nIfm;
int nOfm;
int ifhp;
int ifwp;
int ifh;
int ifw;
int ofhp;
int ofwp;
int ofh;
int ofw;
int pad_h;
int pad_w;
int pad_h_in;
int pad_w_in;
int pad_h_out;
int pad_w_out;
int kh;
int kw;
int stride_h;
int stride_w;
int RK;
int Mh;
int Mw;
} naive_conv_t;
typedef struct {
int nImg;
int nBIfm;
int nbIfm;
int nBOfm;
int nbOfm;
int ifhp;
int ifwp;
int ifh;
int ifw;
int ofhp;
int ofwp;
int ofh;
int ofw;
int pad_h;
int pad_w;
int pad_h_in;
int pad_w_in;
int pad_h_out;
int pad_w_out;
int kh;
int kw;
int stride_h;
int stride_w;
int RK;
int Mh;
int Mw;
unsigned long long brcount;
} gemm_conv_t;
typedef struct {
double max_rel_err;
double max_abs_err;
double l2_rel_err;
double one_norm_ref;
double one_norm_test;
} correctness_t;
/* Sets the first `size` floats of `buf` to zero (OpenMP-parallel when
 * available). Fix: the loop index is now `long` to match `size`; the
 * previous `int` index overflowed (UB) for buffers with more than
 * INT_MAX elements. */
LIBXSMM_INLINE void zero_buf(float* buf, long size) {
  long i;
#if defined(_OPENMP)
#pragma omp parallel for private(i)
#endif
  for (i = 0; i < size; ++i) {
    buf[i] = 0.0f;
  }
}
/* Copies `size` floats from `src` to `dst` (OpenMP-parallel when
 * available); buffers must not overlap. Fix: `long` loop index to match
 * `size` (the previous `int` index overflowed for huge buffers). */
LIBXSMM_INLINE void copy_buf(float* src, float* dst, long size) {
  long i;
#if defined(_OPENMP)
#pragma omp parallel for private(i)
#endif
  for (i = 0; i < size; ++i) {
    dst[i] = src[i];
  }
}
/* Fills `buf` with test data using drand48(): all 1.0 when initOne is
 * set, uniform [0,1) when initPos is set, otherwise small values in
 * (-0.05, 0.05]. Fix: `long` loop index to match `size` (was `int`). */
LIBXSMM_INLINE void init_buf(float* buf, long size, int initPos, int initOne)
{
  long i;
  zero_buf(buf, size);
  for (i = 0; i < size; ++i) {
    buf[i] = (float)((initOne != 0) ? 1.0 : ((initPos != 0) ? drand48() : (0.05 - drand48()/10.0)));
  }
}
/* Zeroes the pad border of an NCHW[Mh][RK] tensor: every element whose
 * h is within pad_h of either vertical edge, or whose w is within pad_w
 * of either horizontal edge, is set to 0 (interior untouched).
 * Assumes H and W already include the padding rows/columns. */
LIBXSMM_INLINE void set_zeropad_nchw(float* nchw, int N, int C, int H, int W, int Mh, int RK, int pad_h, int pad_w)
{
LIBXSMM_VLA_DECL(6, float, input, nchw, C, H, W, Mh, RK);
int n, h, w, c, m, rk;
for ( n = 0; n < N; n++ ) {
for ( c = 0; c < C; c++ ) {
for ( h = 0; h < H; h++ ) {
for ( w = 0; w < W; w++ ) {
for ( m = 0; m < Mh; m++ ) {
for ( rk = 0; rk < RK; rk++ ) {
/* border test: top/bottom pad_h rows, left/right pad_w columns */
if(h < pad_h || h >= H-pad_h || w < pad_w || w >= W-pad_w)
LIBXSMM_VLA_ACCESS(6, input, n, c, h, w, m, rk, C, H, W, Mh, RK) = 0.0;
}
}
}
}
}
}
}
/* Compares `test` against the reference `ref` elementwise and fills
 * `norms` with: max relative error, max absolute error, L2 norm of the
 * differences, and the one-norms (plain sums) of both buffers.
 * Fix: `long` loop index to match `size` (the previous `int` index
 * overflowed for buffers with more than INT_MAX elements).
 * NOTE(review): rel_err divides by |ref[i]|; a zero reference with a
 * nonzero test value yields inf — presumably acceptable for this
 * benchmark, but confirm. */
LIBXSMM_INLINE void compare_buf(float* ref, float* test, long size, correctness_t* norms)
{
  long i;
  double diff, rel_err;
  norms->max_rel_err = 0.;
  norms->max_abs_err = 0.;
  norms->l2_rel_err = 0.;
  norms->one_norm_ref = 0.;
  norms->one_norm_test = 0.;
  for (i = 0; i < size; ++i) {
    norms->one_norm_ref += (double)ref[i];
    norms->one_norm_test += (double)test[i];
    diff = fabs((double)ref[i] - (double)test[i]);
    norms->l2_rel_err += (diff*diff);
    rel_err = 0.0;
    if (diff > 0.0 ) {
      rel_err = diff/fabs((double)ref[i]);
    }
    if (rel_err > norms->max_rel_err) {
      norms->max_rel_err = rel_err;
#if 0
      printf("MISMATCH@ %3d: A=%12.8g B=%12.8g (E:%12.4e) (R:%12.4e)\n", i, ref[i], test[i], diff, rel_err);
#endif
    }
    if (diff > norms->max_abs_err) {
      norms->max_abs_err = diff;
    }
#if 0
    if (diff > 1.0) {
      printf("MISMATCH@ %3d: A=%12.8g B=%12.8g (E:%12.4e)\n", i, ref[i], test[i], diff);
    }
#endif
  }
  norms->l2_rel_err = sqrt(norms->l2_rel_err);
}
/* Repacks the pose tensor from the naive layout
 * [N][H][W][C][Mh][RK] into the channel-blocked GEMM layout
 * [N][C/CB][Mh][RK][H][W][CB] (CB = CHANNEL_BLOCKING).
 * Assumes C is a multiple of CHANNEL_BLOCKING. */
LIBXSMM_INLINE void copy_naiveP_to_GEMM(const float* nchw, float* gemm, int N, int H, int W, int C, int Mh, int RK)
{
LIBXSMM_VLA_DECL(7, float, output, gemm, C/CHANNEL_BLOCKING, Mh, RK, H, W, CHANNEL_BLOCKING);
LIBXSMM_VLA_DECL(6, const float, input, nchw, H, W, C, Mh, RK);
int n, h, w, c1, c2, m, rk;
for ( n = 0; n < N; n++ ) {
for ( c1 = 0; c1 < C/CHANNEL_BLOCKING; c1++ ) {
for ( m = 0; m < Mh; m++ ) {
for ( rk = 0; rk < RK; rk++ ) {
for ( h = 0; h < H; h++ ) {
for ( w = 0; w < W; w++ ) {
for ( c2 = 0; c2 < CHANNEL_BLOCKING; c2++ ) {
/* channel index c = c1*CB + c2 */
LIBXSMM_VLA_ACCESS(7, output, n, c1, m, rk, h, w, c2, C/CHANNEL_BLOCKING, Mh, RK, H, W, CHANNEL_BLOCKING) =
LIBXSMM_VLA_ACCESS(6, input, n, h, w, (c1*CHANNEL_BLOCKING)+c2, m, rk, H, W, C, Mh, RK);
}
}
}
}
}
}
}
}
/* Repacks the votes tensor from the channel-blocked GEMM layout
 * [N][C/CB][Mh][Mw][H][W][CB] back into the naive layout
 * [N][H][W][C][Mh][Mw] (CB = CHANNEL_BLOCKING), the inverse direction
 * of copy_naiveP_to_GEMM but for the Mh x Mw vote matrices. */
LIBXSMM_INLINE void copy_GEMM_to_naiveV(const float* gemm, float* nchw, int N, int H, int W, int C, int Mh, int Mw)
{
LIBXSMM_VLA_DECL(7, const float, input, gemm, C/CHANNEL_BLOCKING, Mh, Mw, H, W, CHANNEL_BLOCKING);
LIBXSMM_VLA_DECL(6, float, output, nchw, H, W, C, Mh, Mw);
int n, h, w, c1, c2, mi, mj;
for ( n = 0; n < N; n++ ) {
for ( c1 = 0; c1 < C/CHANNEL_BLOCKING; c1++ ) {
for ( mj = 0; mj < Mh; mj++) {
for ( mi = 0; mi < Mw; mi++) {
for ( h = 0; h < H; h++ ) {
for ( w = 0; w < W; w++ ) {
for ( c2 = 0; c2 < CHANNEL_BLOCKING; c2++ ) {
/* channel index c = c1*CB + c2 */
LIBXSMM_VLA_ACCESS(6, output, n, h, w, (c1*CHANNEL_BLOCKING)+c2, mj, mi, H, W, C, Mh, Mw) =
LIBXSMM_VLA_ACCESS(7, input, n, c1, mj, mi, h, w, c2, C/CHANNEL_BLOCKING, Mh, Mw, H, W, CHANNEL_BLOCKING);
}
}
}
}
}
}
}
}
/* Repacks the filter from the naive layout [C][K][R][S][RK][Mw] into
 * the blocked layout [K/CB][C/CB][Mw][RK][R][S][CB][CB]
 * (CB = CHANNEL_BLOCKING; last two dims are input-channel, then
 * output-channel).
 * NOTE(review): the ACCESS below passes `C` as the second-dimension
 * extent while the DECL above uses `K` — with real C VLAs the ACCESS
 * extents are ignored so this is harmless, but in the non-VLA
 * (manual-stride) fallback it would index incorrectly whenever C != K;
 * confirm against LIBXSMM_VLA_ACCESS semantics. */
LIBXSMM_INLINE void copy_naiveF_to_GEMM(const float* kcrs, float* gemm, int R, int S, int C, int K, int RK, int Mw)
{
LIBXSMM_VLA_DECL(8, float, output, gemm, C/CHANNEL_BLOCKING, Mw, RK, R, S, CHANNEL_BLOCKING, CHANNEL_BLOCKING);
LIBXSMM_VLA_DECL(6, const float, input, kcrs, K, R, S, RK, Mw);
int r, s, c1, c2, k1, k2, rk, m;
for ( k1 = 0; k1 < K/CHANNEL_BLOCKING; k1++ ) {
for ( c1 = 0; c1 < C/CHANNEL_BLOCKING; c1++ ) {
for ( m = 0; m < Mw; m++ ) {
for ( rk = 0; rk < RK; rk++ ) {
for ( r = 0; r < R; r++ ) {
for ( s = 0; s < S; s++ ) {
for ( c2 = 0; c2 < CHANNEL_BLOCKING; c2++ ) {
for ( k2 = 0; k2 < CHANNEL_BLOCKING; k2++ ) {
LIBXSMM_VLA_ACCESS(8, output, k1, c1, m, rk, r, s, c2, k2, C/CHANNEL_BLOCKING, Mw, RK, R, S, CHANNEL_BLOCKING, CHANNEL_BLOCKING) =
LIBXSMM_VLA_ACCESS(6, input, (c1*CHANNEL_BLOCKING)+c2, (k1*CHANNEL_BLOCKING)+k2, r, s, rk, m, C, R, S, RK, Mw);
}
}
}
}
}
}
}
}
}
/* Single unsigned compare equivalent to `(unsigned int)a < (unsigned int)b`:
 * for a non-negative bound b this tests 0 <= a < b, because a negative
 * `a` wraps to a large unsigned value and fails the comparison. */
LIBXSMM_INLINE int is_a_ge_zero_and_a_lt_b(int a, int b) {
  const unsigned int ua = (unsigned int)a;
  const unsigned int ub = (unsigned int)b;
  return (ua < ub);
}
/* Reference (naive) forward pass of the capsule convolution:
 * votes[img][oj][oi][ofm][mj][mi] =
 *   sum over ifm, kj, ki, rk of
 *     poses[img][ij+kj][ii+ki][ifm][mj][rk] * filter[ifm][ofm][kj][ki][rk][mi]
 * i.e. an R x S convolution where each spatial tap multiplies an
 * Mh x RK pose matrix by an RK x Mw filter slice.
 * Parallelized over (img, ofm) with OpenMP collapse(2).
 * NOTE(review): the commented-out bounds checks mean inputs must be
 * physically padded (or pad_h/pad_w zero) — ij+kj / ii+ki are not
 * range-checked here. The base offset `pad_w_out * ofwp + pad_h_out`
 * mixes the w-pad with the row stride; both pads are 0 in this build
 * (USE_PHYSICAL_PADDING disabled) — confirm before enabling padding. */
LIBXSMM_INLINE void naive_convcaps_fp(naive_conv_t* param, const float* input, float* output, const float* filter)
{
int nImg = param->nImg;
int nIfm = param->nIfm;
int nOfm = param->nOfm;
int ifhp = param->ifhp;
int ifwp = param->ifwp;
int ofhp = param->ofhp;
int ofwp = param->ofwp;
int ofh = param->ofh;
int ofw = param->ofw;
int pad_h = param->pad_h;
int pad_w = param->pad_w;
int pad_h_in = param->pad_h_in;
int pad_w_in = param->pad_w_in;
int pad_h_out = param->pad_h_out;
int pad_w_out = param->pad_w_out;
int kh = param->kh;
int kw = param->kw;
int stride_h = param->stride_h;
int stride_w = param->stride_w;
int RK = param->RK;
int Mh = param->Mh;
int Mw = param->Mw;
/* loop counters */
int img, ofm, ifm, oj, oi, ij, ii, kj, ki, rk, mj, mi;
LIBXSMM_VLA_DECL(6, float, votes_t, output + (pad_w_out * ofwp + pad_h_out), ofhp, ofwp, nOfm, Mh, Mw);
LIBXSMM_VLA_DECL(6, const float, poses_t, input + (pad_w_in * ifwp + pad_h_in), ifhp, ifwp, nIfm, Mh, RK);
LIBXSMM_VLA_DECL(6, const float, filter_t, filter, nOfm, kh, kw, RK, Mw);
#if defined(_OPENMP)
# pragma omp parallel for LIBXSMM_OPENMP_COLLAPSE(2) private(img, ofm, ifm, oj, oi, ij, ii, kj, ki, rk, mj, mi)
#endif
for (img = 0; img < nImg; ++img) {
for (ofm = 0; ofm < nOfm; ++ofm) {
for (oj = 0; oj < ofh; ++oj) {
ij = oj * stride_h - pad_h;
for (oi = 0; oi < ofw; ++oi) {
ii = oi * stride_w - pad_w;
for (mj = 0; mj < Mh; ++mj ) {
for (mi = 0; mi < Mw; ++mi ) {
/* zero-init the output vote, then accumulate */
LIBXSMM_VLA_ACCESS( 6, votes_t, img, oj, oi, ofm, mj, mi, ofhp, ofwp, nOfm, Mh, Mw) = 0.0f;
for (ifm = 0; ifm < nIfm; ++ifm) {
for (kj = 0; kj < kh; ++kj) {
/*if(ij+kj < 0 || ij+kj >= ifh) continue;*/
for (ki = 0; ki < kw; ++ki) {
/*if(ii+ki < 0 || ii+ki >= ifw) continue;*/
for (rk = 0; rk < RK; ++rk ) {
LIBXSMM_VLA_ACCESS( 6, votes_t, img, oj, oi, ofm, mj, mi, ofhp, ofwp, nOfm, Mh, Mw) +=
LIBXSMM_VLA_ACCESS( 6, poses_t, img, ij+kj, ii+ki, ifm, mj, rk, ifhp, ifwp, nIfm, Mh, RK) *
LIBXSMM_VLA_ACCESS( 6, filter_t, ifm, ofm, kj, ki, rk, mi, nOfm, kh, kw, RK, Mw);
}
}
}
}
}
}
}
}
}
}
}
/* Blocked BRGEMM forward pass of the capsule convolution. For each
 * (img, output block, mj, mi, input block, rk, output row) it issues one
 * batch-reduce GEMM over the kh*kw filter taps: `aoff`/`boff` (from
 * compute_broff) give the per-tap byte offsets into the filter and
 * input rows, and `brcount` the number of taps. The beta=0 kernel
 * (fwd_brgemmz) is used for the first accumulation into each output row
 * (rk == 0 && ifm1 == 0); all later contributions use the beta=1 kernel
 * (fwd_brgemma). Parallelized over (img, ofm1) with OpenMP collapse(2).
 * NOTE(review): as in the naive path, ij is not bounds-checked, so
 * logical padding requires physically padded inputs. */
LIBXSMM_INLINE void gemm_convcaps_fp(gemm_conv_t* param, const float* input, float* output, const float* filter, unsigned long long* aoff, unsigned long long* boff)
{
int nImg = param->nImg;
int nBIfm = param->nBIfm;
int nbIfm = param->nbIfm;
int nBOfm = param->nBOfm;
int nbOfm = param->nbOfm;
int ifhp = param->ifhp;
int ifwp = param->ifwp;
int ofhp = param->ofhp;
int ofwp = param->ofwp;
int ofh = param->ofh;
int pad_h = param->pad_h;
int pad_h_in = param->pad_h_in;
int pad_w_in = param->pad_w_in;
int pad_h_out = param->pad_h_out;
int pad_w_out = param->pad_w_out;
int kh = param->kh;
int kw = param->kw;
int stride_h = param->stride_h;
int RK = param->RK;
int Mh = param->Mh;
int Mw = param->Mw;
unsigned long long brcount = param->brcount;
/* loop counters */
int img, ofm1, ifm1, oj, ij, rk, mj, mi;
LIBXSMM_VLA_DECL(7, float, votes_t, output + (pad_w_out * ofwp + pad_h_out), nBOfm, Mh, Mw, ofhp, ofwp, nbOfm);
LIBXSMM_VLA_DECL(7, const float, poses_t, input + (pad_w_in * ifwp + pad_h_in), nBIfm, Mh, RK, ifhp, ifwp, nbIfm);
LIBXSMM_VLA_DECL(8, const float, filter_t, filter, nBIfm, Mw, RK, kh, kw, nbIfm, nbOfm);
#if defined(_OPENMP)
# pragma omp parallel for LIBXSMM_OPENMP_COLLAPSE(2) private(img, ofm1, ifm1, oj, ij, mj, mi, rk)
#endif
for (img = 0; img < nImg; ++img) {
for (ofm1 = 0; ofm1 < nBOfm; ++ofm1) {
for (mj = 0; mj < Mh; ++mj ) {
for (mi = 0; mi < Mw; ++mi ) {
for (ifm1 = 0; ifm1 < nBIfm; ++ifm1) {
for (rk = 0; rk < RK; ++rk ) {
for (oj = 0; oj < ofh; ++oj) {
ij = oj * stride_h - pad_h;
if ( rk == 0 && ifm1 == 0 ) {
/* first contribution to this output row: overwrite (beta = 0) */
fwd_brgemmz( &LIBXSMM_VLA_ACCESS(8, filter_t, ofm1, ifm1, mi, rk, 0, 0, 0, 0, nBIfm, Mw, RK, kh, kw, nbIfm, nbOfm) /* A */,
&LIBXSMM_VLA_ACCESS(7, poses_t, img, ifm1, mj, rk, ij, 0, 0, nBIfm, Mh, RK, ifhp, ifwp, nbIfm) /* B */,
&LIBXSMM_VLA_ACCESS(7, votes_t, img, ofm1, mj, mi, oj, 0, 0, nBOfm, Mh, Mw, ofhp, ofwp, nbOfm) /* C */,
&brcount, aoff, boff );
} else {
/* subsequent contributions: accumulate (beta = 1) */
fwd_brgemma( &LIBXSMM_VLA_ACCESS(8, filter_t, ofm1, ifm1, mi, rk, 0, 0, 0, 0, nBIfm, Mw, RK, kh, kw, nbIfm, nbOfm) /* A */,
&LIBXSMM_VLA_ACCESS(7, poses_t, img, ifm1, mj, rk, ij, 0, 0, nBIfm, Mh, RK, ifhp, ifwp, nbIfm) /* B */,
&LIBXSMM_VLA_ACCESS(7, votes_t, img, ofm1, mj, mi, oj, 0, 0, nBOfm, Mh, Mw, ofhp, ofwp, nbOfm) /* C */,
&brcount, aoff, boff );
}
}
}
}
}
}
}
}
}
/* Precomputes the kh*kw batch-reduce byte offsets consumed by the
 * offset-based BRGEMM kernels: aoff[r*kw+s] points at filter tap (r, s)
 * within one [kh][kw][nbIfm][nbOfm] filter block, and boff[r*kw+s]
 * points at the matching input pixel within an [ifhp][ifwp][nbIfm]
 * input plane. */
LIBXSMM_INLINE void compute_broff(gemm_conv_t* param, unsigned long long* aoff, unsigned long long* boff) {
  const int nbIfm = param->nbIfm;
  const int nbOfm = param->nbOfm;
  const int ifwp = param->ifwp;
  const int kh = param->kh;
  const int kw = param->kw;
  int r, s;
  for (r = 0; r < kh; ++r) {
    for (s = 0; s < kw; ++s) {
      const int idx = r * kw + s;
      aoff[idx] = (r*(kw*nbIfm*nbOfm) + s*(nbIfm*nbOfm))*sizeof(float);
      boff[idx] = (r*(ifwp*nbIfm) + s*(nbIfm))*sizeof(float);
    }
  }
}
int main(int argc, char* argv[])
{
  /* Driver: validates and benchmarks the forward pass of a capsule
     convolution implemented with LIBXSMM batch-reduce GEMM (BRGEMM)
     kernels against a naive reference implementation. */
  float *naive_input, *naive_output, *naive_filter;
  float *gemm_input, *gemm_output, *gemm_filter;
  float *check_output;
  unsigned long long *aoff, *boff;
  int ifhp, ifwp, ofhp, ofwp, ofh, ofw;
  int stride_h, stride_w, pad_h_in, pad_w_in, pad_h_out, pad_w_out;
  int ldx;
  int brcount;
  naive_conv_t naive_param;
  gemm_conv_t gemm_param;
  correctness_t norms_fwd;
  /* some parameters we can overwrite via cli,
     default is some inner layer of overfeat */
  int iters = 100;         /* repetitions of benchmark */
  int ifw = 16;            /* input width, "W" */
  int ifh = 16;            /* input height, "H" */
  int nImg = 128;          /* mini-batch size, "N" */
  int nIfm = 128;          /* number of input feature maps, "C" */
  int nOfm = 256;          /* number of output feature maps, "K" */
  int kh = 3;              /* filter height, "R" */
  int kw = 3;              /* filter width, "S" */
  int pad_h = 0;           /* padding in output */
  int pad_w = 0;           /* padding in output */
  int stride = 2;          /* stride when accessing inputs */
  int Mh = 4;
  int Mw = 4;
  int RK = 4;
  char type = 'F';         /* 'A': ALL, 'F': FP, 'B': BP, 'U', WU */
#if defined(_OPENMP)
  int nThreads = omp_get_max_threads(); /* number of threads */
#else
  int nThreads = 1; /* number of threads */
#endif
  unsigned long long l_start, l_end;
  double l_total = 0.0;
  double flops = 0.0;
  int i;
  float beta = 0.0f;

  memset(&norms_fwd, 0, sizeof(norms_fwd));

  if (argc > 1 && !strncmp(argv[1], "-h", 3)) {
    /* fixed: the usage string now matches the actual CLI parse order below
       (W before H, S before R, and all trailing parameters listed) */
    printf("\n\n\nUsage: %s iters W H N C K S R pad_w pad_h stride RK Mw Mh type(F,B,U,A)\n\n\n", argv[0]);
    return -1;
  }
  srand48(1); /* fixed seed: reproducible data and checksums */

  /* reading new values from cli */
  i = 1;
  if (argc > i) iters = atoi(argv[i++]);
  if (argc > i) ifw = atoi(argv[i++]);
  if (argc > i) ifh = atoi(argv[i++]);
  if (argc > i) nImg = atoi(argv[i++]);
  if (argc > i) nIfm = atoi(argv[i++]);
  if (argc > i) nOfm = atoi(argv[i++]);
  if (argc > i) kw = atoi(argv[i++]);
  if (argc > i) kh = atoi(argv[i++]);
  if (argc > i) pad_w = atoi(argv[i++]);
  if (argc > i) pad_h = atoi(argv[i++]);
  if (argc > i) stride = atoi(argv[i++]);
  if (argc > i) RK = atoi(argv[i++]);
  if (argc > i) Mw = atoi(argv[i++]);
  if (argc > i) Mh = atoi(argv[i++]);
  if (argc > i) type = *(argv[i++]);

  /* apply stride in both dimensions */
  stride_w = stride;
  stride_h = stride;

  /* handle physical padding */
#ifdef USE_PHYSICAL_PADDING
#error "physical padding is not supported right now!"
  pad_h_in = pad_h;
  pad_w_in = pad_w;
  pad_h_out = 0;
  pad_w_out = 0;
#else
  pad_h_in = 0;
  pad_w_in = 0;
  pad_h_out = 0;
  pad_w_out = 0;
#endif

  /* deriving some values image size */
  ofh = (ifh + 2 * pad_h - kh) / stride_h + 1;
  ofw = (ifw + 2 * pad_w - kw) / stride_w + 1;
  ifhp = ifh + 2 * pad_h_in;
  ifwp = ifw + 2 * pad_w_in;
  ofhp = ofh + 2 * pad_h_out;
  ofwp = ofw + 2 * pad_w_out;

  /* set struct for naive convolution */
  naive_param.nImg = nImg;
  naive_param.nIfm = nIfm;
  naive_param.nOfm = nOfm;
  naive_param.ifhp = ifhp;
  naive_param.ifwp = ifwp;
  naive_param.ofhp = ofhp;
  naive_param.ofwp = ofwp;
  naive_param.ifh = ifh;
  naive_param.ifw = ifw;
  naive_param.ofh = ofh;
  naive_param.ofw = ofw;
  naive_param.pad_h = pad_h;
  naive_param.pad_w = pad_w;
  naive_param.pad_h_in = pad_h_in;
  naive_param.pad_w_in = pad_w_in;
  naive_param.pad_h_out = pad_h_out;
  naive_param.pad_w_out = pad_w_out;
  naive_param.kh = kh;
  naive_param.kw = kw;
  naive_param.stride_h = stride_h;
  naive_param.stride_w = stride_w;
  naive_param.RK = RK;
  naive_param.Mh = Mh;
  naive_param.Mw = Mw;

  /* set struct for the blocked GEMM convolution (fixed: comment previously
     said "naive" here as well) */
  gemm_param.nImg = nImg;
  gemm_param.nBIfm = nIfm/CHANNEL_BLOCKING;
  gemm_param.nbIfm = CHANNEL_BLOCKING;
  gemm_param.nBOfm = nOfm/CHANNEL_BLOCKING;
  gemm_param.nbOfm = CHANNEL_BLOCKING;
  gemm_param.ifhp = ifhp;
  gemm_param.ifwp = ifwp;
  gemm_param.ofhp = ofhp;
  gemm_param.ofwp = ofwp;
  gemm_param.ifh = ifh;
  gemm_param.ifw = ifw;
  gemm_param.ofh = ofh;
  gemm_param.ofw = ofw;
  gemm_param.pad_h = pad_h;
  gemm_param.pad_w = pad_w;
  gemm_param.pad_h_in = pad_h_in;
  gemm_param.pad_w_in = pad_w_in;
  gemm_param.pad_h_out = pad_h_out;
  gemm_param.pad_w_out = pad_w_out;
  gemm_param.kh = kh;
  gemm_param.kw = kw;
  gemm_param.stride_h = stride_h;
  gemm_param.stride_w = stride_w;
  gemm_param.RK = RK;
  gemm_param.Mh = Mh;
  gemm_param.Mw = Mw;

  /* compute brcount: one batch-reduce term per filter tap */
  brcount = kh*kw;
  gemm_param.brcount = brcount;

  /* some empty lines at the beginning */
  printf("\n\n\n");

  /* print some summary */
  printf("##########################################\n");
  printf("# Setting Up #\n");
  printf("##########################################\n");
  /* fixed: R is the filter height (kh) and S the filter width (kw); the
     arguments were previously passed in the opposite order */
  printf("PARAMS: W:%d H:%d N:%d C:%d K:%d R:%d S:%d P:%d Q:%d STRIDE: %d RK: %d Mh: %d Mw: %d\n", ifw, ifh, nImg, nIfm, nOfm, kh, kw, ofh, ofw, stride, RK, Mh, Mw);
  printf("PARAMS: ITERS:%d Threads:%d\n", iters, nThreads);
  printf(" InImg %dx%d Padded (%dx%d)\n", ifh, ifw, ifhp, ifwp);
  printf("OutImg %dx%d Padded (%dx%d)\n", ofh, ofw, ofhp, ofwp);
  printf("SIZE Poses (MB): %10.2f MiB\n", (double)(nImg*nIfm*ifhp*ifwp*Mh*RK*sizeof(float))/(1024.0*1024.0) );
  printf("SIZE Votes (MB): %10.2f MiB\n", (double)(nImg*nOfm*ofhp*ofwp*Mh*Mw*sizeof(float))/(1024.0*1024.0) );
  printf("SIZE Poses (1): %10.2f MiB\n", (double)(1*nIfm*ifhp*ifwp*Mh*RK* sizeof(float))/(1024.0*1024.0) );
  printf("SIZE Votes (1): %10.2f MiB\n", (double)(1*nOfm*ofhp*ofwp*Mh*Mw* sizeof(float))/(1024.0*1024.0) );
  printf("SIZE Weight : %10.2f MiB\n", (double)(nIfm*nOfm*kw*kh*Mw*RK* sizeof(float))/(1024.0*1024.0) );

  /* check for pass to run */
  if (type != 'A' && type != 'F' && type != 'B' && type != 'U') {
    printf("\ntype needs to be 'A' (All), 'F' (FP only), 'B' (BP only), 'U' (WU only)\n\n\n");
    return -1;
  }
  if ((nIfm % CHANNEL_BLOCKING != 0) || (nOfm % CHANNEL_BLOCKING != 0) ) {
    printf("\nThis code only works for ofm/ifm mod %i = 0!\n\n\n", CHANNEL_BLOCKING);
    return -1;
  }
  if (pad_w !=0 || pad_h !=0 || pad_h_in != 0 || pad_w_in != 0 || pad_h_out !=0 || pad_w_out != 0) {
    /* fixed: the '!' was previously printed after the newline */
    printf("\nThis code doesn't support padding right now!\n\n\n");
    return -1;
  }

  /* JIT GEMM kernels: one with beta=0 (first tap, overwrites C) and one
     with the default beta (accumulates the remaining taps) */
  ldx = stride_w*CHANNEL_BLOCKING;
  fwd_brgemmz = libxsmm_smmdispatch_reducebatch_offs_unroll(CHANNEL_BLOCKING, ofwp, CHANNEL_BLOCKING, brcount, NULL, &ldx, NULL, NULL, &beta, NULL, NULL);
  fwd_brgemma = libxsmm_smmdispatch_reducebatch_offs_unroll(CHANNEL_BLOCKING, ofwp, CHANNEL_BLOCKING, brcount, NULL, &ldx, NULL, NULL, NULL, NULL, NULL);
  printf("BRGEMM FWD col-major: m=%d, n=%d, k=%d, lda=%d, ldb=%d, ldc=%d, transa='n', transb='n', alpha=1.0, beta=1.0, brcount=%d\n", CHANNEL_BLOCKING, ofwp, CHANNEL_BLOCKING, CHANNEL_BLOCKING, stride_w*CHANNEL_BLOCKING, CHANNEL_BLOCKING, brcount);

  /* allocate data (2 MiB alignment for huge-page friendliness) */
  naive_input = (float*)libxsmm_aligned_malloc( nImg*nIfm*ifhp*ifwp*Mh*RK*sizeof(float), 2097152);
  naive_output = (float*)libxsmm_aligned_malloc( nImg*nOfm*ofhp*ofwp*Mh*Mw*sizeof(float), 2097152);
  naive_filter = (float*)libxsmm_aligned_malloc( nOfm*nIfm*kh*kw*Mw*RK* sizeof(float), 2097152);
  gemm_input = (float*)libxsmm_aligned_malloc( nImg*nIfm*ifhp*ifwp*Mh*RK*sizeof(float), 2097152);
  gemm_output = (float*)libxsmm_aligned_malloc( nImg*nOfm*ofhp*ofwp*Mh*Mw*sizeof(float), 2097152);
  gemm_filter = (float*)libxsmm_aligned_malloc( nOfm*nIfm*kh*kw*Mw*RK* sizeof(float), 2097152);
  check_output = (float*)libxsmm_aligned_malloc( nImg*nOfm*ofhp*ofwp*Mh*Mw*sizeof(float), 2097152);
  aoff = (unsigned long long*)libxsmm_aligned_malloc( brcount*sizeof(unsigned long long), 2097152);
  boff = (unsigned long long*)libxsmm_aligned_malloc( brcount*sizeof(unsigned long long), 2097152);

  /* initialize data */
  init_buf(naive_input, nImg*nIfm*ifhp*ifwp*Mh*RK, 0, 0);
  set_zeropad_nchw(naive_input, nImg, nIfm, ifhp, ifwp, Mh, RK, pad_h_in, pad_w_in);
  init_buf(naive_filter, nOfm*nIfm*kh*kw*Mw*RK, 0, 0);
  zero_buf(naive_output, nImg*nOfm*ofhp*ofwp*Mw*Mh);

  /* copy data into GEMM optimized format */
  copy_naiveP_to_GEMM(naive_input, gemm_input, nImg, ifhp, ifwp, nIfm, Mh, RK);
  copy_naiveF_to_GEMM(naive_filter, gemm_filter, kh, kw, nIfm, nOfm, RK, Mw);
  zero_buf(gemm_output, nImg*nOfm*ofhp*ofwp*Mw*Mh);

  /* compute BRGEMM offsets */
  compute_broff( &gemm_param, aoff, boff );

  /* check correctness forward */
  if (type == 'A' || type == 'F') {
    printf("##########################################\n");
    printf("# Correctness - FWD (custom-Storage) #\n");
    printf("##########################################\n");
    /* run naive convolution */
    naive_convcaps_fp(&naive_param, naive_input, naive_output, naive_filter);
    gemm_convcaps_fp(&gemm_param, gemm_input, gemm_output, gemm_filter, aoff, boff);
    copy_GEMM_to_naiveV(gemm_output, check_output, nImg, ofhp, ofwp, nOfm, Mh, Mw);
    /* compare */
    compare_buf(naive_output, check_output, nImg*nOfm*ofhp*ofwp*Mh*Mw, &norms_fwd);
    printf(" 1-norm of reference: %f\n", norms_fwd.one_norm_ref);
    printf(" 1-norm of GEMM-code: %f\n", norms_fwd.one_norm_test);
    printf(" L2-error-norm of GEMM-code: %f\n", norms_fwd.l2_rel_err);
    printf(" inf-norm of comp. rel. error: %f\n", norms_fwd.max_rel_err);
    printf(" inf-norm of comp. abs. error: %f\n", norms_fwd.max_abs_err);
  }

  /* benchmark forward */
  if (type == 'A' || type == 'F') {
    printf("##########################################\n");
    printf("# Performance - FWD (custom-Storage) #\n");
    printf("##########################################\n");
    /* run LIBXSMM convolution for performance */
    l_start = libxsmm_timer_tick();
    for (i = 0; i < iters; ++i) {
      gemm_convcaps_fp(&gemm_param, gemm_input, gemm_output, gemm_filter, aoff, boff);
    }
    l_end = libxsmm_timer_tick();
    l_total = libxsmm_timer_duration(l_start, l_end);
    /* 2 flops (mul+add) per filter tap per output vote element */
    flops = (double)nImg * (double)nIfm * (double)nOfm * (double)ofh * (double)ofw * (double)(2 * kh * kw) * (double)RK * (double)Mh * (double)Mw * (double)iters;
    printf("GFLOP = %.5g\n", flops*1e-9/(double)iters);
    printf("fp time = %.5g\n", ((double)(l_total/iters)));
    printf("GFLOPS = %.5g\n", (flops*1e-9)/l_total);
    printf("PERFDUMP,FP,%s,%i,%i,%i,%i,%i,%i,%i,%i,%i,%i,%i,%i,%i,%i,%.5g,%.5g,%f,%f,%f,%f,%f\n", LIBXSMM_VERSION, nThreads, nImg, nIfm, nOfm,
           ifw, ifh, kw, kh, stride, pad_h, pad_w, RK, Mh, Mw, ((double)(l_total/iters)), (flops*1e-9)/l_total,
           norms_fwd.max_rel_err, norms_fwd.max_abs_err, norms_fwd.l2_rel_err, norms_fwd.one_norm_ref, norms_fwd.one_norm_test );
  }

  /* deallocate data */
  libxsmm_free(naive_input);
  libxsmm_free(naive_output);
  libxsmm_free(naive_filter);
  libxsmm_free(gemm_input);
  libxsmm_free(gemm_output);
  libxsmm_free(gemm_filter);
  libxsmm_free(check_output);
  libxsmm_free(aoff);
  libxsmm_free(boff);

  /* some empty lines at the end */
  printf("\n\n\n");
  return 0;
}
|
fflush.c | // Copyright (c) 2018 - 2020 Arm, Ltd
#define _GNU_SOURCE
#include <shmem.h>
#include <unistd.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#include "timer.h"
#define GET_INT_SHMEM_FP 0
#define FLUSH_SHMEM_FP 1
#define PUT_INT_SHMEM_FP 2
#define GET_INT_FILE 3
#define FLUSH_FILE 4
#define PUT_INT_FILE 5
#define N_TIMERS 6
const size_t fsize = 1L<<28;
const int num_iters = 1;
#define nints (1<<20)
static int ibuf[nints];
my_timer_t timers[N_TIMERS];
/* Create (or overwrite) fname with fsize bytes of random int data, written
   in nints-int chunks from the static staging buffer ibuf. */
void make_file(char *fname)
{
  size_t tot_ints = fsize / sizeof(int);
  int fd = open(fname, O_CREAT | O_RDWR, S_IRWXU);
  if (fd < 0) {
    perror("failed to open file");
    return;
  }
  /* fixed: previously printed the chunk size (nints*sizeof(int)) although
     fsize bytes are created */
  printf("Creating %llu bytes of data into fd %d\n", (unsigned long long)fsize, fd);
#pragma omp parallel for
  for (size_t idx = 0; idx < nints; idx++) {
    ibuf[idx] = rand(); /* NOTE(review): rand() is not thread-safe; acceptable for throwaway test data */
  }
  for (size_t idx = 0; idx < tot_ints; idx += nints) {
    /* fixed: the write() result was previously ignored */
    if (write(fd, ibuf, nints * sizeof(int)) < 0) {
      perror("failed to write file");
      break;
    }
  }
  close(fd);
}
void rw_file(char *fname)
{
int me = shmem_my_pe ();
int npes = shmem_n_pes ();
int fd = open(fname, O_SYNC | O_DIRECT | O_RDWR);
shmem_barrier_all();
size_t perpe = fsize / npes;
size_t int_perpe = perpe / sizeof(int);
size_t my_offset = perpe * me;
size_t *roffs;
size_t iters;
printf ("%d: Access %d ints at address [%lx:%lx] (size=%u)\n",
me, int_perpe, (long unsigned)my_offset, (long unsigned)my_offset + perpe, perpe);
timer_start(&(timers[GET_INT_FILE]));
lseek(fd, my_offset, SEEK_SET);
for (size_t idx = 0; idx < int_perpe; idx += nints) {
const size_t nread = (idx + nints) > int_perpe ? int_perpe - idx : nints;
read(fd, ibuf, nread * sizeof(int));
}
timer_stop(&(timers[GET_INT_FILE]));
printf ("%d: My two ints in the file are: %d, %d\n", me, ibuf[0], ibuf[1]);
ibuf[0] += me + 1;
ibuf[1] += me + 1;
timer_start(&(timers[PUT_INT_FILE]));
lseek(fd, my_offset, SEEK_SET);
for (size_t idx = 0; idx < int_perpe; idx += nints) {
const size_t nwrite = (idx + nints) > int_perpe ? int_perpe - idx : nints;
write(fd, ibuf, nwrite * sizeof(int));
}
timer_stop(&(timers[PUT_INT_FILE]));
timer_start(&(timers[FLUSH_FILE]));
fsync(fd);
timer_stop(&(timers[FLUSH_FILE]));
shmem_barrier_all();
close(fd);
}
/* Fspace counterpart of rw_file(): each PE gets/puts its slice of the file
   image through OpenSHMEM (shmem_int_get/put) and PE 0 flushes the fspace.
   Timed with the *_SHMEM_FP timers. */
void rw_fspace(shmem_fspace_t fid, char *fname)
{
int me = shmem_my_pe ();
int npes = shmem_n_pes ();
shmem_fspace_stat_t fstat;
/* query which PEs expose this fspace */
if (shmem_fspace_stat(fid, &fstat) != 0) {
printf ("Failed to stat fspace\n");
/* NOTE(review): early return skips shmem_barrier_all below; if only some
   PEs fail here, the others will block in the barrier -- verify intent */
return;
}
printf ("shmem_fopen: the fspace is accessible on pe (%d:%d)\n",
fstat.pe_start, fstat.pe_start + fstat.pe_size - 1);
//int nfpes = npes;
//while (nfpes > fstat.pe_size) {
// nfpes >>= 1;
//}
int err;
/* open the file inside the fspace; the -1/1 arguments presumably request
   default sizing/striding -- see the shmem_open prototype to confirm */
shmem_fp_t *fp = shmem_open(fid, fname, fsize, -1, -1, 1, -1, &err);
if (fp == NULL) {
printf ("Failed to open file. Got NULL pointer. Error code is %d\n", err);
return;
}
printf ("shmem_fopen: fp returned is %p, addr=%lx, size=%u, unit size=%d, pe [%d:%d] by %d\n",
fp, (long unsigned)fp->addr, fp->size, fp->unit_size, fp->pe_start,
fp->pe_start + fp->pe_size - 1, fp->pe_stride);
if (fp->size < fsize) {
/* NOTE(review): fp->size is printed with both %u above and %llu here while
   fsize is size_t -- the conversion specifiers cannot all be right; check
   the declared type of shmem_fp_t.size */
printf ("File is incorrect size. %llu < %llu\n", fp->size, fsize);
shmem_close(fp, 0);
return;
}
/* map this PE onto one of the fspace-serving PEs (several compute PEs can
   share one file PE) */
int pes_per_fpe = npes / fp->pe_size;
int my_fpe = fp->pe_start + (me / pes_per_fpe);
printf ("%d: There are %d pes per fpe. My fpe is %d\n", me, pes_per_fpe, my_fpe);
size_t perpe = fsize / npes;
size_t int_perpe = perpe / sizeof(int);
/* base address of this PE's slice within its fpe's region; assumes fp->addr
   is an integer-typed base address -- TODO confirm against shmem_fp_t */
char *my_mem = (char*)(fp->addr + (perpe * (me % pes_per_fpe)));
/* NOTE(review): int_perpe/perpe are size_t but printed with %d/%u -- should
   be %zu on LP64 platforms */
printf ("%d: Access %d ints into ibuf %p at address [%lx:%lx] (size=%u) on fpe %d\n",
me, int_perpe, ibuf, (long unsigned)my_mem, (long unsigned)my_mem + perpe, perpe, my_fpe);
/* read the slice in nints-sized chunks into the staging buffer */
timer_start(&(timers[GET_INT_SHMEM_FP]));
for (size_t idx = 0; idx < int_perpe; idx += nints) {
const size_t nget = (idx + nints) > int_perpe ? int_perpe - idx : nints;
shmem_int_get(ibuf, (int*)my_mem + idx, nget, my_fpe);
}
timer_stop(&(timers[GET_INT_SHMEM_FP]));
printf ("%d: My two ints in the file are: %d, %d\n", me, ibuf[0], ibuf[1]);
/* modify the first two ints so a later run can observe the update */
ibuf[0] += me + 1;
ibuf[1] += me + 1;
/* write the slice back chunk by chunk */
timer_start(&(timers[PUT_INT_SHMEM_FP]));
for (size_t idx = 0; idx < int_perpe; idx += nints) {
const size_t nput = (idx + nints) > int_perpe ? int_perpe - idx : nints;
shmem_int_put((int*)my_mem + idx, ibuf, nput, my_fpe);
}
timer_stop(&(timers[PUT_INT_SHMEM_FP]));
/* only PE 0 issues the flush; the other PEs time an empty region */
timer_start(&(timers[FLUSH_SHMEM_FP]));
if (me == 0) {
shmem_fp_flush(fp, 0);
}
timer_stop(&(timers[FLUSH_SHMEM_FP]));
shmem_barrier_all();
shmem_close(fp, 0);
}
/* Entry point.  With one argument the program only generates the test file;
   with three it connects to the fspace server, runs the shmem and POSIX I/O
   passes, and reports the per-phase timings. */
int main (int argc, char **argv)
{
  char host[1024];
  gethostname(host, sizeof(host));

  if (argc != 2 && argc != 4) {
    printf ("Usage: %s FNAME [HOST PORT]\n", argv[0]);
    printf ("argc = %d\n", argc);
    return 1;
  }

  char *path = argv[1];
  if (argc == 2) {
    /* local mode: create the input file and exit */
    make_file(path);
    return 0;
  }

  /* connection parameters for the storage server */
  shmem_fspace_conx_t conn;
  conn.storage_server_name = argv[2];
  conn.storage_server_port = atoi(argv[3]);

  shmem_init();
  const int mype = shmem_my_pe ();
  const int numpes = shmem_n_pes ();
  printf ("shmem_fopen: hello from node %4d of %4d on %s\n", mype, numpes, host);
  fflush(stdout);
  shmemio_set_loglvl("info");
  printf ("shmem_fopen: %d: Connect to %s:%d\n", mype, conn.storage_server_name, conn.storage_server_port);

  shmem_fspace_t fspace = shmem_connect(&conn);
  if (fspace == SHMEM_NULL_FSPACE) {
    printf ("shmem_fopen: connect failed\n");
  }
  else {
    int t;
    for (t = 0; t < N_TIMERS; t++) {
      timer_reset(&(timers[t]));
    }
    int rep;
    for (rep = 0; rep < num_iters; rep++) {
      rw_fspace(fspace, path);
      rw_file(path);
    }
    shmem_disconnect(fspace);
  }
  shmem_barrier_all();

  /* timing report, serialized across PEs */
  print_serial(&(timers[GET_INT_FILE]), "get file", mype, numpes);
  print_serial(&(timers[PUT_INT_FILE]), "put file", mype, numpes);
  print_serial(&(timers[FLUSH_FILE]), "flush file", mype, numpes);
  print_serial(&(timers[GET_INT_SHMEM_FP]), "get fspace", mype, numpes);
  print_serial(&(timers[PUT_INT_SHMEM_FP]), "put fspace", mype, numpes);
  print_serial(&(timers[FLUSH_SHMEM_FP]), "flush fp", mype, numpes);
  shmem_finalize();
  return 0;
}
|
GB_binop__plus_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__plus_int8)
// A.*B function (eWiseMult): GB (_AemultB_01__plus_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__plus_int8)
// A.*B function (eWiseMult): GB (_AemultB_03__plus_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__plus_int8)
// A*D function (colscale): GB (_AxD__plus_int8)
// D*A function (rowscale): GB (_DxB__plus_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__plus_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__plus_int8)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__plus_int8)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__plus_int8)
// C=scalar+B GB (_bind1st__plus_int8)
// C=scalar+B' GB (_bind1st_tran__plus_int8)
// C=A+scalar GB (_bind2nd__plus_int8)
// C=A'+scalar GB (_bind2nd_tran__plus_int8)
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij + bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x + y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PLUS || GxB_NO_INT8 || GxB_NO_PLUS_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B for the int8 PLUS operator, with C, A and B all dense.  All work
// is done by the shared template, specialized by the GB_* macros above.
void GB (_Cdense_ewise3_accum__plus_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// NOTE(review): unlike the other kernels in this file there is no
// GB_DISABLE guard here -- presumably the caller checks it; confirm.
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B (no accumulation) for int8 PLUS, all three matrices dense.
// Returns GrB_NO_VALUE when this operator/type pair is compiled out
// (GB_DISABLE), telling the caller to fall back to the generic kernel.
GrB_Info GB (_Cdense_ewise3_noaccum__plus_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C with int8 PLUS.
// B_ek_slicing describes how B's entries are partitioned across tasks.
GrB_Info GB (_Cdense_accumB__plus_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a single scalar b into every entry of a dense matrix C.
// p_bwork points at the scalar, already typecast to int8_t by the caller.
GrB_Info GB (_Cdense_accumb__plus_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable (the braced block above returns); kept as emitted by the
// code generator
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, applying the
// int8 PLUS operator.  Cx aliases C->x; the template fills it in.
GrB_Info GB (_AxD__plus_int8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, applying the
// int8 PLUS operator.  Mirror image of _AxD above.
GrB_Info GB (_DxB__plus_int8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B (set union of patterns) with int8 PLUS.
// The C_to_* maps and TaskList describe the precomputed parallel schedule.
GrB_Info GB (_AaddB__plus_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspaces the template may allocate for slicing M, A and B
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B (set intersection of patterns) with
// int8 PLUS, using the precomputed task schedule in TaskList.
GrB_Info GB (_AemultB_01__plus_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult specialization: C<#> = A.*B where A is sparse/hypersparse and
// B is bitmap/full.  flipxy asks for f(y,x) instead of f(x,y); since
// GB_BINOP_FLIP is 0 for PLUS (commutative), the flip branch compiles out.
GrB_Info GB (_AemultB_02__plus_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult specialization: C<M> = A.*B where M is sparse/hypersparse and
// both A and B are bitmap/full; M_ek_slicing partitions M across tasks.
GrB_Info GB (_AemultB_03__plus_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where the result C is held
// in bitmap form.
GrB_Info GB (_AemultB_bitmap__plus_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// bind1st: Cx [p] = x + Bx [p] for every entry present in B, with the
// scalar x bound as the first operand.
GrB_Info GB (_bind1st__plus_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb, // bitmap of B (checked via the GBB macro)
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries not present according to the bitmap
if (!GBB (Bb, p)) continue ;
int8_t bij = GBX (Bx, p, false) ;
Cx [p] = (x + bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// bind2nd: Cx [p] = Ax [p] + y for every entry present in A, with the
// scalar y bound as the second operand.
GrB_Info GB (_bind2nd__plus_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab, // bitmap of A (checked via the GBB macro)
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present according to the bitmap
if (!GBB (Ab, p)) continue ;
int8_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij + y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x + aij) ; \
}
// C = op (x, A'): transpose A and apply cij = x + aij, using the
// GB_CAST_OP macro defined just above this function.
GrB_Info GB (_bind1st_tran__plus_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the rest of the file (same type here since A and B
// are both int8_t; the generator emits this pair unconditionally)
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij + y) ; \
}
// C = op (A', y): transpose A and apply cij = aij + y, using the
// GB_CAST_OP macro defined just above this function.
GrB_Info GB (_bind2nd_tran__plus_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
c-tree.h | /* Definitions for C parsing and type checking.
Copyright (C) 1987-2013 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#ifndef GCC_C_TREE_H
#define GCC_C_TREE_H
#include "c-family/c-common.h"
#include "diagnostic.h"
/* struct lang_identifier is private to c-decl.c, but langhooks.c needs to
know how big it is. This is sanity-checked in c-decl.c. */
#define C_SIZEOF_STRUCT_LANG_IDENTIFIER \
(sizeof (struct c_common_identifier) + 3 * sizeof (void *))
/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */
#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1 (TYPE)
/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is volatile. */
#define C_TYPE_FIELDS_VOLATILE(TYPE) TREE_LANG_FLAG_2 (TYPE)
/* In a RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE
nonzero if the definition of the type has already started. */
#define C_TYPE_BEING_DEFINED(TYPE) TYPE_LANG_FLAG_0 (TYPE)
/* In an incomplete RECORD_TYPE or UNION_TYPE, a list of variable
declarations whose type would be completed by completing that type. */
#define C_TYPE_INCOMPLETE_VARS(TYPE) TYPE_VFIELD (TYPE)
/* In an IDENTIFIER_NODE, nonzero if this identifier is actually a
keyword. C_RID_CODE (node) is then the RID_* value of the keyword,
and C_RID_YYCODE is the token number wanted by Yacc. */
#define C_IS_RESERVED_WORD(ID) TREE_LANG_FLAG_0 (ID)
/* Record whether a type or decl was written with nonconstant size.
   Note that TYPE_SIZE may have simplified to a constant.  */
#define C_TYPE_VARIABLE_SIZE(TYPE) TYPE_LANG_FLAG_1 (TYPE)
/* NOTE(review): despite the parameter being named TYPE, this accessor
   reads a DECL flag and must be applied to a decl node.  */
#define C_DECL_VARIABLE_SIZE(TYPE) DECL_LANG_FLAG_0 (TYPE)
/* Record whether a type is defined inside a struct or union type.
   This is used for -Wc++-compat.  */
#define C_TYPE_DEFINED_IN_STRUCT(TYPE) TYPE_LANG_FLAG_2 (TYPE)
/* Record whether a typedef for type `int' was actually `signed int'.  */
#define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP)
/* For a FUNCTION_DECL, nonzero if it was defined without an explicit
   return type.  DECL_LANG_FLAG_1 is shared with
   C_TYPEDEF_EXPLICITLY_SIGNED above; the two uses apply to different
   kinds of decls (typedefs vs. functions), so they never collide.  */
#define C_FUNCTION_IMPLICIT_INT(EXP) DECL_LANG_FLAG_1 (EXP)
/* For a FUNCTION_DECL, nonzero if it was an implicit declaration.  */
#define C_DECL_IMPLICIT(EXP) DECL_LANG_FLAG_2 (EXP)
/* For FUNCTION_DECLs, evaluates true if the decl is built-in but has
   been declared.  */
#define C_DECL_DECLARED_BUILTIN(EXP) \
DECL_LANG_FLAG_3 (FUNCTION_DECL_CHECK (EXP))
/* For FUNCTION_DECLs, evaluates true if the decl is built-in, has a
   built-in prototype and does not have a non-built-in prototype.  */
#define C_DECL_BUILTIN_PROTOTYPE(EXP) \
DECL_LANG_FLAG_6 (FUNCTION_DECL_CHECK (EXP))
/* Record whether a decl was declared register.  This is strictly a
   front-end flag, whereas DECL_REGISTER is used for code generation;
   they may differ for structures with volatile fields.  */
#define C_DECL_REGISTER(EXP) DECL_LANG_FLAG_4 (EXP)
/* Record whether a decl was used in an expression anywhere except an
   unevaluated operand of sizeof / typeof / alignof.  This is only
   used for functions declared static but not defined, though outside
   sizeof and typeof it is set for other function decls as well.  */
#define C_DECL_USED(EXP) DECL_LANG_FLAG_5 (FUNCTION_DECL_CHECK (EXP))
/* Record whether a variable has been declared threadprivate by
   #pragma omp threadprivate.  */
#define C_DECL_THREADPRIVATE_P(DECL) DECL_LANG_FLAG_3 (VAR_DECL_CHECK (DECL))
/* Nonzero for a decl which either doesn't exist or isn't a prototype.
   N.B. Could be simplified if all built-in decls had complete prototypes
   (but this is presently difficult because some of them need FILE*).  */
#define C_DECL_ISNT_PROTOTYPE(EXP) \
(EXP == 0 \
|| (!prototype_p (TREE_TYPE (EXP)) \
&& !DECL_BUILT_IN (EXP)))
/* For FUNCTION_TYPE, a hidden list of types of arguments.  The same as
   TYPE_ARG_TYPES for functions with prototypes, but created for functions
   without prototypes.  */
#define TYPE_ACTUAL_ARG_TYPES(NODE) TYPE_LANG_SLOT_1 (NODE)
/* For a CONSTRUCTOR, whether some initializer contains a
   subexpression meaning it is not a constant expression.  */
#define CONSTRUCTOR_NON_CONST(EXPR) TREE_LANG_FLAG_1 (CONSTRUCTOR_CHECK (EXPR))
/* Record parser information about an expression that is irrelevant
   for code generation alongside a tree representing its value.  */
struct c_expr
{
  /* The value of the expression.  */
  tree value;
  /* Record the original unary/binary operator of an expression, which may
     have been changed by fold, STRING_CST for unparenthesized string
     constants, C_MAYBE_CONST_EXPR for __builtin_constant_p calls
     (even if parenthesized), for subexpressions, and for non-constant
     initializers, or ERROR_MARK for other expressions (including
     parenthesized expressions).  */
  enum tree_code original_code;
  /* If not NULL, the original type of an expression.  This will
     differ from the type of the value field for an enum constant.
     The type of an enum constant is a plain integer type, but this
     field will be the enum type.  NOTE(review): presumably consulted
     by diagnostics that care about the as-written type -- confirm in
     c-typeck.c.  */
  tree original_type;
};
/* Type alias for struct c_expr.  This allows to use the structure
   inside the VEC types.  */
typedef struct c_expr c_expr_t;
/* A varray of c_expr_t.  */
/* Append a new c_expr_t element to V.  The local temporary ensures a
   side-effecting ELEM expression is evaluated exactly once before the
   push.  */
#define C_EXPR_APPEND(V, ELEM) \
do { \
c_expr_t __elem = (ELEM); \
vec_safe_push (V, __elem); \
} while (0)
/* A kind of type specifier.  Note that this information is currently
   only used to distinguish tag definitions, tag references and typeof
   uses.  */
enum c_typespec_kind {
  /* No typespec.  This appears only in struct c_declspec.  */
  ctsk_none,
  /* A reserved keyword type specifier.  */
  ctsk_resword,
  /* A reference to a tag, previously declared, such as "struct foo".
     This includes where the previous declaration was as a different
     kind of tag, in which case this is only valid if shadowing that
     tag in an inner scope.  */
  ctsk_tagref,
  /* A reference to a tag, not previously declared in a visible
     scope.  */
  ctsk_tagfirstref,
  /* A definition of a tag such as "struct foo { int a; }".  */
  ctsk_tagdef,
  /* A typedef name.  */
  ctsk_typedef,
  /* An ObjC-specific kind of type specifier.  */
  ctsk_objc,
  /* A typeof specifier.  */
  ctsk_typeof
  /* NOTE(review): struct c_declspecs stores this in a 3-bit bitfield,
     so this enum must not grow beyond 8 enumerators.  */
};
/* A type specifier: this structure is created in the parser and
   passed to declspecs_add_type only.  */
struct c_typespec {
  /* What kind of type specifier this is.  */
  enum c_typespec_kind kind;
  /* Whether the expression has operands suitable for use in constant
     expressions.  NOTE(review): presumably merged into the field of
     the same name in struct c_declspecs by declspecs_add_type.  */
  bool expr_const_operands;
  /* The specifier itself.  */
  tree spec;
  /* An expression to be evaluated before the type specifier, in the
     case of typeof specifiers, or NULL otherwise or if no such
     expression is required for a particular typeof specifier.  In
     particular, when typeof is applied to an expression of variably
     modified type, that expression must be evaluated in order to
     determine array sizes that form part of the type, but the
     expression itself (as opposed to the array sizes) forms no part
     of the type and so needs to be recorded separately.  */
  tree expr;
};
/* A storage class specifier.  */
enum c_storage_class {
  /* No storage class specifier was given.  */
  csc_none,
  csc_auto,
  csc_extern,
  csc_register,
  csc_static,
  csc_typedef
};
/* A type specifier keyword "void", "_Bool", "char", "int", "float",
   "double", "_Decimal32", "_Decimal64", "_Decimal128", "_Fract", "_Accum",
   or none of these.  (cts_int128 additionally covers the __int128
   extension keyword.)  */
enum c_typespec_keyword {
  cts_none,
  cts_void,
  cts_bool,
  cts_char,
  cts_int,
  cts_float,
  cts_int128,
  cts_double,
  cts_dfloat32,
  cts_dfloat64,
  cts_dfloat128,
  cts_fract,
  cts_accum
};
/* This enum lists all the possible declarator specifiers, storage
   class or attribute that a user can write.  There is at least one
   enumerator per possible declarator specifier in the struct
   c_declspecs below.
   It is used to index the array of declspec locations in struct
   c_declspecs.  */
enum c_declspec_word {
  cdw_typespec /* A catch-all for a typespec.  */,
  cdw_storage_class /* A catch-all for a storage class */,
  cdw_attributes,
  cdw_typedef,
  cdw_explicit_signed,
  cdw_deprecated,
  cdw_default_int,
  cdw_long,
  cdw_long_long,
  cdw_short,
  cdw_signed,
  cdw_unsigned,
  cdw_complex,
  cdw_inline,
  cdw_noreturn,
  cdw_thread,
  cdw_const,
  cdw_volatile,
  cdw_restrict,
  cdw_saturating,
  cdw_alignas,
  cdw_address_space,
  /* Not a real declspec: its value is the number of enumerators above
     and sizes the locations[] array in struct c_declspecs.  */
  cdw_number_of_elements /* This one must always be the last
                            enumerator.  */
};
/* A sequence of declaration specifiers in C.  When a new declaration
   specifier is added, please update the enum c_declspec_word above
   accordingly.  */
struct c_declspecs {
  /* Source location of each specifier, indexed by enum c_declspec_word.  */
  source_location locations[cdw_number_of_elements];
  /* The type specified, if a single type specifier such as a struct,
     union or enum specifier, typedef name or typeof specifies the
     whole type, or NULL_TREE if none or a keyword such as "void" or
     "char" is used.  Does not include qualifiers.  */
  tree type;
  /* Any expression to be evaluated before the type, from a typeof
     specifier.  */
  tree expr;
  /* The attributes from a typedef decl.  */
  tree decl_attr;
  /* When parsing, the attributes.  Outside the parser, this will be
     NULL; attributes (possibly from multiple lists) will be passed
     separately.  */
  tree attrs;
  /* The base-2 log of the greatest alignment required by an _Alignas
     specifier, in bytes, or -1 if no such specifiers with nonzero
     alignment.  */
  int align_log;
  /* The storage class specifier, or csc_none if none.  */
  enum c_storage_class storage_class;
  /* Any type specifier keyword used such as "int", not reflecting
     modifiers such as "short", or cts_none if none.  */
  ENUM_BITFIELD (c_typespec_keyword) typespec_word : 8;
  /* The kind of type specifier if one has been seen, ctsk_none
     otherwise.  */
  ENUM_BITFIELD (c_typespec_kind) typespec_kind : 3;
  /* Whether any expressions in typeof specifiers may appear in
     constant expressions.  */
  BOOL_BITFIELD expr_const_operands : 1;
  /* Whether any declaration specifiers have been seen at all.  */
  BOOL_BITFIELD declspecs_seen_p : 1;
  /* Whether something other than a storage class specifier or
     attribute has been seen.  This is used to warn for the
     obsolescent usage of storage class specifiers other than at the
     start of the list.  (Doing this properly would require function
     specifiers to be handled separately from storage class
     specifiers.)  */
  BOOL_BITFIELD non_sc_seen_p : 1;
  /* Whether the type is specified by a typedef or typeof name.  */
  BOOL_BITFIELD typedef_p : 1;
  /* Whether the type is explicitly "signed" or specified by a typedef
     whose type is explicitly "signed".  */
  BOOL_BITFIELD explicit_signed_p : 1;
  /* Whether the specifiers include a deprecated typedef.  */
  BOOL_BITFIELD deprecated_p : 1;
  /* Whether the type defaulted to "int" because there were no type
     specifiers.  */
  BOOL_BITFIELD default_int_p : 1;
  /* Whether "long" was specified.  */
  BOOL_BITFIELD long_p : 1;
  /* Whether "long" was specified more than once.  */
  BOOL_BITFIELD long_long_p : 1;
  /* Whether "short" was specified.  */
  BOOL_BITFIELD short_p : 1;
  /* Whether "signed" was specified.  */
  BOOL_BITFIELD signed_p : 1;
  /* Whether "unsigned" was specified.  */
  BOOL_BITFIELD unsigned_p : 1;
  /* Whether "complex" was specified.  */
  BOOL_BITFIELD complex_p : 1;
  /* Whether "inline" was specified.  */
  BOOL_BITFIELD inline_p : 1;
  /* Whether "_Noreturn" was specified.  */
  BOOL_BITFIELD noreturn_p : 1;
  /* Whether "__thread" was specified.  */
  BOOL_BITFIELD thread_p : 1;
  /* Whether "const" was specified.  */
  BOOL_BITFIELD const_p : 1;
  /* Whether "volatile" was specified.  */
  BOOL_BITFIELD volatile_p : 1;
  /* Whether "restrict" was specified.  */
  BOOL_BITFIELD restrict_p : 1;
  /* Whether "_Sat" was specified.  */
  BOOL_BITFIELD saturating_p : 1;
  /* Whether any alignment specifier (even with zero alignment) was
     specified.  */
  BOOL_BITFIELD alignas_p : 1;
  /* The address space that the declaration belongs to.  */
  addr_space_t address_space;
};
/* The various kinds of declarators in C.  */
enum c_declarator_kind {
  /* An identifier.  */
  cdk_id,
  /* A function.  */
  cdk_function,
  /* An array.  */
  cdk_array,
  /* A pointer.  */
  cdk_pointer,
  /* Parenthesized declarator with nested attributes.  */
  cdk_attrs
};
typedef struct c_arg_tag_d {
  /* The argument name.  NOTE(review): elements of this type are
     stored in c_arg_info.tags (the struct/union/enum tags defined
     among the parameters), so this is presumably the tag name rather
     than a parameter name -- confirm against c-decl.c.  */
  tree id;
  /* The type of the argument.  */
  tree type;
} c_arg_tag;
/* Information about the parameters in a function declarator.  */
struct c_arg_info {
  /* A list of parameter decls.  */
  tree parms;
  /* A list of structure, union and enum tags defined.  (GC-allocated
     vector of c_arg_tag.)  */
  vec<c_arg_tag, va_gc> *tags;
  /* A list of argument types to go in the FUNCTION_TYPE.  */
  tree types;
  /* A list of non-parameter decls (notably enumeration constants)
     defined with the parameters.  */
  tree others;
  /* A compound expression of VLA sizes from the parameters, or NULL.
     In a function definition, these are used to ensure that
     side-effects in sizes of arrays converted to pointers (such as a
     parameter int i[n++]) take place; otherwise, they are
     ignored.  */
  tree pending_sizes;
  /* True when these arguments had [*].  */
  BOOL_BITFIELD had_vla_unspec : 1;
};
/* A declarator.  Declarators form a chain linked through `declarator';
   which member of the union `u' is valid is determined by `kind'.  */
struct c_declarator {
  /* The kind of declarator.  */
  enum c_declarator_kind kind;
  location_t id_loc; /* Currently only set for cdk_id, cdk_array.  */
  /* Except for cdk_id, the contained declarator.  For cdk_id, NULL.  */
  struct c_declarator *declarator;
  union {
    /* For identifiers, an IDENTIFIER_NODE or NULL_TREE if an abstract
       declarator.  */
    tree id;
    /* For functions.  */
    struct c_arg_info *arg_info;
    /* For arrays.  */
    struct {
      /* The array dimension, or NULL for [] and [*].  */
      tree dimen;
      /* The qualifiers inside [].  */
      int quals;
      /* The attributes (currently ignored) inside [].  */
      tree attrs;
      /* Whether [static] was used.  */
      BOOL_BITFIELD static_p : 1;
      /* Whether [*] was used.  */
      BOOL_BITFIELD vla_unspec_p : 1;
    } array;
    /* For pointers, the qualifiers on the pointer type.  */
    int pointer_quals;
    /* For attributes.  */
    tree attrs;
  } u;
};
/* A type name, as written in a cast or sizeof: declaration specifiers
   plus an (abstract) declarator.  */
struct c_type_name {
  /* The declaration specifiers.  */
  struct c_declspecs *specs;
  /* The declarator.  */
  struct c_declarator *declarator;
};
/* A parameter as parsed in a function declarator.  */
struct c_parm {
  /* The declaration specifiers, minus any prefix attributes.  */
  struct c_declspecs *specs;
  /* The attributes.  */
  tree attrs;
  /* The declarator.  */
  struct c_declarator *declarator;
};
/* Used when parsing an enum.  Initialized by start_enum.  */
struct c_enum_contents
{
  /* While defining an enum type, this is 1 plus the last enumerator
     constant value.  */
  tree enum_next_value;
  /* Nonzero means that there was overflow computing enum_next_value.  */
  int enum_overflow;
};
/* A type of reference to a static identifier in an inline
   function.  Passed to record_inline_static below.  */
enum c_inline_static_type {
  /* Identifier with internal linkage used in function that may be an
     inline definition (i.e., file-scope static).  */
  csi_internal,
  /* Modifiable object with static storage duration defined in
     function that may be an inline definition (i.e., local
     static).  */
  csi_modifiable
};
/* in c-parser.c */
extern void c_parse_init (void);
/* in c-aux-info.c */
/* NOTE(review): presumably emits the -aux-info prototype record for a
   decl; confirm the meaning of the three int flags in c-aux-info.c.  */
extern void gen_aux_info_record (tree, int, int, int);
/* in c-decl.c */
/* Opaque bookkeeping types defined in c-decl.c.  */
struct c_spot_bindings;
struct c_struct_parse_info;
extern struct obstack parser_obstack;
extern tree c_break_label;
extern tree c_cont_label;
/* Scope and binding management.  */
extern bool global_bindings_p (void);
extern void push_scope (void);
extern tree pop_scope (void);
extern void c_bindings_start_stmt_expr (struct c_spot_bindings *);
extern void c_bindings_end_stmt_expr (struct c_spot_bindings *);
extern void record_inline_static (location_t, tree, tree,
enum c_inline_static_type);
extern void c_init_decl_processing (void);
extern void c_print_identifier (FILE *, tree, int);
extern int quals_from_declspecs (const struct c_declspecs *);
/* Declarator construction and declaration processing.  */
extern struct c_declarator *build_array_declarator (location_t, tree,
struct c_declspecs *,
bool, bool);
extern tree build_enumerator (location_t, location_t, struct c_enum_contents *,
tree, tree);
extern tree check_for_loop_decls (location_t, bool);
extern void mark_forward_parm_decls (void);
extern void declare_parm_level (void);
extern void undeclared_variable (location_t, tree);
/* Labels and goto handling.  */
extern tree lookup_label_for_goto (location_t, tree);
extern tree declare_label (tree);
extern tree define_label (location_t, tree);
extern struct c_spot_bindings *c_get_switch_bindings (void);
extern void c_release_switch_bindings (struct c_spot_bindings *);
extern bool c_check_switch_jump_warnings (struct c_spot_bindings *,
location_t, location_t);
extern void finish_decl (tree, location_t, tree, tree, tree);
extern tree finish_enum (tree, tree, tree);
extern void finish_function (void);
extern tree finish_struct (location_t, tree, tree, tree,
struct c_struct_parse_info *);
extern struct c_arg_info *build_arg_info (void);
extern struct c_arg_info *get_parm_info (bool, tree);
extern tree grokfield (location_t, struct c_declarator *,
struct c_declspecs *, tree, tree *);
extern tree groktypename (struct c_type_name *, tree *, bool *);
extern tree grokparm (const struct c_parm *, tree *);
extern tree implicitly_declare (location_t, tree);
extern void keep_next_level (void);
extern void pending_xref_error (void);
extern void c_push_function_context (void);
extern void c_pop_function_context (void);
extern void push_parm_decl (const struct c_parm *, tree *);
extern struct c_declarator *set_array_declarator_inner (struct c_declarator *,
struct c_declarator *);
extern tree c_builtin_function (tree);
extern tree c_builtin_function_ext_scope (tree);
extern void shadow_tag (const struct c_declspecs *);
extern void shadow_tag_warned (const struct c_declspecs *, int);
extern tree start_enum (location_t, struct c_enum_contents *, tree);
extern int start_function (struct c_declspecs *, struct c_declarator *, tree);
extern tree start_decl (struct c_declarator *, struct c_declspecs *, bool,
tree);
extern tree start_struct (location_t, enum tree_code, tree,
struct c_struct_parse_info **);
extern void store_parm_decls (void);
extern void store_parm_decls_from (struct c_arg_info *);
extern tree xref_tag (enum tree_code, tree);
extern struct c_typespec parser_xref_tag (location_t, enum tree_code, tree);
/* Builders for the parser-side c_parm / c_declarator structures.  */
extern struct c_parm *build_c_parm (struct c_declspecs *, tree,
struct c_declarator *);
extern struct c_declarator *build_attrs_declarator (tree,
struct c_declarator *);
extern struct c_declarator *build_function_declarator (struct c_arg_info *,
struct c_declarator *);
extern struct c_declarator *build_id_declarator (tree);
extern struct c_declarator *make_pointer_declarator (struct c_declspecs *,
struct c_declarator *);
/* Incremental construction of struct c_declspecs; each declspecs_add_*
   returns the (updated) specs so calls can be chained.  */
extern struct c_declspecs *build_null_declspecs (void);
extern struct c_declspecs *declspecs_add_qual (source_location,
struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_type (location_t,
struct c_declspecs *,
struct c_typespec);
extern struct c_declspecs *declspecs_add_scspec (source_location,
struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_attrs (source_location,
struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_addrspace (source_location,
struct c_declspecs *,
addr_space_t);
extern struct c_declspecs *declspecs_add_alignas (source_location,
struct c_declspecs *, tree);
extern struct c_declspecs *finish_declspecs (struct c_declspecs *);
/* in c-objc-common.c */
extern bool c_objc_common_init (void);
extern bool c_missing_noreturn_ok_p (tree);
extern bool c_warn_unused_global_decl (const_tree);
extern void c_initialize_diagnostics (diagnostic_context *);
extern bool c_vla_unspec_p (tree x, tree fn);
/* in c-typeck.c */
/* Nonzero while parsing the (unevaluated) operand of alignof / sizeof /
   typeof respectively.  */
extern int in_alignof;
extern int in_sizeof;
extern int in_typeof;
extern tree c_last_sizeof_arg;
extern struct c_switch *c_switch_stack;
extern tree c_objc_common_truthvalue_conversion (location_t, tree);
extern tree require_complete_type (tree);
extern int same_translation_unit_p (const_tree, const_tree);
extern int comptypes (tree, tree);
extern int comptypes_check_different_types (tree, tree, bool *);
extern bool c_vla_type_p (const_tree);
extern bool c_mark_addressable (tree);
extern void c_incomplete_type_error (const_tree, const_tree);
extern tree c_type_promotes_to (tree);
extern struct c_expr default_function_array_conversion (location_t,
struct c_expr);
extern struct c_expr default_function_array_read_conversion (location_t,
struct c_expr);
extern void mark_exp_read (tree);
extern tree composite_type (tree, tree);
/* Expression building.  */
extern tree build_component_ref (location_t, tree, tree);
extern tree build_array_ref (location_t, tree, tree);
extern tree build_external_ref (location_t, tree, int, tree *);
extern void pop_maybe_used (bool);
extern struct c_expr c_expr_sizeof_expr (location_t, struct c_expr);
extern struct c_expr c_expr_sizeof_type (location_t, struct c_type_name *);
extern struct c_expr parser_build_unary_op (location_t, enum tree_code,
struct c_expr);
extern struct c_expr parser_build_binary_op (location_t,
enum tree_code, struct c_expr,
struct c_expr);
extern tree build_conditional_expr (location_t, tree, bool, tree, tree,
tree, tree);
extern tree build_compound_expr (location_t, tree, tree);
extern tree c_cast_expr (location_t, struct c_type_name *, tree);
extern tree build_c_cast (location_t, tree, tree);
/* Initializer processing.  */
extern void store_init_value (location_t, tree, tree, tree);
extern void error_init (const char *);
extern void pedwarn_init (location_t, int opt, const char *);
extern void maybe_warn_string_init (tree, struct c_expr);
extern void start_init (tree, tree, int);
extern void finish_init (void);
extern void really_start_incremental_init (tree);
extern void push_init_level (int, struct obstack *);
extern struct c_expr pop_init_level (int, struct obstack *);
extern void set_init_index (tree, tree, struct obstack *);
extern void set_init_label (tree, struct obstack *);
extern void process_init_element (struct c_expr, bool, struct obstack *);
extern tree build_compound_literal (location_t, tree, tree, bool);
extern void check_compound_literal_type (location_t, struct c_type_name *);
/* Statement building.  */
extern tree c_start_case (location_t, location_t, tree);
extern void c_finish_case (tree);
extern tree build_asm_expr (location_t, tree, tree, tree, tree, tree, bool);
extern tree build_asm_stmt (tree, tree);
extern int c_types_compatible_p (tree, tree);
extern tree c_begin_compound_stmt (bool);
extern tree c_end_compound_stmt (location_t, tree, bool);
extern void c_finish_if_stmt (location_t, tree, tree, tree, bool);
extern void c_finish_loop (location_t, tree, tree, tree, tree, tree, bool);
extern tree c_begin_stmt_expr (void);
extern tree c_finish_stmt_expr (location_t, tree);
extern tree c_process_expr_stmt (location_t, tree);
extern tree c_finish_expr_stmt (location_t, tree);
extern tree c_finish_return (location_t, tree, tree);
extern tree c_finish_bc_stmt (location_t, tree *, bool);
extern tree c_finish_goto_label (location_t, tree);
extern tree c_finish_goto_ptr (location_t, tree);
extern tree c_expr_to_decl (tree, bool *, bool *);
/* OpenMP / transactional-memory support.  */
extern tree c_begin_omp_parallel (void);
extern tree c_finish_omp_parallel (location_t, tree, tree);
extern tree c_begin_omp_task (void);
extern tree c_finish_omp_task (location_t, tree, tree);
extern tree c_finish_omp_clauses (tree);
extern tree c_build_va_arg (location_t, tree, tree);
extern tree c_finish_transaction (location_t, tree, int);
/* Set to 0 at beginning of a function definition, set to 1 if
   a return statement that specifies a return value is seen.  */
extern int current_function_returns_value;
/* Set to 0 at beginning of a function definition, set to 1 if
   a return statement with no argument is seen.  */
extern int current_function_returns_null;
/* Set to 0 at beginning of a function definition, set to 1 if
   a call to a noreturn function is seen.  */
extern int current_function_returns_abnormally;
/* Mode used to build pointers (VOIDmode means ptr_mode).  */
extern enum machine_mode c_default_pointer_mode;
/* In c-decl.c */
extern void c_finish_incomplete_decl (tree);
extern void c_write_global_declarations (void);
/* In c-errors.c */
/* Pedantic warnings for constructs invalid in C90 / C99; the format
   string is printf-checked via ATTRIBUTE_GCC_DIAG.  */
extern void pedwarn_c90 (location_t, int opt, const char *, ...) ATTRIBUTE_GCC_DIAG(3,4);
extern void pedwarn_c99 (location_t, int opt, const char *, ...) ATTRIBUTE_GCC_DIAG(3,4);
#endif /* ! GCC_C_TREE_H */
|
elim.c | /*!
* \file elim.c
* \author Jun Yoshida
* \copyright (c) Jun Yoshida 2019
* The project is released under BSD3 License.
*/
#include "elim.h"
#include <stdlib.h>
#include <omp.h>
#include "common.h"
#include "elementary.h"
/* Debug
#include <stdio.h>
#define DEBUG_MSG fprintf(stderr, "%s:%d\n", __func__, __LINE__);
// */ #define DEBUG_MSG
/*!
* Transform a given matrix A into UA' where
* - U is an invertible matrix.
* - A' is an upper-triangular matrix all whose column vectors has only 0 but at most single 1 in the entries.
*/
size_t elim_rows(matrix_type * restrict u, matrix_type * restrict uinv, matrix_type * restrict mat)
{
    /* Callers may pass NULL for u / uinv; substitute degenerate 0-row /
       0-column matrices so the mirrored elementary operations below
       become no-ops.  NOTE(review): matrix_type appears to carry a data
       pointer (.p, see diag_rep), so mutating through these local copies
       still updates the caller's storage -- confirm in common.h.  */
    matrix_type u_ = u ? *u : MATRIX_ZEROROW(mat->r);
    matrix_type uinv_ = uinv ? *uinv : MATRIX_ZEROCOL(mat->r);
    /* Number of pivots found so far == index of the next pivot row.  */
    size_t rank = 0;
    for (size_t j = 0; j < mat->c && rank < mat->r; ++j) {
        size_t i = rank;
        // Find the topmost non-zero entry in j-th column below the rank-th row.
        while (i < mat->r) {
            if (MATRIX_AT(*mat, i, j))
                break;
            ++i;
        }
        // If not found, skip the column.
        if( i >= mat->r )
            continue;
        // Swap the row with rank-th row.
        /* Each row operation on mat is mirrored as the same row operation
           on uinv_ and as the inverse column operation on u_, so the
           product U * mat stays invariant.  */
        if (i > rank) {
            swap_rows(i, rank, mat);
            swap_rows(i, rank, &uinv_);
            swap_columns(i, rank, &u_);
        }
        // Row-elimination with respect to rank-th row;
        /* The axpy coefficient is always 1; presumably the arithmetic is
           over GF(2) -- confirm in elementary.h.  */
        for(size_t k = 0; k < mat->r; ++k) {
            if (k != rank && MATRIX_AT(*mat, k, j)) {
                axpy_rows(1, rank, k, mat);
                axpy_rows(1, rank, k, &uinv_);
                axpy_columns(1, k, rank, &u_);
            }
        }
        // Increment the rank counter.
        ++rank;
    }
    return rank;
}
/*!
 * Eliminate the off-diagonal entries of mat, accumulating the row-side
 * transform in (u, uinv) and the column-side transform in (v, vinv).
 * Returns the rank of mat.  Any of u, uinv, v, vinv may be NULL.
 */
size_t elim_off_diag(matrix_type * restrict u, matrix_type * restrict uinv, matrix_type * restrict mat, matrix_type * restrict v, matrix_type * restrict vinv)
{
    /* Column elimination is row elimination performed on the transpose:
       flip mat (and the optional column-side accumulators), reduce,
       then flip everything back.  */
    matrix_type *flip[] = { mat, v, vinv };
    const size_t nflip = sizeof(flip) / sizeof(flip[0]);

    for (size_t t = 0; t < nflip; ++t)
        if (flip[t])
            transpose(flip[t]);

    elim_rows(v, vinv, mat);

    for (size_t t = 0; t < nflip; ++t)
        if (flip[t])
            transpose(flip[t]);

    /* Ordinary row elimination finishes the job; its pivot count is the
       rank of the whole matrix.  */
    return elim_rows(u, uinv, mat);
}
size_t diag_rep(matrix_type * restrict a, matrix_type * restrict mat, matrix_type * restrict b)
{
    /* auxiliary matrix: column major (Xr == 1, Xc == row count).
       NOTE(review): the calloc result is not checked; OOM yields a
       NULL .p and the loop below would dereference it.  */
    matrix_type aux = {
        .p = calloc(a->r * a->r, sizeof(target_type)),
        .r = a->r,
        .c = a->r,
        .Xr = 1,
        .Xc = a->r,
    };
    /* Initialize aux into the identity matrix. */
    #pragma omp parallel for
    for(size_t i = 0; i < a->r; ++i)
        MATRIX_AT(aux,i,i) = 1;
    // Make mat into a diagonal matrix.
    size_t rank = elim_off_diag(&aux, NULL, mat, NULL, b);
    /* Multiply A by U which is as simple as possible. */
    aux.c = rank;
    elim_rows(a, NULL, &aux);
    /* We will not use aux any more. */
    free(aux.p);
    /* Make the kernel vectors cleaner. */
    if (rank < mat->r) {
        /* View of b's rows past `rank', reinterpreted as transposed by
           swapping the strides (Xr/Xc); it aliases b's storage, so the
           elimination below rewrites b in place.  NOTE(review): .r uses
           b->c - rank while the pointer offset uses rank * b->Xc, which
           presumably assumes b is square -- confirm with callers.  */
        matrix_type bker = {
            .p = b->p + rank * b->Xc,
            .r = b->c - rank,
            .c = b->r,
            .Xr = b->Xc,
            .Xc = b->Xr
        };
        /* Column elimination on kernel vectors of b
         * Note that bker is already transposed in the initialization above.
         */
        elim_rows(NULL, NULL, &bker);
    }
    return rank;
}
|
dnnl_quantize_v2-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file dnnl_quantize_v2-inl.h
* \brief
*/
#ifndef MXNET_OPERATOR_QUANTIZATION_DNNL_DNNL_QUANTIZE_V2_INL_H_
#define MXNET_OPERATOR_QUANTIZATION_DNNL_DNNL_QUANTIZE_V2_INL_H_
#if MXNET_USE_ONEDNN == 1
#include <algorithm>
#include <string>
#include <vector>
#include "../../nn/dnnl/dnnl_base-inl.h"
#include "../quantize_v2-inl.h"
namespace mxnet {
namespace op {
/* Stateful oneDNN quantize_v2 operator.  Forward() caches the output
   memory descriptor and the reorder primitive so that repeated calls
   with an unchanged data range skip primitive re-creation.  */
class SgDNNLQuantizeOperator {
 public:
  explicit SgDNNLQuantizeOperator(const nnvm::NodeAttrs& attrs)
      : param_(nnvm::get<QuantizeV2Param>(attrs.parsed)) {}

  void Forward(const OpContext& ctx,
               const std::vector<NDArray>& inputs,
               const std::vector<OpReqType>& req,
               const std::vector<NDArray>& outputs);

 private:
  bool initalized_{false};  // [sic] true while the cached state below is valid
  QuantizeV2Param param_;
  float cached_data_min_{0.f};  // data range the cached primitive was built for
  float cached_data_max_{0.f};
  dnnl::memory::desc o_desc_;   // cached output memory descriptor
  dnnl_args_map_t args_;        // argument map for the reorder execution
  std::shared_ptr<dnnl::reorder> fwd_pd_;  // cached reorder primitive
};
/* Quantize inputs[0] (float, or already-quantized int8/uint8) into
   outputs[0], writing the quantization min/max into outputs[1]/[2].  */
void SgDNNLQuantizeOperator::Forward(const OpContext& ctx,
                                     const std::vector<NDArray>& inputs,
                                     const std::vector<OpReqType>& req,
                                     const std::vector<NDArray>& outputs) {
  float quantized_range = 0.0;
  NDArray in_buffer = inputs[0];
  // Seed the running min/max with the opposite extremes so any scanned
  // element narrows them.
  float data_min = mshadow::red::limits::MaxValue<float>();
  float data_max = mshadow::red::limits::MinValue<float>();
  // Pass through quantized data
  if (inputs[0].dtype() == mshadow::kUint8 || inputs[0].dtype() == mshadow::kInt8) {
    // Already quantized: only publish min/max (calibrated if available,
    // otherwise the full range of the type) and copy the data through.
    if (param_.min_calib_range.has_value() && param_.max_calib_range.has_value()) {
      *outputs[1].data().dptr<float>() = param_.min_calib_range.value();
      *outputs[2].data().dptr<float>() = param_.max_calib_range.value();
    } else {
      if (inputs[0].dtype() == mshadow::kUint8) {
        *outputs[1].data().dptr<float>() = 0;
        *outputs[2].data().dptr<float>() = kUint8Range;
      } else {
        *outputs[1].data().dptr<float>() = -kInt8Range;
        *outputs[2].data().dptr<float>() = kInt8Range;
      }
    }
    if (req[0] != kWriteInplace) {
      const_cast<NDArray&>(outputs[0]).CopyFrom(*inputs[0].GetDNNLData());
      DNNLStream::Get()->Submit();
    }
  } else {
    if (in_buffer.IsView() && in_buffer.IsDNNLData())
      in_buffer = inputs[0].Reorder2Default();
    auto i_mem = in_buffer.GetDNNLData();
    if (param_.min_calib_range.has_value() && param_.max_calib_range.has_value()) {
      data_min = param_.min_calib_range.value();
      data_max = param_.max_calib_range.value();
    } else {
      // no calib info
      // Scan the input for min/max with one partial result per OMP
      // thread, then reduce.  NOTE(review): i_mem above still refers to
      // the memory fetched before this Reorder2Default; the reorder
      // consumes that layout while the scan reads the default layout --
      // the element values are the same either way, but confirm the
      // lifetime of the earlier buffer.
      in_buffer = inputs[0].Reorder2Default();
      auto in_ptr = in_buffer.data().dptr<float>();
      auto nthreads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
      std::vector<float> data_maxs(nthreads, data_max);
      std::vector<float> data_mins(nthreads, data_min);
#pragma omp parallel for num_threads(nthreads)
      for (index_t i = 0; i < static_cast<index_t>(in_buffer.shape().Size()); i++) {
        int tid = omp_get_thread_num();
        if (in_ptr[i] > data_maxs[tid])
          data_maxs[tid] = in_ptr[i];
        if (in_ptr[i] < data_mins[tid])
          data_mins[tid] = in_ptr[i];
      }
      for (index_t i = 0; i < nthreads; i++) {
        if (data_maxs[i] > data_max)
          data_max = data_maxs[i];
        if (data_mins[i] < data_min)
          data_min = data_mins[i];
      }
      // A changed data range invalidates the cached reorder primitive.
      if (initalized_ && (cached_data_min_ != data_min || cached_data_max_ != data_max))
        initalized_ = false;
    }
    // Write output min/max
    auto out_type = GetQuantizeOutputType(param_);
    if (out_type == mshadow::kUint8) {
      quantized_range = kUint8Range;
      *outputs[1].data().dptr<float>() = data_min;
      *outputs[2].data().dptr<float>() = data_max;
    } else if (out_type == mshadow::kInt8) {
      // int8 output uses a symmetric range [-real_range, real_range].
      float real_range = MaxAbs(data_min, data_max);
      quantized_range = kInt8Range;
      *outputs[1].data().dptr<float>() = -real_range;
      *outputs[2].data().dptr<float>() = real_range;
    } else {
      LOG(FATAL) << "dnnl quantize op only supports int8 and uint8 as output type";
    }
    if (!initalized_) {
      // (Re)build the reorder primitive that performs the actual
      // float -> int8/uint8 conversion via oneDNN output scales.
      cached_data_min_ = data_min;
      cached_data_max_ = data_max;
      float real_range = MaxAbs(data_min, data_max);
      float scale = quantized_range / real_range;
      dnnl::primitive_attr attr;
      const int mask = 0;  // one common scale for the whole tensor
      std::vector<float> scales = {scale};
      attr.set_output_scales(mask, scales);
      dnnl::engine cpu_engine = mxnet::CpuEngine::Get()->get_engine();
      auto i_desc = i_mem->get_desc();
      size_t i_ndim = in_buffer.shape().ndim();
      if (i_ndim == 4) {
        // 4D outputs are emitted in nhwc layout.
        dnnl::memory::format_tag o_fmt = dnnl::memory::format_tag::nhwc;
        dnnl::memory::dims o_dims(i_desc.data.dims, i_desc.data.dims + i_desc.data.ndims);
        o_desc_ = dnnl::memory::desc(o_dims, get_dnnl_type(out_type), o_fmt);
      } else {
        // Otherwise keep the input layout and only switch the data type.
        o_desc_ = i_desc;
        o_desc_.data.data_type = get_dnnl_type_t(out_type);
      }
      auto reorder_pd =
          dnnl::reorder::primitive_desc(cpu_engine, i_desc, cpu_engine, o_desc_, attr);
      fwd_pd_ = std::make_shared<dnnl::reorder>(reorder_pd);
      initalized_ = true;
    }
    auto o_mem = CreateDNNLMem(outputs[0], o_desc_, req[0]);
    args_[DNNL_ARG_FROM] = *i_mem;
    args_[DNNL_ARG_TO] = *o_mem.second;
    DNNLStream::Get()->RegisterPrimArgs(*fwd_pd_, args_);
    CommitOutput(outputs[0], o_mem);
    DNNLStream::Get()->Submit();
  }
}
/* Stateless dispatch shim: fetch the SgDNNLQuantizeOperator cached in
   the node's state and delegate the whole forward pass to it.  */
static void SgDNNLQuantizeForward(const OpStatePtr& state_ptr,
                                  const OpContext& ctx,
                                  const std::vector<NDArray>& inputs,
                                  const std::vector<OpReqType>& req,
                                  const std::vector<NDArray>& outputs) {
  state_ptr.get_state<SgDNNLQuantizeOperator>().Forward(ctx, inputs, req, outputs);
}
} // namespace op
} // namespace mxnet
#endif // MXNET_USE_ONEDNN == 1
#endif // MXNET_OPERATOR_QUANTIZATION_DNNL_DNNL_QUANTIZE_V2_INL_H_
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.